author:    Jason Ekstrand <[email protected]>    2015-02-04 18:46:23 -0800
committer: Jason Ekstrand <[email protected]>    2015-02-07 17:01:05 -0800
commit:    623c3a858d9c18f7d62a82597a488e7b54a4a4f4 (patch)
tree:      cb9d5e482e71e4d881d85aeb602efbca11cb6112 /src/util/set.c
parent:    c9287e797b3e58bd4403f79d484e7235dc59262d (diff)
util/set: Do a full search when adding new items
Previously, the set_insert function would bail early if it found a deleted slot that it could re-use. However, this is a problem if the key being inserted is already in the set but further down the list. If this happens, the element ends up getting inserted in the set twice. This commit makes it so that we walk over all of the possible entries for the given key and then, if we don't find the key, place it in the available free entry we found.

Reviewed-by: Eric Anholt <[email protected]>
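To make the failure mode concrete, here is a minimal, self-contained sketch. It is not the mesa code: it assumes a toy fixed-size table with linear probing and int keys (the real set uses double hashing, as the diff below shows), and contrasts the old early-bail insert with the full-search insert this commit introduces.

/* Toy open-addressing table demonstrating the double-insert bug.
 * Assumptions: fixed size, linear probing, int keys -- not the mesa set. */
#include <stdio.h>

#define TABLE_SIZE 8

enum slot_state { FREE, DELETED, PRESENT };

struct slot {
   enum slot_state state;
   int key;
};

/* Old behaviour: bail at the first non-present slot, even though the key
 * may already live further down the probe chain. */
static void
early_bail_add(struct slot *t, int key)
{
   for (unsigned i = 0; i < TABLE_SIZE; i++) {
      struct slot *s = &t[(key + i) % TABLE_SIZE];
      if (s->state != PRESENT) {      /* free OR deleted: reuse immediately */
         s->state = PRESENT;
         s->key = key;
         return;
      }
      if (s->key == key)              /* key already present */
         return;
   }
}

/* Patched behaviour: stash the first available slot, but keep probing until
 * a free slot (end of the chain) or the key itself is found. */
static void
full_search_add(struct slot *t, int key)
{
   struct slot *available = NULL;
   for (unsigned i = 0; i < TABLE_SIZE; i++) {
      struct slot *s = &t[(key + i) % TABLE_SIZE];
      if (s->state != PRESENT) {
         if (available == NULL)
            available = s;
         if (s->state == FREE)        /* end of the probe chain */
            break;
         continue;
      }
      if (s->key == key)              /* found it: nothing to insert */
         return;
   }
   if (available) {
      available->state = PRESENT;
      available->key = key;
   }
}

static int
count_key(const struct slot *t, int key)
{
   int n = 0;
   for (unsigned i = 0; i < TABLE_SIZE; i++)
      if (t[i].state == PRESENT && t[i].key == key)
         n++;
   return n;
}

int
main(void)
{
   struct slot old_t[TABLE_SIZE] = {{FREE, 0}};
   struct slot new_t[TABLE_SIZE] = {{FREE, 0}};

   /* Keys 0 and 8 hash to the same slot; deleting 0 leaves a DELETED slot
    * in front of the still-present 8. */
   early_bail_add(old_t, 0);
   early_bail_add(old_t, 8);
   old_t[0].state = DELETED;           /* "remove" key 0 */
   early_bail_add(old_t, 8);           /* re-adds 8 into the deleted slot */

   full_search_add(new_t, 0);
   full_search_add(new_t, 8);
   new_t[0].state = DELETED;
   full_search_add(new_t, 8);          /* finds the existing 8, no duplicate */

   printf("early-bail copies of 8:  %d\n", count_key(old_t, 8));  /* 2 */
   printf("full-search copies of 8: %d\n", count_key(new_t, 8));  /* 1 */
   return 0;
}

Run standalone, the early-bail variant ends up with two copies of the colliding key, while the full-search variant keeps it unique.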
Diffstat (limited to 'src/util/set.c')
-rw-r--r--   src/util/set.c   | 21
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/src/util/set.c b/src/util/set.c
index c3252a09401..f01f8699ac2 100644
--- a/src/util/set.c
+++ b/src/util/set.c
@@ -251,6 +251,7 @@ static struct set_entry *
set_add(struct set *ht, uint32_t hash, const void *key)
{
uint32_t hash_address;
+ struct set_entry *available_entry = NULL;
if (ht->entries >= ht->max_entries) {
set_rehash(ht, ht->size_index + 1);
@@ -264,12 +265,11 @@ set_add(struct set *ht, uint32_t hash, const void *key)
uint32_t double_hash;
if (!entry_is_present(entry)) {
- if (entry_is_deleted(entry))
- ht->deleted_entries--;
- entry->hash = hash;
- entry->key = key;
- ht->entries++;
- return entry;
+ /* Stash the first available entry we find */
+ if (available_entry == NULL)
+ available_entry = entry;
+ if (entry_is_free(entry))
+ break;
}
/* Implement replacement when another insert happens
@@ -293,6 +293,15 @@ set_add(struct set *ht, uint32_t hash, const void *key)
hash_address = (hash_address + double_hash) % ht->size;
} while (hash_address != hash % ht->size);
+ if (available_entry) {
+ if (entry_is_deleted(available_entry))
+ ht->deleted_entries--;
+ available_entry->hash = hash;
+ available_entry->key = key;
+ ht->entries++;
+ return available_entry;
+ }
+
/* We could hit here if a required resize failed. An unchecked-malloc
* application could ignore this result.
*/
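With this change the insert loop no longer claims the first deleted slot it sees: it only notes it in available_entry and keeps probing until it either hits the key itself (handled by the replacement logic between the hunks) or reaches a free slot marking the end of the probe chain. Only then is the stashed slot filled in, so a key that is already present can never be inserted a second time; the cost is a slightly longer probe on inserts that reuse deleted slots.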