author     Connor Abbott <[email protected]>  2019-05-20 14:59:40 +0200
committer  Connor Abbott <[email protected]>  2019-05-31 19:14:16 +0200
commit     6f9beb28bb55ccf01445c8e57d3dca5216ec530f (patch)
tree       784f9a24a071af2fa7b072347a6b1dff2dfc478f /src/util/set.c
parent     451211741c894e7aea33f92a75fe9870fc5f1478 (diff)
util/set: Add specialized resizing add function
A significant portion of the time spent in nir_opt_cse for the Dolphin
ubershaders was in resizing the set. When resizing a hash table, we know
in advance that each new element to be inserted will be different from
every other element, so we don't have to compare them, and there will be
no tombstone elements, so we don't have to worry about caching the
first-seen tombstone. We add a specialized add function which skips
these steps entirely, speeding up resizing.

Compile-time results from my shader-db database:

    Difference at 95.0% confidence
        -2.29143 +/- 0.845534
        -0.529475% +/- 0.194767%
        (Student's t, pooled s = 1.08807)

Reviewed-by: Eric Anholt <[email protected]>
Acked-by: Jason Ekstrand <[email protected]>
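For comparison, here is roughly what a general-purpose open-addressing
insert has to do on every probe step. This is a simplified sketch
assuming the surrounding set.c definitions, not mesa's verbatim set_add;
the helpers entry_is_deleted and key_equals_function stand in for
whatever tombstone and equality handling the real table uses. The two
branches marked "skipped during rehash" are exactly the work that
set_add_rehash below avoids.

/* Simplified sketch of a general-purpose insert (illustrative helper
 * names, not necessarily mesa's exact API).  A real implementation
 * resizes before the table fills up, so the probe loop terminates. */
static struct set_entry *
set_add_sketch(struct set *ht, uint32_t hash, const void *key)
{
   uint32_t size = ht->size;
   uint32_t hash_address = hash % size;
   uint32_t double_hash = hash % ht->rehash + 1;
   struct set_entry *available = NULL;

   do {
      struct set_entry *entry = ht->table + hash_address;

      if (entry->key == NULL) {
         /* Truly free slot: the probe chain ends here. */
         if (!available)
            available = entry;
         break;
      } else if (entry_is_deleted(ht, entry)) {
         /* Skipped during rehash: a freshly allocated table has no
          * tombstones, so there is no first-seen tombstone to cache. */
         if (!available)
            available = entry;
      } else if (entry->hash == hash &&
                 ht->key_equals_function(entry->key, key)) {
         /* Skipped during rehash: the reinserted entries are known to
          * be distinct, so no key comparison is needed. */
         return entry;
      }

      hash_address += double_hash;
      if (hash_address >= size)
         hash_address -= size;
   } while (true);

   available->hash = hash;
   available->key = key;
   ht->entries++;
   return available;
}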
Diffstat (limited to 'src/util/set.c')
-rw-r--r--  src/util/set.c | 26
1 file changed, 23 insertions(+), 3 deletions(-)
diff --git a/src/util/set.c b/src/util/set.c
index a1cf05cb4dc..7311f4336e8 100644
--- a/src/util/set.c
+++ b/src/util/set.c
@@ -245,8 +245,26 @@ _mesa_set_search_pre_hashed(const struct set *set, uint32_t hash,
    return set_search(set, hash, key);
 }
 
-static struct set_entry *
-set_add(struct set *ht, uint32_t hash, const void *key);
+static void
+set_add_rehash(struct set *ht, uint32_t hash, const void *key)
+{
+   uint32_t size = ht->size;
+   uint32_t start_address = hash % size;
+   uint32_t double_hash = hash % ht->rehash + 1;
+   uint32_t hash_address = start_address;
+   do {
+      struct set_entry *entry = ht->table + hash_address;
+      if (likely(entry->key == NULL)) {
+         entry->hash = hash;
+         entry->key = key;
+         return;
+      }
+
+      hash_address = hash_address + double_hash;
+      if (hash_address >= size)
+         hash_address -= size;
+   } while (true);
+}
 
 static void
 set_rehash(struct set *ht, unsigned new_size_index)
@@ -273,9 +291,11 @@ set_rehash(struct set *ht, unsigned new_size_index)
    ht->entries = 0;
    ht->deleted_entries = 0;
 
    set_foreach(&old_ht, entry) {
-      set_add(ht, entry->hash, entry->key);
+      set_add_rehash(ht, entry->hash, entry->key);
    }
 
+   ht->entries = old_ht.entries;
+
    ralloc_free(old_ht.table);
 }
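Two properties make the minimal probe loop in set_add_rehash safe.
First, set_rehash moves to a larger size before the table is full, so a
NULL slot always exists and the do/while terminates. Second, the table
sizes come from a list of primes, and double_hash = hash % rehash + 1
lies in [1, rehash] with rehash < size, so the stride is coprime to the
size and the probe sequence visits every slot. A standalone toy program
(not mesa code; the constants here are made up) that prints one full
probe cycle:

/* Toy demonstration of double-hash probe coverage, independent of
 * mesa.  With a prime table size, any stride in [1, size-1] visits
 * every slot exactly once before the sequence repeats. */
#include <stdio.h>

int main(void)
{
   const unsigned size = 7;       /* prime, like the real size table */
   const unsigned rehash = 5;     /* made-up value with rehash < size */
   unsigned hash = 42;
   unsigned double_hash = hash % rehash + 1;   /* 3, in [1, rehash] */
   unsigned addr = hash % size;                /* 0 */

   for (unsigned i = 0; i < size; i++) {
      printf("%u ", addr);        /* prints 0 3 6 2 5 1 4 */
      addr += double_hash;
      if (addr >= size)
         addr -= size;
   }
   printf("\n");
   return 0;
}

Note also why the second hunk restores ht->entries from old_ht.entries:
unlike set_add, set_add_rehash never increments the entry count, so
without that line the count would remain zero after reinsertion.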