author		Timothy Arceri <[email protected]>	2017-10-26 09:35:48 +1100
committer	Emil Velikov <[email protected]>	2017-11-03 18:21:42 +0000
commit		b4bf9f6a41881b3a8cb63bd23d15ac0c08df4982 (patch)
tree		082b3c7d0acb12f91a7e80c07d7a540829a91707
parent		2516c3217ded38a9d025d90502cf5b029593c66c (diff)
radv: add cache items to in memory cache when reading from disk
Otherwise we will leak them, load duplicates from disk rather than
memory, and never write items loaded from disk to the app's pipeline
cache.

Fixes: fd24be134ffd 'radv: make use of on-disk cache'
Reviewed-by: Bas Nieuwenhuizen <[email protected]>
(cherry picked from commit 1e84e53712aed4892fbaf98e6f26ffdf76f06165)

Squashed with commit:

radv: use correct alloc function when loading from disk

Fixes regression in:
dEQP-VK.api.object_management.alloc_callback_fail.graphics_pipeline

Fixes: 1e84e53712ae "radv: add cache items to in memory cache when reading from disk"
Reviewed-by: Bas Nieuwenhuizen <[email protected]>
(cherry picked from commit e92405c55aa885bee5dfb05fac032cab5e419290)
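In short: when a pipeline-cache lookup misses in memory but hits the on-disk cache, the loaded entry must also be inserted into the in-memory hash table, and the copy kept there has to come from the cache's own allocator. Below is a minimal standalone sketch of that pattern; the struct layouts and helper names are simplified stand-ins invented for illustration, not the real radv definitions (radv performs the copy with vk_alloc(&cache->alloc, ...) rather than malloc()).

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Simplified entry: fixed header plus trailing shader payload; `size`
 * counts both, so copying `size` bytes clones the whole entry. */
struct entry {
	uint32_t sha1_dw[5];	/* hash key; dword 0 picks the probe start */
	uint32_t size;
};

/* Open-addressed table, power-of-two sized, kept at most half full. */
struct table {
	struct entry **slots;
	uint32_t size;
	uint32_t count;
};

/* Linear probing, the same scheme radv_pipeline_cache_set_entry() uses. */
static void table_set(struct table *t, struct entry *e)
{
	const uint32_t mask = t->size - 1;
	for (uint32_t i = 0; i < t->size; i++) {
		const uint32_t idx = (e->sha1_dw[0] + i) & mask;
		if (!t->slots[idx]) {
			t->slots[idx] = e;
			t->count++;
			return;
		}
	}
}

/* On a disk-cache hit, re-home the malloc'ed blob into memory the
 * in-memory cache owns before inserting it.  radv does this copy with
 * vk_alloc(&cache->alloc, ...) so the allocation is visible to the
 * application's allocation callbacks; malloc() here is only a stand-in
 * to keep the sketch self-contained. */
static struct entry *adopt_disk_entry(struct table *t, struct entry *disk)
{
	struct entry *copy = malloc(disk->size);
	if (!copy) {
		free(disk);
		return NULL;	/* caller treats this as a cache miss */
	}
	memcpy(copy, disk, disk->size);
	free(disk);
	table_set(t, copy);
	return copy;
}

int main(void)
{
	struct entry **slots = calloc(16, sizeof(*slots));
	struct entry *disk = calloc(1, sizeof(*disk));
	if (!slots || !disk)
		return 1;

	struct table t = { slots, 16, 0 };

	/* Pretend this blob was just read back from the on-disk cache. */
	disk->sha1_dw[0] = 0xdeadbeefu;
	disk->size = sizeof(*disk);

	adopt_disk_entry(&t, disk);	/* the next lookup now hits memory */

	free(t.slots[0xdeadbeefu & 15]);
	free(slots);
	return 0;
}

The allocator detail is what the squashed follow-up fixes: routing the copy through the application's allocation callbacks is what dEQP-VK.api.object_management.alloc_callback_fail.graphics_pipeline exercises, since that test makes the callbacks fail and expects the driver to unwind cleanly.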
-rw-r--r--	src/amd/vulkan/radv_pipeline_cache.c	154

1 file changed, 84 insertions, 70 deletions
diff --git a/src/amd/vulkan/radv_pipeline_cache.c b/src/amd/vulkan/radv_pipeline_cache.c
index 9ba9a3b61ba..2bf63793f6b 100644
--- a/src/amd/vulkan/radv_pipeline_cache.c
+++ b/src/amd/vulkan/radv_pipeline_cache.c
@@ -170,6 +170,75 @@ radv_pipeline_cache_search(struct radv_pipeline_cache *cache,
 	return entry;
 }
+static void
+radv_pipeline_cache_set_entry(struct radv_pipeline_cache *cache,
+			      struct cache_entry *entry)
+{
+	const uint32_t mask = cache->table_size - 1;
+	const uint32_t start = entry->sha1_dw[0];
+
+	/* We'll always be able to insert when we get here. */
+	assert(cache->kernel_count < cache->table_size / 2);
+
+	for (uint32_t i = 0; i < cache->table_size; i++) {
+		const uint32_t index = (start + i) & mask;
+		if (!cache->hash_table[index]) {
+			cache->hash_table[index] = entry;
+			break;
+		}
+	}
+
+	cache->total_size += entry_size(entry);
+	cache->kernel_count++;
+}
+
+
+static VkResult
+radv_pipeline_cache_grow(struct radv_pipeline_cache *cache)
+{
+	const uint32_t table_size = cache->table_size * 2;
+	const uint32_t old_table_size = cache->table_size;
+	const size_t byte_size = table_size * sizeof(cache->hash_table[0]);
+	struct cache_entry **table;
+	struct cache_entry **old_table = cache->hash_table;
+
+	table = malloc(byte_size);
+	if (table == NULL)
+		return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+	cache->hash_table = table;
+	cache->table_size = table_size;
+	cache->kernel_count = 0;
+	cache->total_size = 0;
+
+	memset(cache->hash_table, 0, byte_size);
+	for (uint32_t i = 0; i < old_table_size; i++) {
+		struct cache_entry *entry = old_table[i];
+		if (!entry)
+			continue;
+
+		radv_pipeline_cache_set_entry(cache, entry);
+	}
+
+	free(old_table);
+
+	return VK_SUCCESS;
+}
+
+static void
+radv_pipeline_cache_add_entry(struct radv_pipeline_cache *cache,
+			      struct cache_entry *entry)
+{
+	if (cache->kernel_count == cache->table_size / 2)
+		radv_pipeline_cache_grow(cache);
+
+	/* Failing to grow that hash table isn't fatal, but may mean we don't
+	 * have enough space to add this new kernel. Only add it if there's room.
+	 */
+	if (cache->kernel_count < cache->table_size / 2)
+		radv_pipeline_cache_set_entry(cache, entry);
+}
+
 bool
 radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
 						struct radv_pipeline_cache *cache,
@@ -201,6 +270,21 @@ radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
 		if (!entry) {
 			pthread_mutex_unlock(&cache->mutex);
 			return false;
+		} else {
+			size_t size = entry_size(entry);
+			struct cache_entry *new_entry = vk_alloc(&cache->alloc, size, 8,
+								 VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
+			if (!new_entry) {
+				free(entry);
+				pthread_mutex_unlock(&cache->mutex);
+				return false;
+			}
+
+			memcpy(new_entry, entry, entry_size(entry));
+			free(entry);
+			entry = new_entry;
+
+			radv_pipeline_cache_add_entry(cache, new_entry);
 		}
 	}
@@ -246,76 +330,6 @@ radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
 	return true;
 }
-
-static void
-radv_pipeline_cache_set_entry(struct radv_pipeline_cache *cache,
-			      struct cache_entry *entry)
-{
-	const uint32_t mask = cache->table_size - 1;
-	const uint32_t start = entry->sha1_dw[0];
-
-	/* We'll always be able to insert when we get here. */
-	assert(cache->kernel_count < cache->table_size / 2);
-
-	for (uint32_t i = 0; i < cache->table_size; i++) {
-		const uint32_t index = (start + i) & mask;
-		if (!cache->hash_table[index]) {
-			cache->hash_table[index] = entry;
-			break;
-		}
-	}
-
-	cache->total_size += entry_size(entry);
-	cache->kernel_count++;
-}
-
-
-static VkResult
-radv_pipeline_cache_grow(struct radv_pipeline_cache *cache)
-{
-	const uint32_t table_size = cache->table_size * 2;
-	const uint32_t old_table_size = cache->table_size;
-	const size_t byte_size = table_size * sizeof(cache->hash_table[0]);
-	struct cache_entry **table;
-	struct cache_entry **old_table = cache->hash_table;
-
-	table = malloc(byte_size);
-	if (table == NULL)
-		return VK_ERROR_OUT_OF_HOST_MEMORY;
-
-	cache->hash_table = table;
-	cache->table_size = table_size;
-	cache->kernel_count = 0;
-	cache->total_size = 0;
-
-	memset(cache->hash_table, 0, byte_size);
-	for (uint32_t i = 0; i < old_table_size; i++) {
-		struct cache_entry *entry = old_table[i];
-		if (!entry)
-			continue;
-
-		radv_pipeline_cache_set_entry(cache, entry);
-	}
-
-	free(old_table);
-
-	return VK_SUCCESS;
-}
-
-static void
-radv_pipeline_cache_add_entry(struct radv_pipeline_cache *cache,
-			      struct cache_entry *entry)
-{
-	if (cache->kernel_count == cache->table_size / 2)
-		radv_pipeline_cache_grow(cache);
-
-	/* Failing to grow that hash table isn't fatal, but may mean we don't
-	 * have enough space to add this new kernel. Only add it if there's room.
-	 */
-	if (cache->kernel_count < cache->table_size / 2)
-		radv_pipeline_cache_set_entry(cache, entry);
-}
-
 void
 radv_pipeline_cache_insert_shaders(struct radv_device *device,
 				   struct radv_pipeline_cache *cache,