author     Alex Smith <[email protected]>          2017-10-19 11:49:39 +0100
committer  Bas Nieuwenhuizen <[email protected]>    2017-10-21 03:52:43 +0200
commit     0fdd531457ecaba263e6a27e030d451774f54b32 (patch)
tree       4d00b02dd9702429f30bb0c24adc379170587cef /src/amd/vulkan/radv_pipeline_cache.c
parent     c71d44c7f845a9ef23251d9a0c95267f4a711578 (diff)
radv: Fix pipeline cache locking issues
Need to lock around the whole process of retrieving cached shaders, and around GetPipelineCacheData. This fixes GPU hangs observed when creating multiple pipelines in parallel, which appeared to be due to invalid shader code being pulled from the cache.

Signed-off-by: Alex Smith <[email protected]>
Reviewed-by: Bas Nieuwenhuizen <[email protected]>
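A minimal, self-contained sketch of the locking pattern the message describes: take the cache mutex before the lookup and hold it until the result has been copied out, releasing it on every early-return path. The types and helpers here (shader_cache, cache_search_locked, cache_lookup) are hypothetical stand-ins, not the radv functions changed in the diff below.

#include <pthread.h>
#include <stdbool.h>
#include <string.h>

struct cache_entry {
	unsigned char sha1[20];
	void *code;
};

struct shader_cache {
	pthread_mutex_t mutex;
	struct cache_entry *entries;
	unsigned count;
};

/* Caller must hold cache->mutex. */
static struct cache_entry *
cache_search_locked(struct shader_cache *cache, const unsigned char sha1[20])
{
	for (unsigned i = 0; i < cache->count; i++) {
		if (memcmp(cache->entries[i].sha1, sha1, 20) == 0)
			return &cache->entries[i];
	}
	return NULL;
}

static bool
cache_lookup(struct shader_cache *cache, const unsigned char sha1[20],
             void **code_out)
{
	pthread_mutex_lock(&cache->mutex);

	struct cache_entry *entry = cache_search_locked(cache, sha1);
	if (!entry) {
		/* Every early return must drop the lock. */
		pthread_mutex_unlock(&cache->mutex);
		return false;
	}

	/* Copy the result out while the lock is still held, so a concurrent
	 * insertion cannot hand us a half-written entry. */
	*code_out = entry->code;

	pthread_mutex_unlock(&cache->mutex);
	return true;
}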
Diffstat (limited to 'src/amd/vulkan/radv_pipeline_cache.c')
-rw-r--r--  src/amd/vulkan/radv_pipeline_cache.c | 30
1 file changed, 23 insertions(+), 7 deletions(-)
diff --git a/src/amd/vulkan/radv_pipeline_cache.c b/src/amd/vulkan/radv_pipeline_cache.c
index 034dc35af87..a75356b8229 100644
--- a/src/amd/vulkan/radv_pipeline_cache.c
+++ b/src/amd/vulkan/radv_pipeline_cache.c
@@ -177,15 +177,20 @@ radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
struct radv_shader_variant **variants)
{
struct cache_entry *entry;
- if (cache)
- entry = radv_pipeline_cache_search(cache, sha1);
- else
- entry = radv_pipeline_cache_search(device->mem_cache, sha1);
+
+ if (!cache)
+ cache = device->mem_cache;
+
+ pthread_mutex_lock(&cache->mutex);
+
+ entry = radv_pipeline_cache_search_unlocked(cache, sha1);
if (!entry) {
if (!device->physical_device->disk_cache ||
- (device->instance->debug_flags & RADV_DEBUG_NO_CACHE))
+ (device->instance->debug_flags & RADV_DEBUG_NO_CACHE)) {
+ pthread_mutex_unlock(&cache->mutex);
return false;
+ }
uint8_t disk_sha1[20];
disk_cache_compute_key(device->physical_device->disk_cache,
@@ -193,8 +198,10 @@ radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
entry = (struct cache_entry *)
disk_cache_get(device->physical_device->disk_cache,
disk_sha1, NULL);
- if (!entry)
+ if (!entry) {
+ pthread_mutex_unlock(&cache->mutex);
return false;
+ }
}
char *p = entry->code;
@@ -204,8 +211,10 @@ radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
struct cache_entry_variant_info info;
variant = calloc(1, sizeof(struct radv_shader_variant));
- if (!variant)
+ if (!variant) {
+ pthread_mutex_unlock(&cache->mutex);
return false;
+ }
memcpy(&info, p, sizeof(struct cache_entry_variant_info));
p += sizeof(struct cache_entry_variant_info);
@@ -231,6 +240,7 @@ radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
p_atomic_inc(&entry->variants[i]->ref_count);
memcpy(variants, entry->variants, sizeof(entry->variants));
+ pthread_mutex_unlock(&cache->mutex);
return true;
}
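The remaining hunk applies the same treatment to radv_GetPipelineCacheData, which backs the standard two-call vkGetPipelineCacheData pattern on the application side. A hedged caller-side sketch of that pattern (plain Vulkan API, not radv internals; any non-VK_SUCCESS result is treated as failure for brevity):

#include <stdlib.h>
#include <vulkan/vulkan.h>

static void *
dump_pipeline_cache(VkDevice device, VkPipelineCache cache, size_t *size_out)
{
	size_t size = 0;

	/* First call, pData == NULL: the driver only reports the required size. */
	if (vkGetPipelineCacheData(device, cache, &size, NULL) != VK_SUCCESS)
		return NULL;

	void *blob = malloc(size);
	if (!blob)
		return NULL;

	/* Second call copies the data; holding the cache mutex across this on
	 * the driver side keeps the blob consistent while other threads keep
	 * inserting entries. */
	if (vkGetPipelineCacheData(device, cache, &size, blob) != VK_SUCCESS) {
		free(blob);
		return NULL;
	}

	*size_out = size;
	return blob;
}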
@@ -509,12 +519,17 @@ VkResult radv_GetPipelineCacheData(
RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
struct cache_header *header;
VkResult result = VK_SUCCESS;
+
+ pthread_mutex_lock(&cache->mutex);
+
const size_t size = sizeof(*header) + cache->total_size;
if (pData == NULL) {
+ pthread_mutex_unlock(&cache->mutex);
*pDataSize = size;
return VK_SUCCESS;
}
if (*pDataSize < sizeof(*header)) {
+ pthread_mutex_unlock(&cache->mutex);
*pDataSize = 0;
return VK_INCOMPLETE;
}
@@ -545,6 +560,7 @@ VkResult radv_GetPipelineCacheData(
}
*pDataSize = p - pData;
+ pthread_mutex_unlock(&cache->mutex);
return result;
}
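For context, a hedged caller-side illustration of the scenario the commit message blames for the hangs: several threads creating pipelines in parallel against one shared VkPipelineCache. The worker_ctx structure is hypothetical, create_info is assumed to be fully populated, and error handling plus pipeline destruction are omitted.

#include <pthread.h>
#include <vulkan/vulkan.h>

struct worker_ctx {
	VkDevice device;
	VkPipelineCache cache;                    /* shared by all workers */
	VkGraphicsPipelineCreateInfo create_info; /* assumed fully populated */
	VkPipeline pipeline;
	VkResult result;
};

static void *
create_pipeline_worker(void *arg)
{
	struct worker_ctx *ctx = arg;

	/* Concurrent calls like this hit the pipeline cache from several
	 * threads at once; without locking around the whole lookup, a thread
	 * could read a partially written cache entry. */
	ctx->result = vkCreateGraphicsPipelines(ctx->device, ctx->cache, 1,
	                                        &ctx->create_info, NULL,
	                                        &ctx->pipeline);
	return NULL;
}

static void
create_pipelines_in_parallel(struct worker_ctx *ctxs, unsigned count)
{
	pthread_t threads[count];

	for (unsigned i = 0; i < count; i++)
		pthread_create(&threads[i], NULL, create_pipeline_worker, &ctxs[i]);
	for (unsigned i = 0; i < count; i++)
		pthread_join(threads[i], NULL);
}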