-rw-r--r--  src/amd/vulkan/radv_pipeline_cache.c  54
-rw-r--r--  src/amd/vulkan/radv_private.h           3
2 files changed, 39 insertions(+), 18 deletions(-)
diff --git a/src/amd/vulkan/radv_pipeline_cache.c b/src/amd/vulkan/radv_pipeline_cache.c
index 074c5acb7f4..b08395f5cfb 100644
--- a/src/amd/vulkan/radv_pipeline_cache.c
+++ b/src/amd/vulkan/radv_pipeline_cache.c
@@ -41,12 +41,31 @@ struct cache_entry {
char code[0];
};
+static void
+radv_pipeline_cache_lock(struct radv_pipeline_cache *cache)
+{
+ if (cache->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT)
+ return;
+
+ pthread_mutex_lock(&cache->mutex);
+}
+
+static void
+radv_pipeline_cache_unlock(struct radv_pipeline_cache *cache)
+{
+ if (cache->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT)
+ return;
+
+ pthread_mutex_unlock(&cache->mutex);
+}
+
void
radv_pipeline_cache_init(struct radv_pipeline_cache *cache,
struct radv_device *device)
{
cache->device = device;
pthread_mutex_init(&cache->mutex, NULL);
+ cache->flags = 0;
cache->modified = false;
cache->kernel_count = 0;
@@ -156,11 +175,11 @@ radv_pipeline_cache_search(struct radv_pipeline_cache *cache,
{
struct cache_entry *entry;
- pthread_mutex_lock(&cache->mutex);
+ radv_pipeline_cache_lock(cache);
entry = radv_pipeline_cache_search_unlocked(cache, sha1);
- pthread_mutex_unlock(&cache->mutex);
+ radv_pipeline_cache_unlock(cache);
return entry;
}
@@ -318,7 +337,7 @@ radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
*found_in_application_cache = false;
}
- pthread_mutex_lock(&cache->mutex);
+ radv_pipeline_cache_lock(cache);
entry = radv_pipeline_cache_search_unlocked(cache, sha1);
@@ -329,7 +348,7 @@ radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
* present in the cache.
*/
if (radv_is_cache_disabled(device) || !device->physical_device->disk_cache) {
- pthread_mutex_unlock(&cache->mutex);
+ radv_pipeline_cache_unlock(cache);
return false;
}
@@ -346,7 +365,7 @@ radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
}
if (!entry) {
- pthread_mutex_unlock(&cache->mutex);
+ radv_pipeline_cache_unlock(cache);
return false;
} else {
size_t size = entry_size(entry);
@@ -354,7 +373,7 @@ radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
if (!new_entry) {
free(entry);
- pthread_mutex_unlock(&cache->mutex);
+ radv_pipeline_cache_unlock(cache);
return false;
}
@@ -394,7 +413,7 @@ radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
p_atomic_inc(&entry->variants[i]->ref_count);
}
- pthread_mutex_unlock(&cache->mutex);
+ radv_pipeline_cache_unlock(cache);
return true;
}
@@ -408,7 +427,7 @@ radv_pipeline_cache_insert_shaders(struct radv_device *device,
if (!cache)
cache = device->mem_cache;
- pthread_mutex_lock(&cache->mutex);
+ radv_pipeline_cache_lock(cache);
struct cache_entry *entry = radv_pipeline_cache_search_unlocked(cache, sha1);
if (entry) {
for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
@@ -421,7 +440,7 @@ radv_pipeline_cache_insert_shaders(struct radv_device *device,
if (variants[i])
p_atomic_inc(&variants[i]->ref_count);
}
- pthread_mutex_unlock(&cache->mutex);
+ radv_pipeline_cache_unlock(cache);
return;
}
@@ -429,7 +448,7 @@ radv_pipeline_cache_insert_shaders(struct radv_device *device,
* present in the cache.
*/
if (radv_is_cache_disabled(device)) {
- pthread_mutex_unlock(&cache->mutex);
+ radv_pipeline_cache_unlock(cache);
return;
}
@@ -442,7 +461,7 @@ radv_pipeline_cache_insert_shaders(struct radv_device *device,
entry = vk_alloc(&cache->alloc, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
if (!entry) {
- pthread_mutex_unlock(&cache->mutex);
+ radv_pipeline_cache_unlock(cache);
return;
}
@@ -485,7 +504,7 @@ radv_pipeline_cache_insert_shaders(struct radv_device *device,
if (device->instance->debug_flags & RADV_DEBUG_NO_MEMORY_CACHE &&
cache == device->mem_cache) {
vk_free2(&cache->alloc, NULL, entry);
- pthread_mutex_unlock(&cache->mutex);
+ radv_pipeline_cache_unlock(cache);
return;
}
@@ -503,7 +522,7 @@ radv_pipeline_cache_insert_shaders(struct radv_device *device,
radv_pipeline_cache_add_entry(cache, entry);
cache->modified = true;
- pthread_mutex_unlock(&cache->mutex);
+ radv_pipeline_cache_unlock(cache);
return;
}
@@ -587,6 +606,7 @@ VkResult radv_CreatePipelineCache(
cache->alloc = device->vk.alloc;
radv_pipeline_cache_init(cache, device);
+ cache->flags = pCreateInfo->flags;
if (pCreateInfo->initialDataSize > 0) {
radv_pipeline_cache_load(cache,
@@ -626,16 +646,16 @@ VkResult radv_GetPipelineCacheData(
struct cache_header *header;
VkResult result = VK_SUCCESS;
- pthread_mutex_lock(&cache->mutex);
+ radv_pipeline_cache_lock(cache);
const size_t size = sizeof(*header) + cache->total_size;
if (pData == NULL) {
- pthread_mutex_unlock(&cache->mutex);
+ radv_pipeline_cache_unlock(cache);
*pDataSize = size;
return VK_SUCCESS;
}
if (*pDataSize < sizeof(*header)) {
- pthread_mutex_unlock(&cache->mutex);
+ radv_pipeline_cache_unlock(cache);
*pDataSize = 0;
return VK_INCOMPLETE;
}
@@ -666,7 +686,7 @@ VkResult radv_GetPipelineCacheData(
}
*pDataSize = p - pData;
- pthread_mutex_unlock(&cache->mutex);
+ radv_pipeline_cache_unlock(cache);
return result;
}
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index d655fa07d9b..1ac8c324604 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -386,8 +386,9 @@ struct cache_entry;
struct radv_pipeline_cache {
struct vk_object_base base;
- struct radv_device * device;
+ struct radv_device * device;
pthread_mutex_t mutex;
+ VkPipelineCacheCreateFlags flags;
uint32_t total_size;
uint32_t table_size;
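
For context, a minimal application-side sketch (not part of this patch, and the variable names are hypothetical) of how the fast path added here is reached: setting VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT at cache creation time promises the driver that the application serializes all access to the cache object itself, so radv_pipeline_cache_lock()/radv_pipeline_cache_unlock() can skip taking cache->mutex.

/* Hypothetical application usage, assuming the device supports the
 * externally-synchronized pipeline cache flag. The flag tells the driver
 * that the application guarantees single-threaded (or externally locked)
 * access to this cache, so the driver-internal mutex can be skipped. */
VkPipelineCacheCreateInfo info = {
    .sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,
    .flags = VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT,
    .initialDataSize = 0,
    .pInitialData = NULL,
};

VkPipelineCache pipeline_cache;
VkResult result = vkCreatePipelineCache(device, &info, NULL, &pipeline_cache);

/* With the flag set, the application must not call vkCreate*Pipelines(),
 * vkMergePipelineCaches() or vkGetPipelineCacheData() concurrently on the
 * same pipeline_cache object; radv then relies on that guarantee instead of
 * its own locking. */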