author    Rafael Antognolli <[email protected]>    2019-01-23 12:36:39 -0800
committer Rafael Antognolli <[email protected]>    2019-01-24 17:39:40 -0800
commit    f2ece26601adda221ce5ae69c984f8b79a9cb0d0 (patch)
tree      308c1b280a96f4cf64ff2cd85eb687e03e7b5d00 /src/intel/vulkan
parent    c1efa240c91e78385e8e433af134289b006016d0 (diff)
anv/allocator: Avoid race condition in anv_block_pool_map.
Accessing bo->map and then pool->center_bo_offset without a lock is
racy: a concurrent grow can update one of them between the two reads.
One way to avoid such a race is to store bo->map + center_bo_offset
into pool->map at the time the block pool grows, which happens while
the lock is held.

v2: Only set pool->map if not using softpin (Jason).
v3: Move things around and only update center_bo_offset if not using
    softpin too (Jason).

Cc: Jason Ekstrand <[email protected]>
Reported-by: Ian Romanick <[email protected]>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=109442
Fixes: fc3f58832015cbb177179e7f3420d3611479b4a9
Reviewed-by: Jason Ekstrand <[email protected]>
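The bug class here is a torn read: bo->map and pool->center_bo_offset are
only mutually consistent while the pool's lock is held, yet they were read
back-to-back without it. A minimal standalone sketch of the before/after
shapes follows; the struct layout, the pthread locking, and names such as
pool_grow_locked are simplifications for illustration, not the driver's
actual code.

    #include <pthread.h>
    #include <stdint.h>

    struct pool {
       pthread_mutex_t mutex;     /* held while the pool grows */
       void *bo_map;              /* remapped on growth */
       uint32_t center_bo_offset; /* updated on growth */
       void *map;                 /* fix: bo_map + center_bo_offset */
    };

    /* Racy (old shape): a grower can update bo_map and center_bo_offset
     * between these two unlocked loads, so the caller may combine a new
     * mapping with a stale offset, or vice versa. */
    static void *pool_map_racy(struct pool *p, int32_t offset)
    {
       return (char *)p->bo_map + p->center_bo_offset + offset;
    }

    /* Fixed (new shape): readers load a single pointer that the grower
     * published while holding the lock, so a mismatched pair can never
     * be observed. */
    static void *pool_map_fixed(struct pool *p, int32_t offset)
    {
       return (char *)p->map + offset;
    }

    /* Grower side, always under the lock (hypothetical helper). */
    static void pool_grow_locked(struct pool *p, void *new_bo_map,
                                 uint32_t new_center)
    {
       pthread_mutex_lock(&p->mutex);
       p->bo_map = new_bo_map;
       p->center_bo_offset = new_center;
       p->map = (char *)new_bo_map + new_center;
       pthread_mutex_unlock(&p->mutex);
    }

Note that the fix narrows the window from two dependent loads to one
pointer load; whether that lone load needs additional ordering guarantees
on a given architecture is outside the scope of this sketch.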
Diffstat (limited to 'src/intel/vulkan')
-rw-r--r--  src/intel/vulkan/anv_allocator.c  20
-rw-r--r--  src/intel/vulkan/anv_private.h    13
2 files changed, 27 insertions, 6 deletions
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index 89f26789c85..006175c8c65 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -436,7 +436,9 @@ anv_block_pool_init(struct anv_block_pool *pool,
pool->bo_flags = bo_flags;
pool->nbos = 0;
pool->size = 0;
+ pool->center_bo_offset = 0;
pool->start_address = gen_canonical_address(start_address);
+ pool->map = NULL;
/* This pointer will always point to the first BO in the list */
pool->bo = &pool->bos[0];
@@ -536,6 +538,7 @@ anv_block_pool_expand_range(struct anv_block_pool *pool,
if (map == MAP_FAILED)
return vk_errorf(pool->device->instance, pool->device,
VK_ERROR_MEMORY_MAP_FAILED, "gem mmap failed: %m");
+ assert(center_bo_offset == 0);
} else {
/* Just leak the old map until we destroy the pool. We can't munmap it
* without races or imposing locking on the block allocate fast path. On
@@ -549,6 +552,11 @@ anv_block_pool_expand_range(struct anv_block_pool *pool,
if (map == MAP_FAILED)
return vk_errorf(pool->device->instance, pool->device,
VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m");
+
+ /* Now that we mapped the new memory, we can write the new
+ * center_bo_offset back into pool and update pool->map. */
+ pool->center_bo_offset = center_bo_offset;
+ pool->map = map + center_bo_offset;
gem_handle = anv_gem_userptr(pool->device, map, size);
if (gem_handle == 0) {
munmap(map, size);
@@ -573,10 +581,6 @@ anv_block_pool_expand_range(struct anv_block_pool *pool,
if (!pool->device->info.has_llc)
anv_gem_set_caching(pool->device, gem_handle, I915_CACHING_CACHED);
- /* Now that we successfull allocated everything, we can write the new
- * center_bo_offset back into pool. */
- pool->center_bo_offset = center_bo_offset;
-
/* For block pool BOs we have to be a bit careful about where we place them
* in the GTT. There are two documented workarounds for state base address
* placement : Wa32bitGeneralStateOffset and Wa32bitInstructionBaseOffset
@@ -670,8 +674,12 @@ anv_block_pool_get_bo(struct anv_block_pool *pool, int32_t *offset)
void*
anv_block_pool_map(struct anv_block_pool *pool, int32_t offset)
{
- struct anv_bo *bo = anv_block_pool_get_bo(pool, &offset);
- return bo->map + pool->center_bo_offset + offset;
+ if (pool->bo_flags & EXEC_OBJECT_PINNED) {
+ struct anv_bo *bo = anv_block_pool_get_bo(pool, &offset);
+ return bo->map + offset;
+ } else {
+ return pool->map + offset;
+ }
}
/** Grows and re-centers the block pool.
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 3889065c93c..110b2ccf023 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -663,6 +663,19 @@ struct anv_block_pool {
*/
uint32_t center_bo_offset;
+ /* Current memory map of the block pool. This pointer may or may not
+ * point to the actual beginning of the block pool memory. If
+ * anv_block_pool_alloc_back has ever been called, then this pointer
+ * will point to the "center" position of the buffer and all offsets
+ * (negative or positive) given out by the block pool alloc functions
+ * will be valid relative to this pointer.
+ *
+ * In particular, map == bo.map + center_offset
+ *
+ * DO NOT access this pointer directly. Use anv_block_pool_map() instead,
+ * since it will handle the softpin case as well, where this points to NULL.
+ */
+ void *map;
int fd;
/**
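To make the "center" arithmetic in the new comment concrete, here is a
small self-contained sketch of the invariant map == bo.map + center_offset;
the buffer size and offsets are made-up numbers, not values from the driver.

    #include <assert.h>
    #include <stdint.h>

    /* Layout after some growth, with the center placed 4096 bytes in:
     *
     *   backing           backing + 4096             backing + 8192
     *   |--- back allocs ---|--------- front allocs --------|
     *
     * Both signs of offset resolve against the same pointer. */
    static void *block_pool_map_sketch(char *map, int32_t offset)
    {
       return map + offset;   /* offset may be negative (back allocs) */
    }

    int main(void)
    {
       static char backing[8192];
       uint32_t center_bo_offset = 4096;       /* illustrative value */
       char *map = backing + center_bo_offset; /* map == bo.map + center_offset */

       /* A front allocation at +256 and a back allocation at -256 land on
        * opposite sides of the center, inside the same mapping. */
       assert(block_pool_map_sketch(map, 256)  == backing + 4352);
       assert(block_pool_map_sketch(map, -256) == backing + 3840);
       return 0;
    }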