| author | Jason Ekstrand <[email protected]> | 2019-10-25 14:52:37 -0500 |
|---|---|---|
| committer | Jason Ekstrand <[email protected]> | 2019-10-31 13:46:08 +0000 |
| commit | 6f4fa8176952bef3973e65aa3f37bd077fc10895 (patch) | |
| tree | 1f2a137e73bbab847041b8e3db62b05510794178 | |
| parent | b781c85c79944ccc0a6b0e49daae574672c6dd26 (diff) | |
anv: Handle state pool relocations using "wrapper" BOs
Instead of relying on a mutable BO in the state pool to handle pool
growth, add a concept of "wrapper" BOs which simply wrap an actual BO.
This way, the wrapper can exist once for the lifetime of the pool, and we
can put it in relocation lists even if the actual BO it references gets
swapped out.
Reviewed-by: Lionel Landwerlin <[email protected]>
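For illustration, here is a minimal, self-contained sketch of the wrapper pattern this commit introduces. The stand-in `struct bo` and `bo_unwrap()` below are simplified placeholders, not the real `struct anv_bo`/`anv_bo_unwrap()` from `anv_private.h`; they only show why a relocation list can hold the wrapper forever while the backing BO changes underneath it.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for struct anv_bo: for a wrapper, map points at the
 * wrapped struct bo instead of a CPU mapping. */
struct bo {
   uint32_t gem_handle;
   uint64_t size;
   void *map;
   bool is_wrapper;
};

/* Follow wrapper links until we reach a real BO, mirroring the
 * anv_bo_unwrap() helper added by this commit. */
static struct bo *
bo_unwrap(struct bo *bo)
{
   while (bo->is_wrapper)
      bo = bo->map;
   return bo;
}

int
main(void)
{
   struct bo backing = { .gem_handle = 1, .size = 4096 };
   struct bo wrapper = { .is_wrapper = true, .map = &backing };

   /* A relocation list stores the stable wrapper, never the backing BO. */
   struct bo *reloc_target = &wrapper;

   /* Pool growth: the backing BO is re-created in place with a new GEM
    * handle and a larger size; the relocation list is untouched. */
   backing = (struct bo){ .gem_handle = 2, .size = 8192 };

   /* Unwrapping still resolves to the current backing BO. */
   assert(bo_unwrap(reloc_target)->gem_handle == 2);
   return 0;
}
```

In the driver itself, the same resolution happens via `anv_bo_unwrap()` in the diff below: the non-softpin block pool points `wrapper_bo.map` at `pool->bos[0]`, whose contents are replaced as the memfd-backed pool grows.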
| -rw-r--r-- | src/intel/vulkan/anv_allocator.c | 21 |
| -rw-r--r-- | src/intel/vulkan/anv_batch_chain.c | 19 |
| -rw-r--r-- | src/intel/vulkan/anv_private.h | 30 |
3 files changed, 56 insertions, 14 deletions
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index a39a1715d53..7cfca6ea808 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -432,11 +432,6 @@ anv_block_pool_init(struct anv_block_pool *pool,
    pool->start_address = gen_canonical_address(start_address);
    pool->map = NULL;
 
-   /* This pointer will always point to the first BO in the list */
-   pool->bo = &pool->bos[0];
-
-   anv_bo_init(pool->bo, 0, 0);
-
    if (!(pool->bo_flags & EXEC_OBJECT_PINNED)) {
       /* Just make it 2GB up-front.  The Linux kernel won't actually back it
        * with pages until we either map and fault on one of them or we use
@@ -445,7 +440,15 @@ anv_block_pool_init(struct anv_block_pool *pool,
       pool->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "block pool");
       if (pool->fd == -1)
         return vk_error(VK_ERROR_INITIALIZATION_FAILED);
+
+      anv_bo_init(&pool->wrapper_bo, 0, 0);
+      pool->wrapper_bo.is_wrapper = true;
+      pool->bo = &pool->wrapper_bo;
    } else {
+      /* This pointer will always point to the first BO in the list */
+      anv_bo_init(&pool->bos[0], 0, 0);
+      pool->bo = &pool->bos[0];
+
       pool->fd = -1;
    }
 
@@ -620,9 +623,11 @@ anv_block_pool_expand_range(struct anv_block_pool *pool,
        * it.  Simply "allocate" it from our array if we didn't do it before.
        * The offset doesn't matter since we are not pinning the BO anyway.
        */
-      if (pool->nbos == 0)
+      if (pool->nbos == 0) {
+         pool->wrapper_bo.map = &pool->bos[0];
          pool->nbos++;
-      bo = pool->bo;
+      }
+      bo = pool->wrapper_bo.map;
       bo_size = size;
       bo_offset = 0;
    }
@@ -776,8 +781,6 @@ anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
 
    result = anv_block_pool_expand_range(pool, center_bo_offset, size);
 
-   pool->bo->flags = pool->bo_flags;
-
 done:
    pthread_mutex_unlock(&pool->device->mutex);
 
diff --git a/src/intel/vulkan/anv_batch_chain.c b/src/intel/vulkan/anv_batch_chain.c
index fcd8754ac6c..a677345c121 100644
--- a/src/intel/vulkan/anv_batch_chain.c
+++ b/src/intel/vulkan/anv_batch_chain.c
@@ -154,11 +154,12 @@ anv_reloc_list_add(struct anv_reloc_list *list,
    struct drm_i915_gem_relocation_entry *entry;
    int index;
 
-   uint64_t target_bo_offset = READ_ONCE(target_bo->offset);
+   struct anv_bo *unwrapped_target_bo = anv_bo_unwrap(target_bo);
+   uint64_t target_bo_offset = READ_ONCE(unwrapped_target_bo->offset);
    if (address_u64_out)
       *address_u64_out = target_bo_offset + delta;
 
-   if (target_bo->flags & EXEC_OBJECT_PINNED) {
+   if (unwrapped_target_bo->flags & EXEC_OBJECT_PINNED) {
       if (list->deps == NULL) {
          list->deps = _mesa_pointer_set_create(NULL);
          if (unlikely(list->deps == NULL))
@@ -1063,6 +1064,8 @@ anv_execbuf_add_bo(struct anv_execbuf *exec,
 {
    struct drm_i915_gem_exec_object2 *obj = NULL;
 
+   bo = anv_bo_unwrap(bo);
+
    if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
       obj = &exec->objects[bo->index];
 
@@ -1219,7 +1222,7 @@ anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
                               struct anv_reloc_list *list)
 {
    for (size_t i = 0; i < list->num_relocs; i++)
-      list->relocs[i].target_handle = list->reloc_bos[i]->index;
+      list->relocs[i].target_handle = anv_bo_unwrap(list->reloc_bos[i])->index;
 }
 
 static void
@@ -1246,6 +1249,7 @@ adjust_relocations_to_state_pool(struct anv_state_pool *pool,
                                 struct anv_reloc_list *relocs,
                                 uint32_t last_pool_center_bo_offset)
 {
+   assert(!from_bo->is_wrapper);
    assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
    uint32_t delta = pool->block_pool.center_bo_offset -
                     last_pool_center_bo_offset;
@@ -1284,8 +1288,10 @@ anv_reloc_list_apply(struct anv_device *device,
                     struct anv_bo *bo,
                     bool always_relocate)
 {
+   bo = anv_bo_unwrap(bo);
+
    for (size_t i = 0; i < list->num_relocs; i++) {
-      struct anv_bo *target_bo = list->reloc_bos[i];
+      struct anv_bo *target_bo = anv_bo_unwrap(list->reloc_bos[i]);
       if (list->relocs[i].presumed_offset == target_bo->offset &&
           !always_relocate)
          continue;
@@ -1354,6 +1360,7 @@ relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
     * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
     */
    for (uint32_t i = 0; i < exec->bo_count; i++) {
+      assert(!exec->bos[i]->is_wrapper);
       if (exec->bos[i]->offset == (uint64_t)-1)
          return false;
    }
@@ -1363,8 +1370,10 @@ relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
     * what address is actually written in the surface state object at any
     * given time.  The only option is to always relocate them.
     */
+   struct anv_bo *surface_state_bo =
+      anv_bo_unwrap(cmd_buffer->device->surface_state_pool.block_pool.bo);
    anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
-                        cmd_buffer->device->surface_state_pool.block_pool.bo,
+                        surface_state_bo,
                         true /* always relocate surface states */);
 
    /* Since we own all of the batch buffers, we know what values are stored
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 512acb513f8..82cc6120316 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -615,6 +615,11 @@ struct anv_bo {
    uint64_t offset;
 
    uint64_t size;
+
+   /* Map for internally mapped BOs.
+    *
+    * If ANV_BO_WRAPPER is set in flags, map points to the wrapped BO.
+    */
    void *map;
 
    /** Flags to pass to the kernel through drm_i915_exec_object2::flags */
@@ -622,6 +627,15 @@ struct anv_bo {
 
    /** True if this BO may be shared with other processes */
    bool is_external:1;
+
+   /** True if this BO is a wrapper
+    *
+    * When set to true, none of the fields in this BO are meaningful except
+    * for anv_bo::is_wrapper and anv_bo::map which points to the actual BO.
+    * See also anv_bo_unwrap().  Wrapper BOs are not allowed when use_softpin
+    * is set in the physical device.
+    */
+   bool is_wrapper:1;
 };
 
 static inline void
@@ -635,6 +649,15 @@ anv_bo_init(struct anv_bo *bo, uint32_t gem_handle, uint64_t size)
    bo->map = NULL;
    bo->flags = 0;
    bo->is_external = false;
+   bo->is_wrapper = false;
+}
+
+static inline struct anv_bo *
+anv_bo_unwrap(struct anv_bo *bo)
+{
+   while (bo->is_wrapper)
+      bo = bo->map;
+   return bo;
 }
 
 /* Represents a lock-free linked list of "free" things.  This is used by
@@ -679,6 +702,13 @@ struct anv_block_pool {
 
    uint64_t bo_flags;
 
+   /* Wrapper BO for use in relocation lists.  This BO is simply a wrapper
+    * around the actual BO so that we can grow the pool after the wrapper BO
+    * has been put in a relocation list.  This is only used in the non-softpin
+    * case.
+    */
+   struct anv_bo wrapper_bo;
+
    struct anv_bo bos[ANV_MAX_BLOCK_POOL_BOS];
    struct anv_bo *bo;
    uint32_t nbos;