author    Rafael Antognolli <[email protected]>    2018-11-21 11:36:49 -0800
committer Rafael Antognolli <[email protected]>    2019-01-17 15:08:02 -0800
commit    e3dc56d7311c82d6dbda7948f9ca4345d44f8bdb (patch)
tree      ed5e3729b7f1f0af2e5712a0a5d2c65421e916fb /src/intel/vulkan/anv_allocator.c
parent    fc3f58832015cbb177179e7f3420d3611479b4a9 (diff)
anv: Update usage of block_pool->bo.
Change block_pool->bo to be a pointer, and update its usage everywhere.
This makes it simpler to switch it later to a list of BOs.

v3:
 - Use a static "bos" field in the struct, instead of malloc'ing it.
   This will be later changed to a fixed length array of BOs.

Reviewed-by: Jason Ekstrand <[email protected]>
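For context, a minimal sketch of the struct change, assuming simplified
definitions (the real struct anv_bo and struct anv_block_pool in
anv_private.h carry more fields than shown here):

#include <stdint.h>

/* Abbreviated sketch only, not the real definitions. */
struct anv_bo {
   uint32_t gem_handle;
   uint64_t offset;
   uint64_t size;
   uint64_t flags;
   void *map;
};

struct anv_block_pool {
   /* Before this patch the BO was embedded (struct anv_bo bo;) and every
    * access was spelled pool->bo.field.
    */
   struct anv_bo bos;   /* static backing storage; later a fixed length array */
   struct anv_bo *bo;   /* init aims this at &bos; all access goes through it */
   /* remaining fields omitted */
};

Since every use site now dereferences the pointer (pool->bo->map,
pool->bo->size, ...), switching the backing storage to a list of BOs later
only requires re-aiming pool->bo, not touching each call site. The hunks
below make exactly that mechanical s/pool->bo./pool->bo->/ substitution.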
Diffstat (limited to 'src/intel/vulkan/anv_allocator.c')
-rw-r--r--   src/intel/vulkan/anv_allocator.c   22
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index 35ef41f5bb9..44281c0b77f 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -436,7 +436,9 @@ anv_block_pool_init(struct anv_block_pool *pool,
    pool->bo_flags = bo_flags;
    pool->start_address = gen_canonical_address(start_address);
 
-   anv_bo_init(&pool->bo, 0, 0);
+   pool->bo = &pool->bos;
+
+   anv_bo_init(pool->bo, 0, 0);
 
    pool->fd = memfd_create("block pool", MFD_CLOEXEC);
    if (pool->fd == -1)
@@ -584,13 +586,13 @@ anv_block_pool_expand_range(struct anv_block_pool *pool,
     * the EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and the kernel does all of the
     * hard work for us.
     */
-   anv_bo_init(&pool->bo, gem_handle, size);
+   anv_bo_init(pool->bo, gem_handle, size);
    if (pool->bo_flags & EXEC_OBJECT_PINNED) {
-      pool->bo.offset = pool->start_address + BLOCK_POOL_MEMFD_CENTER -
+      pool->bo->offset = pool->start_address + BLOCK_POOL_MEMFD_CENTER -
         center_bo_offset;
    }
-   pool->bo.flags = pool->bo_flags;
-   pool->bo.map = map;
+   pool->bo->flags = pool->bo_flags;
+   pool->bo->map = map;
 
    return VK_SUCCESS;
 }
@@ -604,7 +606,7 @@ anv_block_pool_expand_range(struct anv_block_pool *pool,
 void*
 anv_block_pool_map(struct anv_block_pool *pool, int32_t offset)
 {
-   return pool->bo.map + pool->center_bo_offset + offset;
+   return pool->bo->map + pool->center_bo_offset + offset;
 }
 
 /** Grows and re-centers the block pool.
@@ -656,7 +658,7 @@ anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
    assert(state == &pool->state || back_used > 0);
 
-   uint32_t old_size = pool->bo.size;
+   uint32_t old_size = pool->bo->size;
 
    /* The block pool is always initialized to a nonzero size and this function
     * is always called after initialization.
@@ -682,7 +684,7 @@ anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
    while (size < back_required + front_required)
       size *= 2;
 
-   assert(size > pool->bo.size);
+   assert(size > pool->bo->size);
 
    /* We compute a new center_bo_offset such that, when we double the size
     * of the pool, we maintain the ratio of how much is used by each side.
@@ -719,7 +721,7 @@ anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
 
    result = anv_block_pool_expand_range(pool, center_bo_offset, size);
 
-   pool->bo.flags = pool->bo_flags;
+   pool->bo->flags = pool->bo_flags;
 
 done:
    pthread_mutex_unlock(&pool->device->mutex);
@@ -730,7 +732,7 @@ done:
     * needs to do so in order to maintain its concurrency model.
     */
    if (state == &pool->state) {
-      return pool->bo.size - pool->center_bo_offset;
+      return pool->bo->size - pool->center_bo_offset;
    } else {
       assert(pool->center_bo_offset > 0);
       return pool->center_bo_offset;
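
The payoff of the indirection shows up in the follow-on work: once "bos"
becomes a fixed length array, growing into a fresh BO could be as small as
re-pointing the handle, sketched hypothetically below (nbos is illustrative
and not part of this patch):

   pool->bo = &pool->bos[pool->nbos++];   /* nbos: hypothetical BO count */

None of the pool->bo-> accesses updated above would need to change again.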