Diffstat (limited to 'src')
-rw-r--r--  src/intel/vulkan/anv_allocator.c   |  8
-rw-r--r--  src/intel/vulkan/anv_batch_chain.c | 40
-rw-r--r--  src/intel/vulkan/anv_private.h     | 52
-rw-r--r--  src/intel/vulkan/anv_util.c        | 75
-rw-r--r--  src/intel/vulkan/anv_wsi_wayland.c | 14
5 files changed, 35 insertions, 154 deletions
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index d5c033c97e1..ae18f8e3837 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -272,7 +272,7 @@ anv_block_pool_init(struct anv_block_pool *pool,
    if (ftruncate(pool->fd, BLOCK_POOL_MEMFD_SIZE) == -1)
       return;
 
-   anv_vector_init(&pool->mmap_cleanups,
+   u_vector_init(&pool->mmap_cleanups,
                    round_to_power_of_two(sizeof(struct anv_mmap_cleanup)), 128);
 
    pool->state.next = 0;
@@ -289,14 +289,14 @@ anv_block_pool_finish(struct anv_block_pool *pool)
 {
    struct anv_mmap_cleanup *cleanup;
 
-   anv_vector_foreach(cleanup, &pool->mmap_cleanups) {
+   u_vector_foreach(cleanup, &pool->mmap_cleanups) {
       if (cleanup->map)
          munmap(cleanup->map, cleanup->size);
       if (cleanup->gem_handle)
         anv_gem_close(pool->device, cleanup->gem_handle);
    }
 
-   anv_vector_finish(&pool->mmap_cleanups);
+   u_vector_finish(&pool->mmap_cleanups);
 
    close(pool->fd);
 }
@@ -420,7 +420,7 @@ anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
    assert(center_bo_offset >= pool->back_state.end);
    assert(size - center_bo_offset >= pool->state.end);
 
-   cleanup = anv_vector_add(&pool->mmap_cleanups);
+   cleanup = u_vector_add(&pool->mmap_cleanups);
    if (!cleanup)
       goto fail;
    *cleanup = ANV_MMAP_CLEANUP_INIT;
diff --git a/src/intel/vulkan/anv_batch_chain.c b/src/intel/vulkan/anv_batch_chain.c
index 95854f42c25..11bd4ef778d 100644
--- a/src/intel/vulkan/anv_batch_chain.c
+++ b/src/intel/vulkan/anv_batch_chain.c
@@ -434,7 +434,7 @@ anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
 {
    return (struct anv_address) {
       .bo = &cmd_buffer->device->surface_state_block_pool.bo,
-      .offset = *(int32_t *)anv_vector_head(&cmd_buffer->bt_blocks),
+      .offset = *(int32_t *)u_vector_head(&cmd_buffer->bt_blocks),
    };
 }
 
@@ -494,7 +494,7 @@ anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
    if (result != VK_SUCCESS)
       return result;
 
-   struct anv_batch_bo **seen_bbo = anv_vector_add(&cmd_buffer->seen_bbos);
+   struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
    if (seen_bbo == NULL) {
       anv_batch_bo_destroy(new_bbo, cmd_buffer);
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -528,7 +528,7 @@ anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
 {
    struct anv_block_pool *block_pool =
        &cmd_buffer->device->surface_state_block_pool;
-   int32_t *bt_block = anv_vector_head(&cmd_buffer->bt_blocks);
+   int32_t *bt_block = u_vector_head(&cmd_buffer->bt_blocks);
    struct anv_state state;
 
    state.alloc_size = align_u32(entries * 4, 32);
@@ -567,7 +567,7 @@ anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
    struct anv_block_pool *block_pool =
        &cmd_buffer->device->surface_state_block_pool;
 
-   int32_t *offset = anv_vector_add(&cmd_buffer->bt_blocks);
+   int32_t *offset = u_vector_add(&cmd_buffer->bt_blocks);
    if (offset == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
@@ -603,15 +603,15 @@ anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
    anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
                       GEN8_MI_BATCH_BUFFER_START_length * 4);
 
-   int success = anv_vector_init(&cmd_buffer->seen_bbos,
+   int success = u_vector_init(&cmd_buffer->seen_bbos,
                                  sizeof(struct anv_bo *),
                                  8 * sizeof(struct anv_bo *));
    if (!success)
       goto fail_batch_bo;
 
-   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
+   *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
 
-   success = anv_vector_init(&cmd_buffer->bt_blocks, sizeof(int32_t),
+   success = u_vector_init(&cmd_buffer->bt_blocks, sizeof(int32_t),
                              8 * sizeof(int32_t));
    if (!success)
       goto fail_seen_bbos;
@@ -630,9 +630,9 @@ anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
    return VK_SUCCESS;
 
 fail_bt_blocks:
-   anv_vector_finish(&cmd_buffer->bt_blocks);
+   u_vector_finish(&cmd_buffer->bt_blocks);
 fail_seen_bbos:
-   anv_vector_finish(&cmd_buffer->seen_bbos);
+   u_vector_finish(&cmd_buffer->seen_bbos);
 fail_batch_bo:
    anv_batch_bo_destroy(batch_bo, cmd_buffer);
 
@@ -643,15 +643,15 @@ void
 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
 {
    int32_t *bt_block;
-   anv_vector_foreach(bt_block, &cmd_buffer->bt_blocks) {
+   u_vector_foreach(bt_block, &cmd_buffer->bt_blocks) {
       anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
                           *bt_block);
    }
-   anv_vector_finish(&cmd_buffer->bt_blocks);
+   u_vector_finish(&cmd_buffer->bt_blocks);
 
    anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
 
-   anv_vector_finish(&cmd_buffer->seen_bbos);
+   u_vector_finish(&cmd_buffer->seen_bbos);
 
    /* Destroy all of the batch buffers */
    list_for_each_entry_safe(struct anv_batch_bo, bbo,
@@ -679,12 +679,12 @@ anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
                      &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);
 
-   while (anv_vector_length(&cmd_buffer->bt_blocks) > 1) {
-      int32_t *bt_block = anv_vector_remove(&cmd_buffer->bt_blocks);
+   while (u_vector_length(&cmd_buffer->bt_blocks) > 1) {
+      int32_t *bt_block = u_vector_remove(&cmd_buffer->bt_blocks);
       anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
                           *bt_block);
    }
-   assert(anv_vector_length(&cmd_buffer->bt_blocks) == 1);
+   assert(u_vector_length(&cmd_buffer->bt_blocks) == 1);
 
    cmd_buffer->bt_next = 0;
    cmd_buffer->surface_relocs.num_relocs = 0;
@@ -693,7 +693,7 @@ anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
    cmd_buffer->seen_bbos.head = 0;
    cmd_buffer->seen_bbos.tail = 0;
 
-   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) =
+   *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
       anv_cmd_buffer_current_batch_bo(cmd_buffer);
 }
 
@@ -760,7 +760,7 @@ anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
                              struct list_head *list)
 {
    list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
-      struct anv_batch_bo **bbo_ptr = anv_vector_add(&cmd_buffer->seen_bbos);
+      struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
       if (bbo_ptr == NULL)
          return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
@@ -1064,7 +1064,7 @@ anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
     * relocations to the validate list.
     */
    struct anv_batch_bo **bbo;
-   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
+   u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
       adjust_relocations_to_block_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
                                        &(*bbo)->last_ss_pool_bo_offset);
 
@@ -1100,14 +1100,14 @@ anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
     * the correct indices in the object array.  We have to do this after we
     * reorder the list above as some of the indices may have changed.
     */
-   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos)
+   u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
       anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
 
    anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
 
    if (!cmd_buffer->device->info.has_llc) {
       __builtin_ia32_mfence();
-      anv_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
+      u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
          for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
             __builtin_ia32_clflush((*bbo)->bo.map + i);
       }
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index a4a1dd0ddc4..b51e95401a2 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -46,6 +46,7 @@
 #include "brw_compiler.h"
 #include "util/macros.h"
 #include "util/list.h"
+#include "util/u_vector.h"
 
 /* Pre-declarations needed for WSI entrypoints */
 struct wl_surface;
@@ -241,51 +242,6 @@ void anv_abortfv(const char *format, va_list va) anv_noreturn;
  * wraparound.
  */
 
-struct anv_vector {
-   uint32_t head;
-   uint32_t tail;
-   uint32_t element_size;
-   uint32_t size;
-   void *data;
-};
-
-int anv_vector_init(struct anv_vector *queue, uint32_t element_size, uint32_t size);
-void *anv_vector_add(struct anv_vector *queue);
-void *anv_vector_remove(struct anv_vector *queue);
-
-static inline int
-anv_vector_length(struct anv_vector *queue)
-{
-   return (queue->head - queue->tail) / queue->element_size;
-}
-
-static inline void *
-anv_vector_head(struct anv_vector *vector)
-{
-   assert(vector->tail < vector->head);
-   return (void *)((char *)vector->data +
-                   ((vector->head - vector->element_size) &
-                    (vector->size - 1)));
-}
-
-static inline void *
-anv_vector_tail(struct anv_vector *vector)
-{
-   return (void *)((char *)vector->data + (vector->tail & (vector->size - 1)));
-}
-
-static inline void
-anv_vector_finish(struct anv_vector *queue)
-{
-   free(queue->data);
-}
-
-#define anv_vector_foreach(elem, queue) \
-   static_assert(__builtin_types_compatible_p(__typeof__(queue), struct anv_vector *), ""); \
-   for (uint32_t __anv_vector_offset = (queue)->tail; \
-        elem = (queue)->data + (__anv_vector_offset & ((queue)->size - 1)), __anv_vector_offset < (queue)->head; \
-        __anv_vector_offset += (queue)->element_size)
-
 struct anv_bo {
    uint32_t gem_handle;
 
@@ -364,7 +320,7 @@ struct anv_block_pool {
     * Array of mmaps and gem handles owned by the block pool, reclaimed when
     * the block pool is destroyed.
     */
-   struct anv_vector mmap_cleanups;
+   struct u_vector mmap_cleanups;
 
    uint32_t block_size;
 
@@ -1236,13 +1192,13 @@ struct anv_cmd_buffer {
     *
     * initialized by anv_cmd_buffer_init_batch_bo_chain()
     */
-   struct anv_vector seen_bbos;
+   struct u_vector seen_bbos;
 
    /* A vector of int32_t's for every block of binding tables.
    *
    * initialized by anv_cmd_buffer_init_batch_bo_chain()
    */
-   struct anv_vector bt_blocks;
+   struct u_vector bt_blocks;
    uint32_t bt_next;
 
    struct anv_reloc_list surface_relocs;
diff --git a/src/intel/vulkan/anv_util.c b/src/intel/vulkan/anv_util.c
index 62f47051ec7..2972cd2b8c4 100644
--- a/src/intel/vulkan/anv_util.c
+++ b/src/intel/vulkan/anv_util.c
@@ -125,78 +125,3 @@ __vk_errorf(VkResult error, const char *file, int line, const char *format, ...)
 
    return error;
 }
-
-int
-anv_vector_init(struct anv_vector *vector, uint32_t element_size, uint32_t size)
-{
-   assert(util_is_power_of_two(size));
-   assert(element_size < size && util_is_power_of_two(element_size));
-
-   vector->head = 0;
-   vector->tail = 0;
-   vector->element_size = element_size;
-   vector->size = size;
-   vector->data = malloc(size);
-
-   return vector->data != NULL;
-}
-
-void *
-anv_vector_add(struct anv_vector *vector)
-{
-   uint32_t offset, size, split, src_tail, dst_tail;
-   void *data;
-
-   if (vector->head - vector->tail == vector->size) {
-      size = vector->size * 2;
-      data = malloc(size);
-      if (data == NULL)
-         return NULL;
-      src_tail = vector->tail & (vector->size - 1);
-      dst_tail = vector->tail & (size - 1);
-      if (src_tail == 0) {
-         /* Since we know that the vector is full, this means that it's
-          * linear from start to end so we can do one copy.
-          */
-         memcpy(data + dst_tail, vector->data, vector->size);
-      } else {
-         /* In this case, the vector is split into two pieces and we have
-          * to do two copies.  We have to be careful to make sure each
-          * piece goes to the right locations.  Thanks to the change in
-          * size, it may or may not still wrap around.
-          */
-         split = align_u32(vector->tail, vector->size);
-         assert(vector->tail <= split && split < vector->head);
-         memcpy(data + dst_tail, vector->data + src_tail,
-                split - vector->tail);
-         memcpy(data + (split & (size - 1)), vector->data,
-                vector->head - split);
-      }
-      free(vector->data);
-      vector->data = data;
-      vector->size = size;
-   }
-
-   assert(vector->head - vector->tail < vector->size);
-
-   offset = vector->head & (vector->size - 1);
-   vector->head += vector->element_size;
-
-   return vector->data + offset;
-}
-
-void *
-anv_vector_remove(struct anv_vector *vector)
-{
-   uint32_t offset;
-
-   if (vector->head == vector->tail)
-      return NULL;
-
-   assert(vector->head - vector->tail <= vector->size);
-
-   offset = vector->tail & (vector->size - 1);
-   vector->tail += vector->element_size;
-
-   return vector->data + offset;
-}
diff --git a/src/intel/vulkan/anv_wsi_wayland.c b/src/intel/vulkan/anv_wsi_wayland.c
index 71527d36d5f..5b1a6759ac6 100644
--- a/src/intel/vulkan/anv_wsi_wayland.c
+++ b/src/intel/vulkan/anv_wsi_wayland.c
@@ -37,7 +37,7 @@ struct wsi_wl_display {
    struct wl_drm *                              drm;
 
    /* Vector of VkFormats supported */
-   struct anv_vector                            formats;
+   struct u_vector                              formats;
 
    uint32_t                                     capabilities;
 };
@@ -57,7 +57,7 @@ wsi_wl_display_add_vk_format(struct wsi_wl_display *display, VkFormat format)
 {
    /* Don't add a format that's already in the list */
    VkFormat *f;
-   anv_vector_foreach(f, &display->formats)
+   u_vector_foreach(f, &display->formats)
       if (*f == format)
          return;
 
@@ -68,7 +68,7 @@ wsi_wl_display_add_vk_format(struct wsi_wl_display *display, VkFormat format)
    if (!(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
       return;
 
-   f = anv_vector_add(&display->formats);
+   f = u_vector_add(&display->formats);
    if (f)
       *f = format;
 }
@@ -230,7 +230,7 @@ static const struct wl_registry_listener registry_listener = {
 static void
 wsi_wl_display_destroy(struct wsi_wayland *wsi, struct wsi_wl_display *display)
 {
-   anv_vector_finish(&display->formats);
+   u_vector_finish(&display->formats);
    if (display->drm)
       wl_drm_destroy(display->drm);
    anv_free(&wsi->physical_device->instance->alloc, display);
@@ -250,7 +250,7 @@ wsi_wl_display_create(struct wsi_wayland *wsi, struct wl_display *wl_display)
    display->display = wl_display;
    display->physical_device = wsi->physical_device;
 
-   if (!anv_vector_init(&display->formats, sizeof(VkFormat), 8))
+   if (!u_vector_init(&display->formats, sizeof(VkFormat), 8))
       goto fail;
 
    struct wl_registry *registry = wl_display_get_registry(wl_display);
@@ -383,7 +383,7 @@ wsi_wl_surface_get_formats(VkIcdSurfaceBase *icd_surface,
    struct wsi_wl_display *display =
       wsi_wl_get_display(device, surface->display);
 
-   uint32_t count = anv_vector_length(&display->formats);
+   uint32_t count = u_vector_length(&display->formats);
 
    if (pSurfaceFormats == NULL) {
       *pSurfaceFormatCount = count;
@@ -394,7 +394,7 @@ wsi_wl_surface_get_formats(VkIcdSurfaceBase *icd_surface,
    *pSurfaceFormatCount = count;
 
    VkFormat *f;
-   anv_vector_foreach(f, &display->formats) {
+   u_vector_foreach(f, &display->formats) {
       *(pSurfaceFormats++) = (VkSurfaceFormatKHR) {
          .format = *f,
          /* TODO: We should get this from the compositor somehow */
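Not part of the patch, for reference only: the u_vector container adopted above keeps the same ring-buffer semantics as the anv_vector code removed from anv_util.c and anv_private.h (free-running head/tail byte offsets masked by a power-of-two size). The sketch below mirrors the call signatures exactly as they appear in this diff; it assumes it is built inside the Mesa tree so that "util/u_vector.h" is on the include path, and both the element size and the total size passed to u_vector_init are powers of two, as the removed asserts require.

/* Illustrative sketch only, not part of the commit. */
#include <stdint.h>
#include <stdio.h>
#include "util/u_vector.h"

int
main(void)
{
   struct u_vector v;

   /* 4-byte elements, 32 bytes of initial storage (8 elements),
    * matching the bt_blocks initialization in the diff above.
    */
   if (!u_vector_init(&v, sizeof(int32_t), 8 * sizeof(int32_t)))
      return 1;

   /* u_vector_add() returns a pointer to the newly reserved head slot,
    * or NULL if growing the backing storage failed.
    */
   for (int32_t i = 0; i < 4; i++) {
      int32_t *slot = u_vector_add(&v);
      if (slot == NULL)
         return 1;
      *slot = i * 100;
   }

   /* Iterate from tail to head, like the bt_blocks/formats loops above. */
   int32_t *elem;
   u_vector_foreach(elem, &v)
      printf("%d\n", *elem);

   /* u_vector_remove() pops the oldest element from the tail;
    * u_vector_head() peeks at the most recently added element.
    */
   int32_t *oldest = u_vector_remove(&v);
   int32_t *newest = u_vector_head(&v);
   printf("removed %d, head %d, length %d\n",
          *oldest, *newest, u_vector_length(&v));

   u_vector_finish(&v);
   return 0;
}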