author    | Chia-I Wu <[email protected]> | 2019-01-09 14:16:01 -0800
committer | Chia-I Wu <[email protected]> | 2019-03-11 10:01:41 -0700
commit    | d30baaaba645dcc1b4b4e16c4914554b896ea54a (patch)
tree      | faacc671e482deb8614df319722b4eedc8eace50 /src
parent    | 6401ad389e9cec6f523e4e4e989c190fb25a8dfc (diff)
turnip: add .clang-format
Add and apply .clang-format.
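The new .clang-format (added in full in the diff below) is based on the LLVM style with a 78-column limit, 3-space indentation, a break after top-level return types, no bin-packing of parameters, and a space after C-style casts. A minimal sketch of what a function looks like under those rules; the function name and its body are hypothetical and not taken from the patch:

   #include "tu_private.h"

   static VkResult
   tu_example_alloc(struct tu_device *device,
                    const VkAllocationCallbacks *alloc,
                    void **out)
   {
      /* Hypothetical helper: 3-space indent, 78-column limit, parameters on
       * separate lines when the declaration does not fit on one line, and a
       * space after the C-style cast below. */
      *out = (void *) vk_alloc2(&device->alloc, alloc, 64, 8,
                                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      return *out ? VK_SUCCESS : VK_ERROR_OUT_OF_HOST_MEMORY;
   }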
Diffstat (limited to 'src')
-rw-r--r-- | src/freedreno/vulkan/.clang-format       |  31
-rw-r--r-- | src/freedreno/vulkan/tu_android.c        | 116
-rw-r--r-- | src/freedreno/vulkan/tu_cmd_buffer.c     | 111
-rw-r--r-- | src/freedreno/vulkan/tu_descriptor_set.c | 273
-rw-r--r-- | src/freedreno/vulkan/tu_descriptor_set.h |   8
-rw-r--r-- | src/freedreno/vulkan/tu_device.c         | 563
-rw-r--r-- | src/freedreno/vulkan/tu_drm.c            |  37
-rw-r--r-- | src/freedreno/vulkan/tu_formats.c        | 235
-rw-r--r-- | src/freedreno/vulkan/tu_image.c          |  76
-rw-r--r-- | src/freedreno/vulkan/tu_meta_blit.c      |   5
-rw-r--r-- | src/freedreno/vulkan/tu_meta_clear.c     |   4
-rw-r--r-- | src/freedreno/vulkan/tu_meta_copy.c      |  25
-rw-r--r-- | src/freedreno/vulkan/tu_meta_resolve.c   |   7
-rw-r--r-- | src/freedreno/vulkan/tu_pass.c           | 146
-rw-r--r-- | src/freedreno/vulkan/tu_pipeline.c       |  25
-rw-r--r-- | src/freedreno/vulkan/tu_pipeline_cache.c |  46
-rw-r--r-- | src/freedreno/vulkan/tu_private.h        | 147
-rw-r--r-- | src/freedreno/vulkan/tu_query.c          |  15
-rw-r--r-- | src/freedreno/vulkan/tu_util.c           |  12
-rw-r--r-- | src/freedreno/vulkan/vk_format.h         | 238
20 files changed, 1027 insertions, 1093 deletions
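The IncludeCategories rules in the new .clang-format, together with IncludeBlocks: Regroup, are what reorder the #include lists in the hunks below: "tu_private.h" always comes first, general system headers next, then <vulkan/...> headers, then Mesa helper headers, with drm/ and other tu_ headers last. A short sketch of the resulting grouping, assembled from headers that appear in the tu_android.c and tu_device.c hunks (the exact combination is illustrative):

   #include "tu_private.h"

   #include <fcntl.h>
   #include <hardware/gralloc.h>

   #include <vulkan/vk_icd.h>

   #include "util/debug.h"
   #include "vk_util.h"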
diff --git a/src/freedreno/vulkan/.clang-format b/src/freedreno/vulkan/.clang-format new file mode 100644 index 00000000000..7ff44c54085 --- /dev/null +++ b/src/freedreno/vulkan/.clang-format @@ -0,0 +1,31 @@ +BasedOnStyle: LLVM +AlwaysBreakAfterReturnType: TopLevel +BinPackParameters: false +BraceWrapping: + AfterControlStatement: false + AfterEnum: true + AfterFunction: true + AfterStruct: true + BeforeElse: false + SplitEmptyFunction: true +BreakBeforeBraces: Custom +ColumnLimit: 78 +ContinuationIndentWidth: 3 +Cpp11BracedListStyle: false +IncludeBlocks: Regroup +IncludeCategories: + - Regex: '^"tu_private.h"$' + Priority: 0 + - Regex: '^"(drm/|tu_)' + Priority: 4 + - Regex: '^"(c11/|compiler/|main/|nir/|spirv/|util/|vk_)' + Priority: 3 + - Regex: '^<(vulkan/)' + Priority: 2 + - Regex: '.*' + Priority: 1 +IndentWidth: 3 +PenaltyBreakBeforeFirstCallParameter: 1 +PenaltyExcessCharacter: 100 +SpaceAfterCStyleCast: true +SpaceBeforeCpp11BracedList: true diff --git a/src/freedreno/vulkan/tu_android.c b/src/freedreno/vulkan/tu_android.c index a88ed1418d8..533a5bea9df 100644 --- a/src/freedreno/vulkan/tu_android.c +++ b/src/freedreno/vulkan/tu_android.c @@ -17,19 +17,20 @@ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. */ +#include "tu_private.h" + #include <hardware/gralloc.h> #include <hardware/hardware.h> #include <hardware/hwvulkan.h> #include <libsync.h> + #include <vulkan/vk_android_native_buffer.h> #include <vulkan/vk_icd.h> -#include "tu_private.h" - static int tu_hal_open(const struct hw_module_t *mod, const char *id, @@ -120,18 +121,16 @@ tu_image_from_gralloc(VkDevice device_h, VkResult result; result = tu_image_create( - device_h, - &(struct tu_image_create_info){ - .vk_info = base_info, .scanout = true, .no_metadata_planes = true }, - alloc, - &image_h); + device_h, + &(struct tu_image_create_info) { + .vk_info = base_info, .scanout = true, .no_metadata_planes = true }, + alloc, &image_h); if (result != VK_SUCCESS) return result; if (gralloc_info->handle->numFds != 1) { - return vk_errorf(device->instance, - VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR, + return vk_errorf(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR, "VkNativeBufferANDROID::handle::numFds is %d, " "expected 1", gralloc_info->handle->numFds); @@ -163,12 +162,11 @@ tu_image_from_gralloc(VkDevice device_h, /* Find the first VRAM memory type, or GART for PRIME images. 
*/ int memory_type_index = -1; for (int i = 0; - i < device->physical_device->memory_properties.memoryTypeCount; - ++i) { + i < device->physical_device->memory_properties.memoryTypeCount; ++i) { bool is_local = - !!(device->physical_device->memory_properties.memoryTypes[i] - .propertyFlags & - VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); + !!(device->physical_device->memory_properties.memoryTypes[i] + .propertyFlags & + VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); if (is_local) { memory_type_index = i; break; @@ -180,15 +178,14 @@ tu_image_from_gralloc(VkDevice device_h, memory_type_index = 0; result = - tu_AllocateMemory(device_h, - &(VkMemoryAllocateInfo){ - .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, - .pNext = &import_info, - .allocationSize = image->size, - .memoryTypeIndex = memory_type_index, + tu_AllocateMemory(device_h, + &(VkMemoryAllocateInfo) { + .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, + .pNext = &import_info, + .allocationSize = image->size, + .memoryTypeIndex = memory_type_index, }, - alloc, - &memory_h); + alloc, &memory_h); if (result != VK_SUCCESS) goto fail_create_image; @@ -248,42 +245,39 @@ tu_GetSwapchainGrallocUsageANDROID(VkDevice device_h, /* Check that requested format and usage are supported. */ result = tu_GetPhysicalDeviceImageFormatProperties2( - phys_dev_h, &image_format_info, &image_format_props); + phys_dev_h, &image_format_info, &image_format_props); if (result != VK_SUCCESS) { - return vk_errorf(device->instance, - result, + return vk_errorf(device->instance, result, "tu_GetPhysicalDeviceImageFormatProperties2 failed " "inside %s", __func__); } - if (unmask32(&imageUsage, - VK_IMAGE_USAGE_TRANSFER_DST_BIT | - VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) + if (unmask32(&imageUsage, VK_IMAGE_USAGE_TRANSFER_DST_BIT | + VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) *grallocUsage |= GRALLOC_USAGE_HW_RENDER; - if (unmask32(&imageUsage, - VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT | - VK_IMAGE_USAGE_STORAGE_BIT | - VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) + if (unmask32(&imageUsage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | + VK_IMAGE_USAGE_SAMPLED_BIT | + VK_IMAGE_USAGE_STORAGE_BIT | + VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) *grallocUsage |= GRALLOC_USAGE_HW_TEXTURE; /* All VkImageUsageFlags not explicitly checked here are unsupported for * gralloc swapchains. */ if (imageUsage != 0) { - return vk_errorf(device->instance, - VK_ERROR_FORMAT_NOT_SUPPORTED, + return vk_errorf(device->instance, VK_ERROR_FORMAT_NOT_SUPPORTED, "unsupported VkImageUsageFlags(0x%x) for gralloc " "swapchain", imageUsage); } /* - * FINISHME: Advertise all display-supported formats. Mostly - * DRM_FORMAT_ARGB2101010 and DRM_FORMAT_ABGR2101010, but need to check - * what we need for 30-bit colors. - */ + * FINISHME: Advertise all display-supported formats. Mostly + * DRM_FORMAT_ARGB2101010 and DRM_FORMAT_ABGR2101010, but need to check + * what we need for 30-bit colors. + */ if (format == VK_FORMAT_B8G8R8A8_UNORM || format == VK_FORMAT_B5G6R5_UNORM_PACK16) { *grallocUsage |= GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_HW_COMPOSER | @@ -307,27 +301,25 @@ tu_AcquireImageANDROID(VkDevice device, if (semaphore != VK_NULL_HANDLE) { int semaphore_fd = - nativeFenceFd >= 0 ? dup(nativeFenceFd) : nativeFenceFd; + nativeFenceFd >= 0 ? 
dup(nativeFenceFd) : nativeFenceFd; semaphore_result = tu_ImportSemaphoreFdKHR( - device, - &(VkImportSemaphoreFdInfoKHR){ - .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR, - .flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR, - .fd = semaphore_fd, - .semaphore = semaphore, - }); + device, &(VkImportSemaphoreFdInfoKHR) { + .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR, + .flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR, + .fd = semaphore_fd, + .semaphore = semaphore, + }); } if (fence != VK_NULL_HANDLE) { int fence_fd = nativeFenceFd >= 0 ? dup(nativeFenceFd) : nativeFenceFd; fence_result = tu_ImportFenceFdKHR( - device, - &(VkImportFenceFdInfoKHR){ - .sType = VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR, - .flags = VK_FENCE_IMPORT_TEMPORARY_BIT_KHR, - .fd = fence_fd, - .fence = fence, - }); + device, &(VkImportFenceFdInfoKHR) { + .sType = VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR, + .flags = VK_FENCE_IMPORT_TEMPORARY_BIT_KHR, + .fd = fence_fd, + .fence = fence, + }); } close(nativeFenceFd); @@ -358,13 +350,13 @@ tu_QueueSignalReleaseImageANDROID(VkQueue _queue, for (uint32_t i = 0; i < waitSemaphoreCount; ++i) { int tmp_fd; result = tu_GetSemaphoreFdKHR( - tu_device_to_handle(queue->device), - &(VkSemaphoreGetFdInfoKHR){ - .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR, - .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR, - .semaphore = pWaitSemaphores[i], - }, - &tmp_fd); + tu_device_to_handle(queue->device), + &(VkSemaphoreGetFdInfoKHR) { + .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR, + .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR, + .semaphore = pWaitSemaphores[i], + }, + &tmp_fd); if (result != VK_SUCCESS) { if (fd >= 0) close(fd); diff --git a/src/freedreno/vulkan/tu_cmd_buffer.c b/src/freedreno/vulkan/tu_cmd_buffer.c index 5c83bca5538..60002a0a708 100644 --- a/src/freedreno/vulkan/tu_cmd_buffer.c +++ b/src/freedreno/vulkan/tu_cmd_buffer.c @@ -21,11 +21,12 @@ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
*/ #include "tu_private.h" + #include "vk_format.h" const struct tu_dynamic_state default_dynamic_state = { @@ -85,22 +86,19 @@ tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer, dest->discard_rectangle.count = src->discard_rectangle.count; if (copy_mask & TU_DYNAMIC_VIEWPORT) { - if (memcmp(&dest->viewport.viewports, - &src->viewport.viewports, + if (memcmp(&dest->viewport.viewports, &src->viewport.viewports, src->viewport.count * sizeof(VkViewport))) { - typed_memcpy(dest->viewport.viewports, - src->viewport.viewports, + typed_memcpy(dest->viewport.viewports, src->viewport.viewports, src->viewport.count); dest_mask |= TU_DYNAMIC_VIEWPORT; } } if (copy_mask & TU_DYNAMIC_SCISSOR) { - if (memcmp(&dest->scissor.scissors, - &src->scissor.scissors, + if (memcmp(&dest->scissor.scissors, &src->scissor.scissors, src->scissor.count * sizeof(VkRect2D))) { - typed_memcpy( - dest->scissor.scissors, src->scissor.scissors, src->scissor.count); + typed_memcpy(dest->scissor.scissors, src->scissor.scissors, + src->scissor.count); dest_mask |= TU_DYNAMIC_SCISSOR; } } @@ -113,16 +111,15 @@ tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer, } if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) { - if (memcmp( - &dest->depth_bias, &src->depth_bias, sizeof(src->depth_bias))) { + if (memcmp(&dest->depth_bias, &src->depth_bias, + sizeof(src->depth_bias))) { dest->depth_bias = src->depth_bias; dest_mask |= TU_DYNAMIC_DEPTH_BIAS; } } if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) { - if (memcmp(&dest->blend_constants, - &src->blend_constants, + if (memcmp(&dest->blend_constants, &src->blend_constants, sizeof(src->blend_constants))) { typed_memcpy(dest->blend_constants, src->blend_constants, 4); dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS; @@ -130,8 +127,7 @@ tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer, } if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) { - if (memcmp(&dest->depth_bounds, - &src->depth_bounds, + if (memcmp(&dest->depth_bounds, &src->depth_bounds, sizeof(src->depth_bounds))) { dest->depth_bounds = src->depth_bounds; dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS; @@ -139,8 +135,7 @@ tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer, } if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) { - if (memcmp(&dest->stencil_compare_mask, - &src->stencil_compare_mask, + if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask, sizeof(src->stencil_compare_mask))) { dest->stencil_compare_mask = src->stencil_compare_mask; dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK; @@ -148,8 +143,7 @@ tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer, } if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) { - if (memcmp(&dest->stencil_write_mask, - &src->stencil_write_mask, + if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask, sizeof(src->stencil_write_mask))) { dest->stencil_write_mask = src->stencil_write_mask; dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK; @@ -157,8 +151,7 @@ tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer, } if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) { - if (memcmp(&dest->stencil_reference, - &src->stencil_reference, + if (memcmp(&dest->stencil_reference, &src->stencil_reference, sizeof(src->stencil_reference))) { dest->stencil_reference = src->stencil_reference; dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE; @@ -184,8 +177,8 @@ tu_create_cmd_buffer(struct tu_device *device, VkCommandBuffer *pCommandBuffer) { struct tu_cmd_buffer *cmd_buffer; - cmd_buffer = vk_zalloc( - &pool->alloc, sizeof(*cmd_buffer), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + cmd_buffer = vk_zalloc(&pool->alloc, 
sizeof(*cmd_buffer), 8, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (cmd_buffer == NULL) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); @@ -255,7 +248,7 @@ tu_AllocateCommandBuffers(VkDevice _device, if (!list_empty(&pool->free_cmd_buffers)) { struct tu_cmd_buffer *cmd_buffer = list_first_entry( - &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link); + &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link); list_del(&cmd_buffer->pool_link); list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers); @@ -266,16 +259,16 @@ tu_AllocateCommandBuffers(VkDevice _device, pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer); } else { - result = tu_create_cmd_buffer( - device, pool, pAllocateInfo->level, &pCommandBuffers[i]); + result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level, + &pCommandBuffers[i]); } if (result != VK_SUCCESS) break; } if (result != VK_SUCCESS) { - tu_FreeCommandBuffers( - _device, pAllocateInfo->commandPool, i, pCommandBuffers); + tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i, + pCommandBuffers); /* From the Vulkan 1.0.66 spec: * @@ -286,8 +279,7 @@ tu_AllocateCommandBuffers(VkDevice _device, * command, set all entries of the pCommandBuffers array to * NULL and return the error." */ - memset(pCommandBuffers, - 0, + memset(pCommandBuffers, 0, sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount); } @@ -344,11 +336,11 @@ tu_BeginCommandBuffer(VkCommandBuffer commandBuffer, /* setup initial configuration into command buffer */ if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) { switch (cmd_buffer->queue_family_index) { - case TU_QUEUE_GENERAL: - /* init */ - break; - default: - break; + case TU_QUEUE_GENERAL: + /* init */ + break; + default: + break; } } @@ -492,10 +484,7 @@ tu_CreateCommandPool(VkDevice _device, TU_FROM_HANDLE(tu_device, device, _device); struct tu_cmd_pool *pool; - pool = vk_alloc2(&device->alloc, - pAllocator, - sizeof(*pool), - 8, + pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (pool == NULL) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); @@ -526,14 +515,14 @@ tu_DestroyCommandPool(VkDevice _device, if (!pool) return; - list_for_each_entry_safe( - struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers, pool_link) + list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer, + &pool->cmd_buffers, pool_link) { tu_cmd_buffer_destroy(cmd_buffer); } - list_for_each_entry_safe( - struct tu_cmd_buffer, cmd_buffer, &pool->free_cmd_buffers, pool_link) + list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer, + &pool->free_cmd_buffers, pool_link) { tu_cmd_buffer_destroy(cmd_buffer); } @@ -549,8 +538,8 @@ tu_ResetCommandPool(VkDevice device, TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool); VkResult result; - list_for_each_entry( - struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers, pool_link) + list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers, + pool_link) { result = tu_reset_cmd_buffer(cmd_buffer); if (result != VK_SUCCESS) @@ -570,8 +559,8 @@ tu_TrimCommandPool(VkDevice device, if (!pool) return; - list_for_each_entry_safe( - struct tu_cmd_buffer, cmd_buffer, &pool->free_cmd_buffers, pool_link) + list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer, + &pool->free_cmd_buffers, pool_link) { tu_cmd_buffer_destroy(cmd_buffer); } @@ -589,8 +578,8 @@ tu_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBeginInfo, const VkSubpassBeginInfoKHR *pSubpassBeginInfo) { 
- tu_CmdBeginRenderPass( - commandBuffer, pRenderPassBeginInfo, pSubpassBeginInfo->contents); + tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo, + pSubpassBeginInfo->contents); } void @@ -861,14 +850,9 @@ tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer, info.pEvents = NULL; info.srcStageMask = srcStageMask; - tu_barrier(cmd_buffer, - memoryBarrierCount, - pMemoryBarriers, - bufferMemoryBarrierCount, - pBufferMemoryBarriers, - imageMemoryBarrierCount, - pImageMemoryBarriers, - &info); + tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers, + bufferMemoryBarrierCount, pBufferMemoryBarriers, + imageMemoryBarrierCount, pImageMemoryBarriers, &info); } static void @@ -921,14 +905,9 @@ tu_CmdWaitEvents(VkCommandBuffer commandBuffer, info.pEvents = pEvents; info.srcStageMask = 0; - tu_barrier(cmd_buffer, - memoryBarrierCount, - pMemoryBarriers, - bufferMemoryBarrierCount, - pBufferMemoryBarriers, - imageMemoryBarrierCount, - pImageMemoryBarriers, - &info); + tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers, + bufferMemoryBarrierCount, pBufferMemoryBarriers, + imageMemoryBarrierCount, pImageMemoryBarriers, &info); } void diff --git a/src/freedreno/vulkan/tu_descriptor_set.c b/src/freedreno/vulkan/tu_descriptor_set.c index 7d3b8a6b7b1..2bfbf327592 100644 --- a/src/freedreno/vulkan/tu_descriptor_set.c +++ b/src/freedreno/vulkan/tu_descriptor_set.c @@ -18,16 +18,17 @@ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. */ +#include "tu_private.h" + #include <assert.h> #include <fcntl.h> #include <stdbool.h> #include <string.h> #include <unistd.h> -#include "tu_private.h" #include "util/mesa-sha1.h" #include "vk_util.h" @@ -35,9 +36,9 @@ static int binding_compare(const void *av, const void *bv) { const VkDescriptorSetLayoutBinding *a = - (const VkDescriptorSetLayoutBinding *)av; + (const VkDescriptorSetLayoutBinding *) av; const VkDescriptorSetLayoutBinding *b = - (const VkDescriptorSetLayoutBinding *)bv; + (const VkDescriptorSetLayoutBinding *) bv; return (a->binding < b->binding) ? -1 : (a->binding > b->binding) ? 
1 : 0; } @@ -47,16 +48,14 @@ create_sorted_bindings(const VkDescriptorSetLayoutBinding *bindings, unsigned count) { VkDescriptorSetLayoutBinding *sorted_bindings = - malloc(count * sizeof(VkDescriptorSetLayoutBinding)); + malloc(count * sizeof(VkDescriptorSetLayoutBinding)); if (!sorted_bindings) return NULL; - memcpy( - sorted_bindings, bindings, count * sizeof(VkDescriptorSetLayoutBinding)); + memcpy(sorted_bindings, bindings, + count * sizeof(VkDescriptorSetLayoutBinding)); - qsort(sorted_bindings, - count, - sizeof(VkDescriptorSetLayoutBinding), + qsort(sorted_bindings, count, sizeof(VkDescriptorSetLayoutBinding), binding_compare); return sorted_bindings; @@ -75,8 +74,9 @@ tu_CreateDescriptorSetLayout( assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO); const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT *variable_flags = - vk_find_struct_const(pCreateInfo->pNext, - DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT); + vk_find_struct_const( + pCreateInfo->pNext, + DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT); uint32_t max_binding = 0; uint32_t immutable_sampler_count = 0; @@ -87,24 +87,24 @@ tu_CreateDescriptorSetLayout( } uint32_t samplers_offset = - sizeof(struct tu_descriptor_set_layout) + - (max_binding + 1) * sizeof(set_layout->binding[0]); + sizeof(struct tu_descriptor_set_layout) + + (max_binding + 1) * sizeof(set_layout->binding[0]); size_t size = - samplers_offset + immutable_sampler_count * 4 * sizeof(uint32_t); + samplers_offset + immutable_sampler_count * 4 * sizeof(uint32_t); - set_layout = vk_alloc2( - &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + set_layout = vk_alloc2(&device->alloc, pAllocator, size, 8, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (!set_layout) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); set_layout->flags = pCreateInfo->flags; /* We just allocate all the samplers at the end of the struct */ - uint32_t *samplers = (uint32_t *)&set_layout->binding[max_binding + 1]; + uint32_t *samplers = (uint32_t *) &set_layout->binding[max_binding + 1]; (void) samplers; /* TODO: Use me */ - VkDescriptorSetLayoutBinding *bindings = - create_sorted_bindings(pCreateInfo->pBindings, pCreateInfo->bindingCount); + VkDescriptorSetLayoutBinding *bindings = create_sorted_bindings( + pCreateInfo->pBindings, pCreateInfo->bindingCount); if (!bindings) { vk_free2(&device->alloc, pAllocator, set_layout); return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); @@ -116,8 +116,8 @@ tu_CreateDescriptorSetLayout( set_layout->has_immutable_samplers = false; set_layout->size = 0; - memset( - set_layout->binding, 0, size - sizeof(struct tu_descriptor_set_layout)); + memset(set_layout->binding, 0, + size - sizeof(struct tu_descriptor_set_layout)); uint32_t buffer_count = 0; uint32_t dynamic_offset_count = 0; @@ -129,45 +129,45 @@ tu_CreateDescriptorSetLayout( unsigned binding_buffer_count = 0; switch (binding->descriptorType) { - case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: - case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: - assert(!(pCreateInfo->flags & - VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR)); - set_layout->binding[b].dynamic_offset_count = 1; - set_layout->dynamic_shader_stages |= binding->stageFlags; - set_layout->binding[b].size = 0; - binding_buffer_count = 1; - alignment = 1; - break; - case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: - case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: - case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: - case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: - 
set_layout->binding[b].size = 16; - binding_buffer_count = 1; - alignment = 16; - break; - case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: - case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: - case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: - /* main descriptor + fmask descriptor */ - set_layout->binding[b].size = 64; - binding_buffer_count = 1; - alignment = 32; - break; - case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: - /* main descriptor + fmask descriptor + sampler */ - set_layout->binding[b].size = 96; - binding_buffer_count = 1; - alignment = 32; - break; - case VK_DESCRIPTOR_TYPE_SAMPLER: - set_layout->binding[b].size = 16; - alignment = 16; - break; - default: - unreachable("unknown descriptor type\n"); - break; + case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: + case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: + assert(!(pCreateInfo->flags & + VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR)); + set_layout->binding[b].dynamic_offset_count = 1; + set_layout->dynamic_shader_stages |= binding->stageFlags; + set_layout->binding[b].size = 0; + binding_buffer_count = 1; + alignment = 1; + break; + case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: + case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: + case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: + case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: + set_layout->binding[b].size = 16; + binding_buffer_count = 1; + alignment = 16; + break; + case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: + case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: + case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: + /* main descriptor + fmask descriptor */ + set_layout->binding[b].size = 64; + binding_buffer_count = 1; + alignment = 32; + break; + case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: + /* main descriptor + fmask descriptor + sampler */ + set_layout->binding[b].size = 96; + binding_buffer_count = 1; + alignment = 32; + break; + case VK_DESCRIPTOR_TYPE_SAMPLER: + set_layout->binding[b].size = 16; + alignment = 16; + break; + default: + unreachable("unknown descriptor type\n"); + break; } set_layout->size = align(set_layout->size, alignment); @@ -180,8 +180,8 @@ tu_CreateDescriptorSetLayout( if (variable_flags && binding->binding < variable_flags->bindingCount && (variable_flags->pBindingFlags[binding->binding] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)) { - assert(!binding->pImmutableSamplers); /* Terribly ill defined how many - samplers are valid */ + assert(!binding->pImmutableSamplers); /* Terribly ill defined how + many samplers are valid */ assert(binding->binding == max_binding); set_layout->has_variable_descriptors = true; @@ -193,10 +193,10 @@ tu_CreateDescriptorSetLayout( } set_layout->size += - binding->descriptorCount * set_layout->binding[b].size; + binding->descriptorCount * set_layout->binding[b].size; buffer_count += binding->descriptorCount * binding_buffer_count; - dynamic_offset_count += - binding->descriptorCount * set_layout->binding[b].dynamic_offset_count; + dynamic_offset_count += binding->descriptorCount * + set_layout->binding[b].dynamic_offset_count; set_layout->shader_stages |= binding->stageFlags; } @@ -230,20 +230,21 @@ tu_GetDescriptorSetLayoutSupport( const VkDescriptorSetLayoutCreateInfo *pCreateInfo, VkDescriptorSetLayoutSupport *pSupport) { - VkDescriptorSetLayoutBinding *bindings = - create_sorted_bindings(pCreateInfo->pBindings, pCreateInfo->bindingCount); + VkDescriptorSetLayoutBinding *bindings = create_sorted_bindings( + pCreateInfo->pBindings, pCreateInfo->bindingCount); if (!bindings) { pSupport->supported = false; return; } const 
VkDescriptorSetLayoutBindingFlagsCreateInfoEXT *variable_flags = - vk_find_struct_const(pCreateInfo->pNext, - DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT); + vk_find_struct_const( + pCreateInfo->pNext, + DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT); VkDescriptorSetVariableDescriptorCountLayoutSupportEXT *variable_count = - vk_find_struct( - (void *)pCreateInfo->pNext, - DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT); + vk_find_struct( + (void *) pCreateInfo->pNext, + DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT); if (variable_count) { variable_count->maxVariableDescriptorCount = 0; } @@ -256,33 +257,33 @@ tu_GetDescriptorSetLayoutSupport( uint64_t descriptor_size = 0; uint64_t descriptor_alignment = 1; switch (binding->descriptorType) { - case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: - case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: - break; - case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: - case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: - case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: - case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: - descriptor_size = 16; - descriptor_alignment = 16; - break; - case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: - case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: - case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: - descriptor_size = 64; - descriptor_alignment = 32; - break; - case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: - descriptor_size = 96; - descriptor_alignment = 32; - break; - case VK_DESCRIPTOR_TYPE_SAMPLER: - descriptor_size = 16; - descriptor_alignment = 16; - break; - default: - unreachable("unknown descriptor type\n"); - break; + case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: + case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: + break; + case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: + case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: + case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: + case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: + descriptor_size = 16; + descriptor_alignment = 16; + break; + case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: + case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: + case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: + descriptor_size = 64; + descriptor_alignment = 32; + break; + case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: + descriptor_size = 96; + descriptor_alignment = 32; + break; + case VK_DESCRIPTOR_TYPE_SAMPLER: + descriptor_size = 16; + descriptor_alignment = 16; + break; + default: + unreachable("unknown descriptor type\n"); + break; } if (size && !align_u64(size, descriptor_alignment)) { @@ -302,7 +303,7 @@ tu_GetDescriptorSetLayoutSupport( (variable_flags->pBindingFlags[binding->binding] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)) { variable_count->maxVariableDescriptorCount = - MIN2(UINT32_MAX, max_count); + MIN2(UINT32_MAX, max_count); } size += binding->descriptorCount * descriptor_size; } @@ -327,12 +328,10 @@ tu_CreatePipelineLayout(VkDevice _device, struct tu_pipeline_layout *layout; struct mesa_sha1 ctx; - assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO); + assert(pCreateInfo->sType == + VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO); - layout = vk_alloc2(&device->alloc, - pAllocator, - sizeof(*layout), - 8, + layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (layout == NULL) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); @@ -343,8 +342,8 @@ tu_CreatePipelineLayout(VkDevice _device, _mesa_sha1_init(&ctx); for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) { - TU_FROM_HANDLE( - tu_descriptor_set_layout, set_layout, 
pCreateInfo->pSetLayouts[set]); + TU_FROM_HANDLE(tu_descriptor_set_layout, set_layout, + pCreateInfo->pSetLayouts[set]); layout->set[set].layout = set_layout; layout->set[set].dynamic_offset_start = dynamic_offset_count; @@ -353,14 +352,13 @@ tu_CreatePipelineLayout(VkDevice _device, set_layout->binding[b].dynamic_offset_count; if (set_layout->binding[b].immutable_samplers_offset) _mesa_sha1_update( - &ctx, - tu_immutable_samplers(set_layout, set_layout->binding + b), - set_layout->binding[b].array_size * 4 * sizeof(uint32_t)); + &ctx, + tu_immutable_samplers(set_layout, set_layout->binding + b), + set_layout->binding[b].array_size * 4 * sizeof(uint32_t)); } - _mesa_sha1_update(&ctx, - set_layout->binding, - sizeof(set_layout->binding[0]) * - set_layout->binding_count); + _mesa_sha1_update( + &ctx, set_layout->binding, + sizeof(set_layout->binding[0]) * set_layout->binding_count); } layout->dynamic_offset_count = dynamic_offset_count; @@ -369,12 +367,12 @@ tu_CreatePipelineLayout(VkDevice _device, for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) { const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i; layout->push_constant_size = - MAX2(layout->push_constant_size, range->offset + range->size); + MAX2(layout->push_constant_size, range->offset + range->size); } layout->push_constant_size = align(layout->push_constant_size, 16); - _mesa_sha1_update( - &ctx, &layout->push_constant_size, sizeof(layout->push_constant_size)); + _mesa_sha1_update(&ctx, &layout->push_constant_size, + sizeof(layout->push_constant_size)); _mesa_sha1_final(&ctx, layout->sha1); *pPipelineLayout = tu_pipeline_layout_to_handle(layout); @@ -475,13 +473,9 @@ tu_UpdateDescriptorSets(VkDevice _device, { TU_FROM_HANDLE(tu_device, device, _device); - tu_update_descriptor_sets(device, - NULL, - VK_NULL_HANDLE, - descriptorWriteCount, - pDescriptorWrites, - descriptorCopyCount, - pDescriptorCopies); + tu_update_descriptor_sets(device, NULL, VK_NULL_HANDLE, + descriptorWriteCount, pDescriptorWrites, + descriptorCopyCount, pDescriptorCopies); } VkResult @@ -492,20 +486,21 @@ tu_CreateDescriptorUpdateTemplate( VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) { TU_FROM_HANDLE(tu_device, device, _device); - TU_FROM_HANDLE( - tu_descriptor_set_layout, set_layout, pCreateInfo->descriptorSetLayout); + TU_FROM_HANDLE(tu_descriptor_set_layout, set_layout, + pCreateInfo->descriptorSetLayout); const uint32_t entry_count = pCreateInfo->descriptorUpdateEntryCount; const size_t size = - sizeof(struct tu_descriptor_update_template) + - sizeof(struct tu_descriptor_update_template_entry) * entry_count; + sizeof(struct tu_descriptor_update_template) + + sizeof(struct tu_descriptor_update_template_entry) * entry_count; struct tu_descriptor_update_template *templ; - templ = vk_alloc2( - &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + templ = vk_alloc2(&device->alloc, pAllocator, size, 8, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (!templ) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); - *pDescriptorUpdateTemplate = tu_descriptor_update_template_to_handle(templ); + *pDescriptorUpdateTemplate = + tu_descriptor_update_template_to_handle(templ); tu_use_args(set_layout); tu_stub(); @@ -519,8 +514,8 @@ tu_DestroyDescriptorUpdateTemplate( const VkAllocationCallbacks *pAllocator) { TU_FROM_HANDLE(tu_device, device, _device); - TU_FROM_HANDLE( - tu_descriptor_update_template, templ, descriptorUpdateTemplate); + TU_FROM_HANDLE(tu_descriptor_update_template, templ, + 
descriptorUpdateTemplate); if (!templ) return; @@ -536,8 +531,8 @@ tu_update_descriptor_set_with_template( VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void *pData) { - TU_FROM_HANDLE( - tu_descriptor_update_template, templ, descriptorUpdateTemplate); + TU_FROM_HANDLE(tu_descriptor_update_template, templ, + descriptorUpdateTemplate); tu_use_args(templ); } @@ -551,8 +546,8 @@ tu_UpdateDescriptorSetWithTemplate( TU_FROM_HANDLE(tu_device, device, _device); TU_FROM_HANDLE(tu_descriptor_set, set, descriptorSet); - tu_update_descriptor_set_with_template( - device, NULL, set, descriptorUpdateTemplate, pData); + tu_update_descriptor_set_with_template(device, NULL, set, + descriptorUpdateTemplate, pData); } VkResult diff --git a/src/freedreno/vulkan/tu_descriptor_set.h b/src/freedreno/vulkan/tu_descriptor_set.h index c16677a5322..5692e11b14e 100644 --- a/src/freedreno/vulkan/tu_descriptor_set.h +++ b/src/freedreno/vulkan/tu_descriptor_set.h @@ -17,8 +17,8 @@ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. */ #ifndef TU_DESCRIPTOR_SET_H @@ -96,7 +96,7 @@ static inline const uint32_t * tu_immutable_samplers(const struct tu_descriptor_set_layout *set, const struct tu_descriptor_set_binding_layout *binding) { - return (const uint32_t *)((const char *)set + - binding->immutable_samplers_offset); + return (const uint32_t *) ((const char *) set + + binding->immutable_samplers_offset); } #endif /* TU_DESCRIPTOR_SET_H */ diff --git a/src/freedreno/vulkan/tu_device.c b/src/freedreno/vulkan/tu_device.c index b42bb4cb03b..67c00c866be 100644 --- a/src/freedreno/vulkan/tu_device.c +++ b/src/freedreno/vulkan/tu_device.c @@ -21,24 +21,26 @@ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
*/ #include "tu_private.h" -#include "util/debug.h" -#include "util/disk_cache.h" -#include "util/strtod.h" -#include "vk_format.h" -#include "vk_util.h" + #include <fcntl.h> +#include <msm_drm.h> #include <stdbool.h> #include <string.h> #include <sys/mman.h> #include <sys/sysinfo.h> #include <unistd.h> #include <xf86drm.h> -#include <msm_drm.h> + +#include "util/debug.h" +#include "util/disk_cache.h" +#include "util/strtod.h" +#include "vk_format.h" +#include "vk_util.h" static int tu_device_get_cache_uuid(uint16_t family, void *uuid) @@ -51,8 +53,8 @@ tu_device_get_cache_uuid(uint16_t family, void *uuid) return -1; memcpy(uuid, &mesa_timestamp, 4); - memcpy((char *)uuid + 4, &f, 2); - snprintf((char *)uuid + 6, VK_UUID_SIZE - 10, "tu"); + memcpy((char *) uuid + 4, &f, 2); + snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu"); return 0; } @@ -107,9 +109,9 @@ tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size) return VK_SUCCESS; fail_info: - tu_gem_close(dev, bo->gem_handle); + tu_gem_close(dev, bo->gem_handle); fail_new: - return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY); + return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY); } VkResult @@ -183,8 +185,7 @@ tu_physical_device_init(struct tu_physical_device *device, result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER, "kernel driver for device %s has version %d.%d, " "but Vulkan requires version >= %d.%d", - path, - version->version_major, version->version_minor, + path, version->version_major, version->version_minor, min_version_major, min_version_minor); drmFreeVersion(version); close(fd); @@ -202,7 +203,8 @@ tu_physical_device_init(struct tu_physical_device *device, strncpy(device->path, path, ARRAY_SIZE(device->path)); if (instance->enabled_extensions.KHR_display) { - master_fd = open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC); + master_fd = + open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC); if (master_fd >= 0) { /* TODO: free master_fd is accel is not working? 
*/ } @@ -215,16 +217,16 @@ tu_physical_device_init(struct tu_physical_device *device, if (!device->drm_device) { if (instance->debug_flags & TU_DEBUG_STARTUP) tu_logi("Could not create the libdrm device"); - result = vk_errorf( - instance, VK_ERROR_INITIALIZATION_FAILED, "could not create the libdrm device"); - goto fail; + result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED, + "could not create the libdrm device"); + goto fail; } if (tu_drm_query_param(device, MSM_PARAM_GPU_ID, &val)) { if (instance->debug_flags & TU_DEBUG_STARTUP) tu_logi("Could not query the GPU ID"); - result = vk_errorf( - instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GPU ID"); + result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED, + "could not get GPU ID"); goto fail; } device->gpu_id = val; @@ -232,8 +234,8 @@ tu_physical_device_init(struct tu_physical_device *device, if (tu_drm_query_param(device, MSM_PARAM_GMEM_SIZE, &val)) { if (instance->debug_flags & TU_DEBUG_STARTUP) tu_logi("Could not query the GMEM size"); - result = vk_errorf( - instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GMEM size"); + result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED, + "could not get GMEM size"); goto fail; } device->gmem_size = val; @@ -241,7 +243,7 @@ tu_physical_device_init(struct tu_physical_device *device, memset(device->name, 0, sizeof(device->name)); sprintf(device->name, "FD%d", device->gpu_id); - switch(device->gpu_id) { + switch (device->gpu_id) { case 530: case 630: break; @@ -251,8 +253,8 @@ tu_physical_device_init(struct tu_physical_device *device, goto fail; } if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) { - result = vk_errorf( - instance, VK_ERROR_INITIALIZATION_FAILED, "cannot generate UUID"); + result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED, + "cannot generate UUID"); goto fail; } @@ -263,9 +265,8 @@ tu_physical_device_init(struct tu_physical_device *device, disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2); device->disk_cache = disk_cache_create(device->name, buf, 0); - fprintf(stderr, - "WARNING: tu is not a conformant vulkan implementation, " - "testing use only.\n"); + fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, " + "testing use only.\n"); tu_get_driver_uuid(&device->device_uuid); tu_get_device_uuid(&device->device_uuid); @@ -329,9 +330,9 @@ static const VkAllocationCallbacks default_alloc = { .pfnFree = default_free_func, }; -static const struct debug_control tu_debug_options[] = { { "startup", - TU_DEBUG_STARTUP }, - { NULL, 0 } }; +static const struct debug_control tu_debug_options[] = { + { "startup", TU_DEBUG_STARTUP }, { NULL, 0 } +}; const char * tu_get_debug_option_name(int id) @@ -368,10 +369,7 @@ tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo, tu_EnumerateInstanceVersion(&client_version); } - instance = vk_zalloc2(&default_alloc, - pAllocator, - sizeof(*instance), - 8, + instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); if (!instance) return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY); @@ -387,7 +385,7 @@ tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo, instance->physical_device_count = -1; instance->debug_flags = - parse_debug_string(getenv("TU_DEBUG"), tu_debug_options); + parse_debug_string(getenv("TU_DEBUG"), tu_debug_options); if (instance->debug_flags & TU_DEBUG_STARTUP) tu_logi("Created an instance"); @@ -459,14 +457,13 @@ tu_enumerate_devices(struct tu_instance *instance) if (max_devices < 1) 
return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER); - for (unsigned i = 0; i < (unsigned)max_devices; i++) { + for (unsigned i = 0; i < (unsigned) max_devices; i++) { if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER && devices[i]->bustype == DRM_BUS_PLATFORM) { - result = tu_physical_device_init(instance->physical_devices + - instance->physical_device_count, - instance, - devices[i]); + result = tu_physical_device_init( + instance->physical_devices + instance->physical_device_count, + instance, devices[i]); if (result == VK_SUCCESS) ++instance->physical_device_count; else if (result != VK_ERROR_INCOMPATIBLE_DRIVER) @@ -495,10 +492,10 @@ tu_EnumeratePhysicalDevices(VkInstance _instance, } for (uint32_t i = 0; i < instance->physical_device_count; ++i) { - vk_outarray_append(&out, p) { + vk_outarray_append(&out, p) + { *p = tu_physical_device_to_handle(instance->physical_devices + i); } - } return vk_outarray_status(&out); @@ -511,7 +508,8 @@ tu_EnumeratePhysicalDeviceGroups( VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties) { TU_FROM_HANDLE(tu_instance, instance, _instance); - VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties, pPhysicalDeviceGroupCount); + VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties, + pPhysicalDeviceGroupCount); VkResult result; if (instance->physical_device_count < 0) { @@ -521,10 +519,11 @@ tu_EnumeratePhysicalDeviceGroups( } for (uint32_t i = 0; i < instance->physical_device_count; ++i) { - vk_outarray_append(&out, p) { + vk_outarray_append(&out, p) + { p->physicalDeviceCount = 1; p->physicalDevices[0] = - tu_physical_device_to_handle(instance->physical_devices + i); + tu_physical_device_to_handle(instance->physical_devices + i); p->subsetAllocation = false; } } @@ -538,7 +537,7 @@ tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice, { memset(pFeatures, 0, sizeof(*pFeatures)); - *pFeatures = (VkPhysicalDeviceFeatures){ + *pFeatures = (VkPhysicalDeviceFeatures) { .robustBufferAccess = false, .fullDrawIndexUint32 = false, .imageCubeArray = false, @@ -594,81 +593,81 @@ tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice, vk_foreach_struct(ext, pFeatures->pNext) { switch (ext->sType) { - case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: { - VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext; - features->variablePointersStorageBuffer = false; - features->variablePointers = false; - break; - } - case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: { - VkPhysicalDeviceMultiviewFeaturesKHR *features = - (VkPhysicalDeviceMultiviewFeaturesKHR *)ext; - features->multiview = false; - features->multiviewGeometryShader = false; - features->multiviewTessellationShader = false; - break; - } - case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: { - VkPhysicalDeviceShaderDrawParameterFeatures *features = - (VkPhysicalDeviceShaderDrawParameterFeatures *)ext; - features->shaderDrawParameters = false; - break; - } - case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: { - VkPhysicalDeviceProtectedMemoryFeatures *features = - (VkPhysicalDeviceProtectedMemoryFeatures *)ext; - features->protectedMemory = false; - break; - } - case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: { - VkPhysicalDevice16BitStorageFeatures *features = - (VkPhysicalDevice16BitStorageFeatures *)ext; - features->storageBuffer16BitAccess = false; - features->uniformAndStorageBuffer16BitAccess = false; - features->storagePushConstant16 = false; - features->storageInputOutput16 = 
false; - break; - } - case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: { - VkPhysicalDeviceSamplerYcbcrConversionFeatures *features = - (VkPhysicalDeviceSamplerYcbcrConversionFeatures *)ext; - features->samplerYcbcrConversion = false; - break; - } - case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: { - VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features = - (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext; - features->shaderInputAttachmentArrayDynamicIndexing = false; - features->shaderUniformTexelBufferArrayDynamicIndexing = false; - features->shaderStorageTexelBufferArrayDynamicIndexing = false; - features->shaderUniformBufferArrayNonUniformIndexing = false; - features->shaderSampledImageArrayNonUniformIndexing = false; - features->shaderStorageBufferArrayNonUniformIndexing = false; - features->shaderStorageImageArrayNonUniformIndexing = false; - features->shaderInputAttachmentArrayNonUniformIndexing = false; - features->shaderUniformTexelBufferArrayNonUniformIndexing = false; - features->shaderStorageTexelBufferArrayNonUniformIndexing = false; - features->descriptorBindingUniformBufferUpdateAfterBind = false; - features->descriptorBindingSampledImageUpdateAfterBind = false; - features->descriptorBindingStorageImageUpdateAfterBind = false; - features->descriptorBindingStorageBufferUpdateAfterBind = false; - features->descriptorBindingUniformTexelBufferUpdateAfterBind = false; - features->descriptorBindingStorageTexelBufferUpdateAfterBind = false; - features->descriptorBindingUpdateUnusedWhilePending = false; - features->descriptorBindingPartiallyBound = false; - features->descriptorBindingVariableDescriptorCount = false; - features->runtimeDescriptorArray = false; - break; - } - case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: { - VkPhysicalDeviceConditionalRenderingFeaturesEXT *features = - (VkPhysicalDeviceConditionalRenderingFeaturesEXT *)ext; - features->conditionalRendering = false; - features->inheritedConditionalRendering = false; - break; - } - default: - break; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: { + VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *) ext; + features->variablePointersStorageBuffer = false; + features->variablePointers = false; + break; + } + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: { + VkPhysicalDeviceMultiviewFeaturesKHR *features = + (VkPhysicalDeviceMultiviewFeaturesKHR *) ext; + features->multiview = false; + features->multiviewGeometryShader = false; + features->multiviewTessellationShader = false; + break; + } + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: { + VkPhysicalDeviceShaderDrawParameterFeatures *features = + (VkPhysicalDeviceShaderDrawParameterFeatures *) ext; + features->shaderDrawParameters = false; + break; + } + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: { + VkPhysicalDeviceProtectedMemoryFeatures *features = + (VkPhysicalDeviceProtectedMemoryFeatures *) ext; + features->protectedMemory = false; + break; + } + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: { + VkPhysicalDevice16BitStorageFeatures *features = + (VkPhysicalDevice16BitStorageFeatures *) ext; + features->storageBuffer16BitAccess = false; + features->uniformAndStorageBuffer16BitAccess = false; + features->storagePushConstant16 = false; + features->storageInputOutput16 = false; + break; + } + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: { + 
VkPhysicalDeviceSamplerYcbcrConversionFeatures *features = + (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext; + features->samplerYcbcrConversion = false; + break; + } + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: { + VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features = + (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext; + features->shaderInputAttachmentArrayDynamicIndexing = false; + features->shaderUniformTexelBufferArrayDynamicIndexing = false; + features->shaderStorageTexelBufferArrayDynamicIndexing = false; + features->shaderUniformBufferArrayNonUniformIndexing = false; + features->shaderSampledImageArrayNonUniformIndexing = false; + features->shaderStorageBufferArrayNonUniformIndexing = false; + features->shaderStorageImageArrayNonUniformIndexing = false; + features->shaderInputAttachmentArrayNonUniformIndexing = false; + features->shaderUniformTexelBufferArrayNonUniformIndexing = false; + features->shaderStorageTexelBufferArrayNonUniformIndexing = false; + features->descriptorBindingUniformBufferUpdateAfterBind = false; + features->descriptorBindingSampledImageUpdateAfterBind = false; + features->descriptorBindingStorageImageUpdateAfterBind = false; + features->descriptorBindingStorageBufferUpdateAfterBind = false; + features->descriptorBindingUniformTexelBufferUpdateAfterBind = false; + features->descriptorBindingStorageTexelBufferUpdateAfterBind = false; + features->descriptorBindingUpdateUnusedWhilePending = false; + features->descriptorBindingPartiallyBound = false; + features->descriptorBindingVariableDescriptorCount = false; + features->runtimeDescriptorArray = false; + break; + } + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: { + VkPhysicalDeviceConditionalRenderingFeaturesEXT *features = + (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext; + features->conditionalRendering = false; + features->inheritedConditionalRendering = false; + break; + } + default: + break; } } return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features); @@ -688,11 +687,11 @@ tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, * there is no set limit, so we just set a pipeline limit. I don't think * any app is going to hit this soon. 
*/ size_t max_descriptor_set_size = - ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) / - (32 /* uniform buffer, 32 due to potential space wasted on alignment */ + - 32 /* storage buffer, 32 due to potential space wasted on alignment */ + - 32 /* sampler, largest when combined with image */ + - 64 /* sampled image */ + 64 /* storage image */); + ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) / + (32 /* uniform buffer, 32 due to potential space wasted on alignment */ + + 32 /* storage buffer, 32 due to potential space wasted on alignment */ + + 32 /* sampler, largest when combined with image */ + + 64 /* sampled image */ + 64 /* storage image */); VkPhysicalDeviceLimits limits = { .maxImageDimension1D = (1 << 14), @@ -803,7 +802,7 @@ tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, .nonCoherentAtomSize = 64, }; - *pProperties = (VkPhysicalDeviceProperties){ + *pProperties = (VkPhysicalDeviceProperties) { .apiVersion = tu_physical_device_api_version(pdevice), .driverVersion = vk_get_driver_version(), .vendorID = 0, /* TODO */ @@ -827,55 +826,53 @@ tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice, vk_foreach_struct(ext, pProperties->pNext) { switch (ext->sType) { - case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: { - VkPhysicalDevicePushDescriptorPropertiesKHR *properties = - (VkPhysicalDevicePushDescriptorPropertiesKHR *)ext; - properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS; - break; - } - case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: { - VkPhysicalDeviceIDPropertiesKHR *properties = - (VkPhysicalDeviceIDPropertiesKHR *)ext; - memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE); - memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE); - properties->deviceLUIDValid = false; - break; - } - case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: { - VkPhysicalDeviceMultiviewPropertiesKHR *properties = - (VkPhysicalDeviceMultiviewPropertiesKHR *)ext; - properties->maxMultiviewViewCount = MAX_VIEWS; - properties->maxMultiviewInstanceIndex = INT_MAX; - break; - } - case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: { - VkPhysicalDevicePointClippingPropertiesKHR *properties = - (VkPhysicalDevicePointClippingPropertiesKHR *)ext; - properties->pointClippingBehavior = - VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR; - break; - } - case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: { - VkPhysicalDeviceMaintenance3Properties *properties = - (VkPhysicalDeviceMaintenance3Properties *)ext; - /* Make sure everything is addressable by a signed 32-bit int, and - * our largest descriptors are 96 bytes. 
*/ - properties->maxPerSetDescriptors = (1ull << 31) / 96; - /* Our buffer size fields allow only this much */ - properties->maxMemoryAllocationSize = 0xFFFFFFFFull; - break; - } - default: - break; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: { + VkPhysicalDevicePushDescriptorPropertiesKHR *properties = + (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext; + properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS; + break; + } + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: { + VkPhysicalDeviceIDPropertiesKHR *properties = + (VkPhysicalDeviceIDPropertiesKHR *) ext; + memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE); + memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE); + properties->deviceLUIDValid = false; + break; + } + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: { + VkPhysicalDeviceMultiviewPropertiesKHR *properties = + (VkPhysicalDeviceMultiviewPropertiesKHR *) ext; + properties->maxMultiviewViewCount = MAX_VIEWS; + properties->maxMultiviewInstanceIndex = INT_MAX; + break; + } + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: { + VkPhysicalDevicePointClippingPropertiesKHR *properties = + (VkPhysicalDevicePointClippingPropertiesKHR *) ext; + properties->pointClippingBehavior = + VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR; + break; + } + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: { + VkPhysicalDeviceMaintenance3Properties *properties = + (VkPhysicalDeviceMaintenance3Properties *) ext; + /* Make sure everything is addressable by a signed 32-bit int, and + * our largest descriptors are 96 bytes. */ + properties->maxPerSetDescriptors = (1ull << 31) / 96; + /* Our buffer size fields allow only this much */ + properties->maxMemoryAllocationSize = 0xFFFFFFFFull; + break; + } + default: + break; } } } -static const VkQueueFamilyProperties -tu_queue_family_properties = { - .queueFlags = VK_QUEUE_GRAPHICS_BIT | - VK_QUEUE_COMPUTE_BIT | - VK_QUEUE_TRANSFER_BIT, +static const VkQueueFamilyProperties tu_queue_family_properties = { + .queueFlags = + VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT, .queueCount = 1, .timestampValidBits = 64, .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 }, @@ -889,9 +886,7 @@ tu_GetPhysicalDeviceQueueFamilyProperties( { VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount); - vk_outarray_append(&out, p) { - *p = tu_queue_family_properties; - } + vk_outarray_append(&out, p) { *p = tu_queue_family_properties; } } void @@ -902,7 +897,8 @@ tu_GetPhysicalDeviceQueueFamilyProperties2( { VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount); - vk_outarray_append(&out, p) { + vk_outarray_append(&out, p) + { p->queueFamilyProperties = tu_queue_family_properties; } } @@ -913,7 +909,7 @@ tu_get_system_heap_size() struct sysinfo info; sysinfo(&info); - uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit; + uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit; /* We don't want to burn too much ram with the GPU. If the user has 4GiB * or less, we use at most half. If they have more than 4GiB, we use 3/4. 
@@ -937,9 +933,10 @@ tu_GetPhysicalDeviceMemoryProperties( pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT; pMemoryProperties->memoryTypeCount = 1; - pMemoryProperties->memoryTypes[0].propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | - VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | - VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + pMemoryProperties->memoryTypes[0].propertyFlags = + VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | + VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; pMemoryProperties->memoryTypes[0].heapIndex = 0; } @@ -949,7 +946,7 @@ tu_GetPhysicalDeviceMemoryProperties2( VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties) { return tu_GetPhysicalDeviceMemoryProperties( - physicalDevice, &pMemoryProperties->memoryProperties); + physicalDevice, &pMemoryProperties->memoryProperties); } static int @@ -997,10 +994,10 @@ tu_CreateDevice(VkPhysicalDevice physicalDevice, if (pCreateInfo->pEnabledFeatures) { VkPhysicalDeviceFeatures supported_features; tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features); - VkBool32 *supported_feature = (VkBool32 *)&supported_features; - VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures; + VkBool32 *supported_feature = (VkBool32 *) &supported_features; + VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures; unsigned num_features = - sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32); + sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32); for (uint32_t i = 0; i < num_features; i++) { if (enabled_feature[i] && !supported_feature[i]) return vk_error(physical_device->instance, @@ -1008,11 +1005,8 @@ tu_CreateDevice(VkPhysicalDevice physicalDevice, } } - device = vk_zalloc2(&physical_device->instance->alloc, - pAllocator, - sizeof(*device), - 8, - VK_SYSTEM_ALLOCATION_SCOPE_DEVICE); + device = vk_zalloc2(&physical_device->instance->alloc, pAllocator, + sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE); if (!device) return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); @@ -1040,27 +1034,24 @@ tu_CreateDevice(VkPhysicalDevice physicalDevice, for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) { const VkDeviceQueueCreateInfo *queue_create = - &pCreateInfo->pQueueCreateInfos[i]; + &pCreateInfo->pQueueCreateInfos[i]; uint32_t qfi = queue_create->queueFamilyIndex; - device->queues[qfi] = - vk_alloc(&device->alloc, - queue_create->queueCount * sizeof(struct tu_queue), - 8, - VK_SYSTEM_ALLOCATION_SCOPE_DEVICE); + device->queues[qfi] = vk_alloc( + &device->alloc, queue_create->queueCount * sizeof(struct tu_queue), + 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE); if (!device->queues[qfi]) { result = VK_ERROR_OUT_OF_HOST_MEMORY; goto fail; } - memset(device->queues[qfi], - 0, + memset(device->queues[qfi], 0, queue_create->queueCount * sizeof(struct tu_queue)); device->queue_count[qfi] = queue_create->queueCount; for (unsigned q = 0; q < queue_create->queueCount; q++) { - result = tu_queue_init( - device, &device->queues[qfi][q], qfi, q, queue_create->flags); + result = tu_queue_init(device, &device->queues[qfi][q], qfi, q, + queue_create->flags); if (result != VK_SUCCESS) goto fail; } @@ -1074,7 +1065,7 @@ tu_CreateDevice(VkPhysicalDevice physicalDevice, ci.initialDataSize = 0; VkPipelineCache pc; result = - tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc); + tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc); if (result != VK_SUCCESS) goto fail; @@ -1142,7 +1133,7 @@ tu_GetDeviceQueue2(VkDevice _device, struct 
tu_queue *queue; queue = - &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex]; + &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex]; if (pQueueInfo->flags != queue->flags) { /* From the Vulkan 1.1.70 spec: * @@ -1166,9 +1157,9 @@ tu_GetDeviceQueue(VkDevice _device, VkQueue *pQueue) { const VkDeviceQueueInfo2 info = - (VkDeviceQueueInfo2){.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2, - .queueFamilyIndex = queueFamilyIndex, - .queueIndex = queueIndex }; + (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2, + .queueFamilyIndex = queueFamilyIndex, + .queueIndex = queueIndex }; tu_GetDeviceQueue2(_device, &info, pQueue); } @@ -1249,11 +1240,9 @@ tu_GetInstanceProcAddr(VkInstance _instance, const char *pName) { TU_FROM_HANDLE(tu_instance, instance, _instance); - return tu_lookup_entrypoint_checked(pName, - instance ? instance->api_version : 0, - instance ? &instance->enabled_extensions - : NULL, - NULL); + return tu_lookup_entrypoint_checked( + pName, instance ? instance->api_version : 0, + instance ? &instance->enabled_extensions : NULL, NULL); } /* The loader wants us to expose a second GetInstanceProcAddr function @@ -1275,10 +1264,9 @@ tu_GetDeviceProcAddr(VkDevice _device, const char *pName) { TU_FROM_HANDLE(tu_device, device, _device); - return tu_lookup_entrypoint_checked(pName, - device->instance->api_version, - &device->instance->enabled_extensions, - &device->enabled_extensions); + return tu_lookup_entrypoint_checked(pName, device->instance->api_version, + &device->instance->enabled_extensions, + &device->enabled_extensions); } static VkResult @@ -1298,10 +1286,7 @@ tu_alloc_memory(struct tu_device *device, return VK_SUCCESS; } - mem = vk_alloc2(&device->alloc, - pAllocator, - sizeof(*mem), - 8, + mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (mem == NULL) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); @@ -1367,7 +1352,7 @@ tu_MapMemory(VkDevice _device, if (mem->user_ptr) { *ppData = mem->user_ptr; - } else if (!mem->map){ + } else if (!mem->map) { result = tu_bo_map(device, &mem->bo); if (result != VK_SUCCESS) return result; @@ -1415,7 +1400,7 @@ tu_GetBufferMemoryRequirements(VkDevice _device, pMemoryRequirements->memoryTypeBits = 1; pMemoryRequirements->alignment = 16; pMemoryRequirements->size = - align64(buffer->size, pMemoryRequirements->alignment); + align64(buffer->size, pMemoryRequirements->alignment); } void @@ -1424,8 +1409,8 @@ tu_GetBufferMemoryRequirements2( const VkBufferMemoryRequirementsInfo2KHR *pInfo, VkMemoryRequirements2KHR *pMemoryRequirements) { - tu_GetBufferMemoryRequirements( - device, pInfo->buffer, &pMemoryRequirements->memoryRequirements); + tu_GetBufferMemoryRequirements(device, pInfo->buffer, + &pMemoryRequirements->memoryRequirements); } void @@ -1445,8 +1430,8 @@ tu_GetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2KHR *pInfo, VkMemoryRequirements2KHR *pMemoryRequirements) { - tu_GetImageMemoryRequirements( - device, pInfo->image, &pMemoryRequirements->memoryRequirements); + tu_GetImageMemoryRequirements(device, pInfo->image, + &pMemoryRequirements->memoryRequirements); } void @@ -1542,11 +1527,9 @@ tu_CreateFence(VkDevice _device, { TU_FROM_HANDLE(tu_device, device, _device); - struct tu_fence *fence = vk_alloc2(&device->alloc, - pAllocator, - sizeof(*fence), - 8, - VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + struct tu_fence *fence = + vk_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8, + 
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (!fence) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); @@ -1602,11 +1585,9 @@ tu_CreateSemaphore(VkDevice _device, { TU_FROM_HANDLE(tu_device, device, _device); - struct tu_semaphore *sem = vk_alloc2(&device->alloc, - pAllocator, - sizeof(*sem), - 8, - VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + struct tu_semaphore *sem = + vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (!sem) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); @@ -1634,11 +1615,9 @@ tu_CreateEvent(VkDevice _device, VkEvent *pEvent) { TU_FROM_HANDLE(tu_device, device, _device); - struct tu_event *event = vk_alloc2(&device->alloc, - pAllocator, - sizeof(*event), - 8, - VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + struct tu_event *event = + vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (!event) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); @@ -1700,10 +1679,7 @@ tu_CreateBuffer(VkDevice _device, assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO); - buffer = vk_alloc2(&device->alloc, - pAllocator, - sizeof(*buffer), - 8, + buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (buffer == NULL) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); @@ -1735,8 +1711,8 @@ static uint32_t tu_surface_max_layer_count(struct tu_image_view *iview) { return iview->type == VK_IMAGE_VIEW_TYPE_3D - ? iview->extent.depth - : (iview->base_layer + iview->layer_count); + ? iview->extent.depth + : (iview->base_layer + iview->layer_count); } VkResult @@ -1750,11 +1726,10 @@ tu_CreateFramebuffer(VkDevice _device, assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO); - size_t size = - sizeof(*framebuffer) + - sizeof(struct tu_attachment_info) * pCreateInfo->attachmentCount; - framebuffer = vk_alloc2( - &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) * + pCreateInfo->attachmentCount; + framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (framebuffer == NULL) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); @@ -1770,7 +1745,7 @@ tu_CreateFramebuffer(VkDevice _device, framebuffer->width = MIN2(framebuffer->width, iview->extent.width); framebuffer->height = MIN2(framebuffer->height, iview->extent.height); framebuffer->layers = - MIN2(framebuffer->layers, tu_surface_max_layer_count(iview)); + MIN2(framebuffer->layers, tu_surface_max_layer_count(iview)); } *pFramebuffer = tu_framebuffer_to_handle(framebuffer); @@ -1808,10 +1783,7 @@ tu_CreateSampler(VkDevice _device, assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO); - sampler = vk_alloc2(&device->alloc, - pAllocator, - sizeof(*sampler), - 8, + sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (!sampler) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); @@ -1845,36 +1817,37 @@ PUBLIC VKAPI_ATTR VkResult VKAPI_CALL vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion) { /* For the full details on loader interface versioning, see - * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>. - * What follows is a condensed summary, to help you navigate the large and - * confusing official doc. 
- * - * - Loader interface v0 is incompatible with later versions. We don't - * support it. - * - * - In loader interface v1: - * - The first ICD entrypoint called by the loader is - * vk_icdGetInstanceProcAddr(). The ICD must statically expose this - * entrypoint. - * - The ICD must statically expose no other Vulkan symbol unless it is - * linked with -Bsymbolic. - * - Each dispatchable Vulkan handle created by the ICD must be - * a pointer to a struct whose first member is VK_LOADER_DATA. The - * ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC. - * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and - * vkDestroySurfaceKHR(). The ICD must be capable of working with - * such loader-managed surfaces. - * - * - Loader interface v2 differs from v1 in: - * - The first ICD entrypoint called by the loader is - * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must - * statically expose this entrypoint. - * - * - Loader interface v3 differs from v2 in: - * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(), - * vkDestroySurfaceKHR(), and other API which uses VKSurfaceKHR, - * because the loader no longer does so. - */ + * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>. + * What follows is a condensed summary, to help you navigate the large and + * confusing official doc. + * + * - Loader interface v0 is incompatible with later versions. We don't + * support it. + * + * - In loader interface v1: + * - The first ICD entrypoint called by the loader is + * vk_icdGetInstanceProcAddr(). The ICD must statically expose this + * entrypoint. + * - The ICD must statically expose no other Vulkan symbol unless it + * is linked with -Bsymbolic. + * - Each dispatchable Vulkan handle created by the ICD must be + * a pointer to a struct whose first member is VK_LOADER_DATA. The + * ICD must initialize VK_LOADER_DATA.loadMagic to + * ICD_LOADER_MAGIC. + * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and + * vkDestroySurfaceKHR(). The ICD must be capable of working with + * such loader-managed surfaces. + * + * - Loader interface v2 differs from v1 in: + * - The first ICD entrypoint called by the loader is + * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must + * statically expose this entrypoint. + * + * - Loader interface v3 differs from v2 in: + * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(), + * vkDestroySurfaceKHR(), and other API which uses VKSurfaceKHR, + * because the loader no longer does so. 
+ */ *pSupportedVersion = MIN2(*pSupportedVersion, 3u); return VK_SUCCESS; } @@ -1910,10 +1883,8 @@ tu_CreateDebugReportCallbackEXT( { TU_FROM_HANDLE(tu_instance, instance, _instance); return vk_create_debug_report_callback(&instance->debug_report_callbacks, - pCreateInfo, - pAllocator, - &instance->alloc, - pCallback); + pCreateInfo, pAllocator, + &instance->alloc, pCallback); } void @@ -1923,9 +1894,7 @@ tu_DestroyDebugReportCallbackEXT(VkInstance _instance, { TU_FROM_HANDLE(tu_instance, instance, _instance); vk_destroy_debug_report_callback(&instance->debug_report_callbacks, - _callback, - pAllocator, - &instance->alloc); + _callback, pAllocator, &instance->alloc); } void @@ -1939,14 +1908,8 @@ tu_DebugReportMessageEXT(VkInstance _instance, const char *pMessage) { TU_FROM_HANDLE(tu_instance, instance, _instance); - vk_debug_report(&instance->debug_report_callbacks, - flags, - objectType, - object, - location, - messageCode, - pLayerPrefix, - pMessage); + vk_debug_report(&instance->debug_report_callbacks, flags, objectType, + object, location, messageCode, pLayerPrefix, pMessage); } void diff --git a/src/freedreno/vulkan/tu_drm.c b/src/freedreno/vulkan/tu_drm.c index 11c3b008155..88baf82adf2 100644 --- a/src/freedreno/vulkan/tu_drm.c +++ b/src/freedreno/vulkan/tu_drm.c @@ -18,19 +18,17 @@ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. */ -#include <stdint.h> -#include <sys/ioctl.h> -#include <errno.h> - -#include <msm_drm.h> - #include "tu_private.h" #include "xf86drm.h" +#include <errno.h> +#include <msm_drm.h> +#include <stdint.h> +#include <sys/ioctl.h> /** * Return gem handle on success. Return 0 on failure. @@ -43,9 +41,8 @@ tu_gem_new(struct tu_device *dev, uint64_t size, uint32_t flags) .flags = flags, }; - - int ret = drmCommandWriteRead(dev->physical_device->local_fd, DRM_MSM_GEM_NEW, - &req, sizeof(req)); + int ret = drmCommandWriteRead(dev->physical_device->local_fd, + DRM_MSM_GEM_NEW, &req, sizeof(req)); if (ret) return 0; @@ -71,8 +68,8 @@ tu_gem_info(struct tu_device *dev, uint32_t gem_handle, uint32_t flags) .flags = flags, }; - int ret = drmCommandWriteRead(dev->physical_device->local_fd, DRM_MSM_GEM_INFO, - &req, sizeof(req)); + int ret = drmCommandWriteRead(dev->physical_device->local_fd, + DRM_MSM_GEM_INFO, &req, sizeof(req)); if (ret == -1) return UINT64_MAX; @@ -93,19 +90,21 @@ tu_gem_info_iova(struct tu_device *dev, uint32_t gem_handle) return tu_gem_info(dev, gem_handle, MSM_INFO_IOVA); } - int -tu_drm_query_param(struct tu_physical_device *dev, uint32_t param, uint64_t *value) +tu_drm_query_param(struct tu_physical_device *dev, + uint32_t param, + uint64_t *value) { - /* Technically this requires a pipe, but the kernel only supports one pipe anyway - * at the time of writing and most of these are clearly pipe independent. */ + /* Technically this requires a pipe, but the kernel only supports one pipe + * anyway at the time of writing and most of these are clearly pipe + * independent. 
*/ struct drm_msm_param req = { .pipe = MSM_PIPE_3D0, .param = param, }; - int ret = drmCommandWriteRead(dev->local_fd, DRM_MSM_GET_PARAM, - &req, sizeof(req)); + int ret = drmCommandWriteRead(dev->local_fd, DRM_MSM_GET_PARAM, &req, + sizeof(req)); if (ret) return ret; diff --git a/src/freedreno/vulkan/tu_formats.c b/src/freedreno/vulkan/tu_formats.c index 3571565cb50..f30c56bdfb2 100644 --- a/src/freedreno/vulkan/tu_formats.c +++ b/src/freedreno/vulkan/tu_formats.c @@ -19,19 +19,17 @@ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. */ #include "tu_private.h" -#include "vk_format.h" - -#include "vk_util.h" - #include "util/format_r11g11b10f.h" #include "util/format_srgb.h" #include "util/u_half.h" +#include "vk_format.h" +#include "vk_util.h" static void tu_physical_device_get_format_properties( @@ -60,8 +58,8 @@ tu_GetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice, { TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice); - tu_physical_device_get_format_properties( - physical_device, format, pFormatProperties); + tu_physical_device_get_format_properties(physical_device, format, + pFormatProperties); } void @@ -73,13 +71,14 @@ tu_GetPhysicalDeviceFormatProperties2( TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice); tu_physical_device_get_format_properties( - physical_device, format, &pFormatProperties->formatProperties); + physical_device, format, &pFormatProperties->formatProperties); } static VkResult -tu_get_image_format_properties(struct tu_physical_device *physical_device, - const VkPhysicalDeviceImageFormatInfo2KHR *info, - VkImageFormatProperties *pImageFormatProperties) +tu_get_image_format_properties( + struct tu_physical_device *physical_device, + const VkPhysicalDeviceImageFormatInfo2KHR *info, + VkImageFormatProperties *pImageFormatProperties) { VkFormatProperties format_props; @@ -89,8 +88,8 @@ tu_get_image_format_properties(struct tu_physical_device *physical_device, uint32_t maxArraySize; VkSampleCountFlags sampleCounts = VK_SAMPLE_COUNT_1_BIT; - tu_physical_device_get_format_properties( - physical_device, info->format, &format_props); + tu_physical_device_get_format_properties(physical_device, info->format, + &format_props); if (info->tiling == VK_IMAGE_TILING_LINEAR) { format_feature_flags = format_props.linearTilingFeatures; } else if (info->tiling == VK_IMAGE_TILING_OPTIMAL) { @@ -107,29 +106,29 @@ tu_get_image_format_properties(struct tu_physical_device *physical_device, goto unsupported; switch (info->type) { - default: - unreachable("bad vkimage type\n"); - case VK_IMAGE_TYPE_1D: - maxExtent.width = 16384; - maxExtent.height = 1; - maxExtent.depth = 1; - maxMipLevels = 15; /* log2(maxWidth) + 1 */ - maxArraySize = 2048; - break; - case VK_IMAGE_TYPE_2D: - maxExtent.width = 16384; - maxExtent.height = 16384; - maxExtent.depth = 1; - maxMipLevels = 15; /* log2(maxWidth) + 1 */ - maxArraySize = 2048; - break; - case VK_IMAGE_TYPE_3D: - maxExtent.width = 2048; - maxExtent.height = 2048; - maxExtent.depth = 2048; - maxMipLevels = 12; /* log2(maxWidth) + 1 */ - maxArraySize = 1; - break; + default: + unreachable("bad vkimage type\n"); + case 
VK_IMAGE_TYPE_1D: + maxExtent.width = 16384; + maxExtent.height = 1; + maxExtent.depth = 1; + maxMipLevels = 15; /* log2(maxWidth) + 1 */ + maxArraySize = 2048; + break; + case VK_IMAGE_TYPE_2D: + maxExtent.width = 16384; + maxExtent.height = 16384; + maxExtent.depth = 1; + maxMipLevels = 15; /* log2(maxWidth) + 1 */ + maxArraySize = 2048; + break; + case VK_IMAGE_TYPE_3D: + maxExtent.width = 2048; + maxExtent.height = 2048; + maxExtent.depth = 2048; + maxMipLevels = 12; /* log2(maxWidth) + 1 */ + maxArraySize = 1; + break; } if (info->tiling == VK_IMAGE_TILING_OPTIMAL && @@ -139,8 +138,8 @@ tu_get_image_format_properties(struct tu_physical_device *physical_device, VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) && !(info->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) && !(info->usage & VK_IMAGE_USAGE_STORAGE_BIT)) { - sampleCounts |= - VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT | VK_SAMPLE_COUNT_8_BIT; + sampleCounts |= VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT | + VK_SAMPLE_COUNT_8_BIT; } if (info->usage & VK_IMAGE_USAGE_SAMPLED_BIT) { @@ -168,7 +167,7 @@ tu_get_image_format_properties(struct tu_physical_device *physical_device, } } - *pImageFormatProperties = (VkImageFormatProperties){ + *pImageFormatProperties = (VkImageFormatProperties) { .maxExtent = maxExtent, .maxMipLevels = maxMipLevels, .maxArrayLayers = maxArraySize, @@ -182,7 +181,7 @@ tu_get_image_format_properties(struct tu_physical_device *physical_device, return VK_SUCCESS; unsupported: - *pImageFormatProperties = (VkImageFormatProperties){ + *pImageFormatProperties = (VkImageFormatProperties) { .maxExtent = { 0, 0, 0 }, .maxMipLevels = 0, .maxArrayLayers = 0, @@ -215,44 +214,44 @@ tu_GetPhysicalDeviceImageFormatProperties( .flags = createFlags, }; - return tu_get_image_format_properties( - physical_device, &info, pImageFormatProperties); + return tu_get_image_format_properties(physical_device, &info, + pImageFormatProperties); } static void get_external_image_format_properties( - const VkPhysicalDeviceImageFormatInfo2KHR *pImageFormatInfo, - VkExternalMemoryHandleTypeFlagBitsKHR handleType, - VkExternalMemoryPropertiesKHR *external_properties) + const VkPhysicalDeviceImageFormatInfo2KHR *pImageFormatInfo, + VkExternalMemoryHandleTypeFlagBitsKHR handleType, + VkExternalMemoryPropertiesKHR *external_properties) { VkExternalMemoryFeatureFlagBitsKHR flags = 0; VkExternalMemoryHandleTypeFlagsKHR export_flags = 0; VkExternalMemoryHandleTypeFlagsKHR compat_flags = 0; switch (handleType) { - case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR: - case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT: - switch (pImageFormatInfo->type) { - case VK_IMAGE_TYPE_2D: - flags = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR | - VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR | - VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR; - compat_flags = export_flags = - VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR | - VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT; - break; - default: - break; - } - break; - case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT: - flags = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR; - compat_flags = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR: + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT: + switch (pImageFormatInfo->type) { + case VK_IMAGE_TYPE_2D: + flags = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR | + VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR | + VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR; + compat_flags = 
export_flags = + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR | + VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT; break; default: break; + } + break; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT: + flags = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR; + compat_flags = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT; + break; + default: + break; } - *external_properties = (VkExternalMemoryPropertiesKHR){ + *external_properties = (VkExternalMemoryPropertiesKHR) { .externalMemoryFeatures = flags, .exportFromImportedHandleTypes = export_flags, .compatibleHandleTypes = compat_flags, @@ -271,7 +270,7 @@ tu_GetPhysicalDeviceImageFormatProperties2( VkResult result; result = tu_get_image_format_properties( - physical_device, base_info, &base_props->imageFormatProperties); + physical_device, base_info, &base_props->imageFormatProperties); if (result != VK_SUCCESS) return result; @@ -279,11 +278,11 @@ tu_GetPhysicalDeviceImageFormatProperties2( vk_foreach_struct_const(s, base_info->pNext) { switch (s->sType) { - case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR: - external_info = (const void *)s; - break; - default: - break; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR: + external_info = (const void *) s; + break; + default: + break; } } @@ -291,11 +290,11 @@ tu_GetPhysicalDeviceImageFormatProperties2( vk_foreach_struct(s, base_props->pNext) { switch (s->sType) { - case VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR: - external_props = (void *)s; - break; - default: - break; + case VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR: + external_props = (void *) s; + break; + default: + break; } } @@ -307,29 +306,27 @@ tu_GetPhysicalDeviceImageFormatProperties2( */ if (external_info && external_info->handleType != 0) { switch (external_info->handleType) { - case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR: - case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT: - case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT: - get_external_image_format_properties( - base_info, - external_info->handleType, - &external_props->externalMemoryProperties); - break; - default: - /* From the Vulkan 1.0.42 spec: - * - * If handleType is not compatible with the [parameters] - * specified - * in VkPhysicalDeviceImageFormatInfo2KHR, then - * vkGetPhysicalDeviceImageFormatProperties2KHR returns - * VK_ERROR_FORMAT_NOT_SUPPORTED. - */ - result = - vk_errorf(physical_device->instance, - VK_ERROR_FORMAT_NOT_SUPPORTED, - "unsupported VkExternalMemoryTypeFlagBitsKHR 0x%x", - external_info->handleType); - goto fail; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR: + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT: + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT: + get_external_image_format_properties( + base_info, external_info->handleType, + &external_props->externalMemoryProperties); + break; + default: + /* From the Vulkan 1.0.42 spec: + * + * If handleType is not compatible with the [parameters] + * specified + * in VkPhysicalDeviceImageFormatInfo2KHR, then + * vkGetPhysicalDeviceImageFormatProperties2KHR returns + * VK_ERROR_FORMAT_NOT_SUPPORTED. + */ + result = vk_errorf( + physical_device->instance, VK_ERROR_FORMAT_NOT_SUPPORTED, + "unsupported VkExternalMemoryTypeFlagBitsKHR 0x%x", + external_info->handleType); + goto fail; } } @@ -344,7 +341,7 @@ fail: * the implementation for use in vkCreateImage, then all members of * imageFormatProperties will be filled with zero. 
*/ - base_props->imageFormatProperties = (VkImageFormatProperties){ 0 }; + base_props->imageFormatProperties = (VkImageFormatProperties) { 0 }; } return result; @@ -386,25 +383,25 @@ tu_GetPhysicalDeviceExternalBufferProperties( VkExternalMemoryHandleTypeFlagsKHR export_flags = 0; VkExternalMemoryHandleTypeFlagsKHR compat_flags = 0; switch (pExternalBufferInfo->handleType) { - case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR: - case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT: - flags = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR | - VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR; - compat_flags = export_flags = - VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR | - VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT; - break; - case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT: - flags = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR; - compat_flags = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT; - break; - default: - break; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR: + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT: + flags = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR | + VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR; + compat_flags = export_flags = + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR | + VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT; + break; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT: + flags = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR; + compat_flags = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT; + break; + default: + break; } pExternalBufferProperties->externalMemoryProperties = - (VkExternalMemoryPropertiesKHR){ - .externalMemoryFeatures = flags, - .exportFromImportedHandleTypes = export_flags, - .compatibleHandleTypes = compat_flags, - }; + (VkExternalMemoryPropertiesKHR) { + .externalMemoryFeatures = flags, + .exportFromImportedHandleTypes = export_flags, + .compatibleHandleTypes = compat_flags, + }; } diff --git a/src/freedreno/vulkan/tu_image.c b/src/freedreno/vulkan/tu_image.c index db7462d65c0..9a6930b0a9a 100644 --- a/src/freedreno/vulkan/tu_image.c +++ b/src/freedreno/vulkan/tu_image.c @@ -21,17 +21,17 @@ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
*/ #include "tu_private.h" + #include "util/debug.h" #include "util/u_atomic.h" #include "vk_format.h" #include "vk_util.h" - static inline bool image_level_linear(struct tu_image *image, int level) { @@ -40,23 +40,20 @@ image_level_linear(struct tu_image *image, int level) } /* indexed by cpp: */ -static const struct { +static const struct +{ unsigned pitchalign; unsigned heightalign; } tile_alignment[] = { - [1] = { 128, 32 }, - [2] = { 128, 16 }, - [3] = { 128, 16 }, - [4] = { 64, 16 }, - [8] = { 64, 16 }, - [12] = { 64, 16 }, - [16] = { 64, 16 }, + [1] = { 128, 32 }, [2] = { 128, 16 }, [3] = { 128, 16 }, [4] = { 64, 16 }, + [8] = { 64, 16 }, [12] = { 64, 16 }, [16] = { 64, 16 }, }; static void setup_slices(struct tu_image *image, const VkImageCreateInfo *pCreateInfo) { - enum vk_format_layout layout = vk_format_description(pCreateInfo->format)->layout; + enum vk_format_layout layout = + vk_format_description(pCreateInfo->format)->layout; uint32_t layer_size = 0; uint32_t width = pCreateInfo->extent.width; uint32_t height = pCreateInfo->extent.height; @@ -92,13 +89,15 @@ setup_slices(struct tu_image *image, const VkImageCreateInfo *pCreateInfo) } if (layout == VK_FORMAT_LAYOUT_ASTC) - slice->pitch = - util_align_npot(width, pitchalign * vk_format_get_blockwidth(pCreateInfo->format)); + slice->pitch = util_align_npot( + width, + pitchalign * vk_format_get_blockwidth(pCreateInfo->format)); else slice->pitch = align(width, pitchalign); slice->offset = layer_size; - blocks = vk_format_get_block_count(pCreateInfo->format, slice->pitch, aligned_height); + blocks = vk_format_get_block_count(pCreateInfo->format, slice->pitch, + aligned_height); /* 1d array and 2d array textures must all have the same layer size * for each miplevel on a3xx. 3d textures can have different layer @@ -106,9 +105,9 @@ setup_slices(struct tu_image *image, const VkImageCreateInfo *pCreateInfo) * different than what this code does), so as soon as the layer size * range gets into range, we stop reducing it. 
*/ - if (pCreateInfo->imageType == VK_IMAGE_TYPE_3D && ( - level == 1 || - (level > 1 && image->levels[level - 1].size > 0xf000))) + if (pCreateInfo->imageType == VK_IMAGE_TYPE_3D && + (level == 1 || + (level > 1 && image->levels[level - 1].size > 0xf000))) slice->size = align(blocks * cpp, alignment); else if (level == 0 || layer_first || alignment == 1) slice->size = align(blocks * cpp, alignment); @@ -125,7 +124,6 @@ setup_slices(struct tu_image *image, const VkImageCreateInfo *pCreateInfo) image->layer_size = layer_size; } - VkResult tu_image_create(VkDevice _device, const struct tu_image_create_info *create_info, @@ -144,10 +142,7 @@ tu_image_create(VkDevice _device, tu_assert(pCreateInfo->extent.height > 0); tu_assert(pCreateInfo->extent.depth > 0); - image = vk_zalloc2(&device->alloc, - alloc, - sizeof(*image), - 8, + image = vk_zalloc2(&device->alloc, alloc, sizeof(*image), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (!image) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); @@ -167,13 +162,13 @@ tu_image_create(VkDevice _device, VK_QUEUE_FAMILY_EXTERNAL_KHR) image->queue_family_mask |= (1u << TU_MAX_QUEUE_FAMILIES) - 1u; else - image->queue_family_mask |= 1u - << pCreateInfo->pQueueFamilyIndices[i]; + image->queue_family_mask |= + 1u << pCreateInfo->pQueueFamilyIndices[i]; } image->shareable = - vk_find_struct_const(pCreateInfo->pNext, - EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR) != NULL; + vk_find_struct_const(pCreateInfo->pNext, + EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR) != NULL; image->tile_mode = pCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ? 3 : 0; setup_slices(image, pCreateInfo); @@ -213,20 +208,19 @@ tu_CreateImage(VkDevice device, { #ifdef ANDROID const VkNativeBufferANDROID *gralloc_info = - vk_find_struct_const(pCreateInfo->pNext, NATIVE_BUFFER_ANDROID); + vk_find_struct_const(pCreateInfo->pNext, NATIVE_BUFFER_ANDROID); if (gralloc_info) - return tu_image_from_gralloc( - device, pCreateInfo, gralloc_info, pAllocator, pImage); + return tu_image_from_gralloc(device, pCreateInfo, gralloc_info, + pAllocator, pImage); #endif return tu_image_create(device, - &(struct tu_image_create_info) { + &(struct tu_image_create_info) { .vk_info = pCreateInfo, .scanout = false, - }, - pAllocator, - pImage); + }, + pAllocator, pImage); } void @@ -263,10 +257,7 @@ tu_CreateImageView(VkDevice _device, TU_FROM_HANDLE(tu_device, device, _device); struct tu_image_view *view; - view = vk_alloc2(&device->alloc, - pAllocator, - sizeof(*view), - 8, + view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (view == NULL) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); @@ -299,8 +290,8 @@ tu_buffer_view_init(struct tu_buffer_view *view, TU_FROM_HANDLE(tu_buffer, buffer, pCreateInfo->buffer); view->range = pCreateInfo->range == VK_WHOLE_SIZE - ? buffer->size - pCreateInfo->offset - : pCreateInfo->range; + ? 
buffer->size - pCreateInfo->offset + : pCreateInfo->range; view->vk_format = pCreateInfo->format; } @@ -313,10 +304,7 @@ tu_CreateBufferView(VkDevice _device, TU_FROM_HANDLE(tu_device, device, _device); struct tu_buffer_view *view; - view = vk_alloc2(&device->alloc, - pAllocator, - sizeof(*view), - 8, + view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (!view) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); diff --git a/src/freedreno/vulkan/tu_meta_blit.c b/src/freedreno/vulkan/tu_meta_blit.c index ebe17ec21b0..da5ff6b12b7 100644 --- a/src/freedreno/vulkan/tu_meta_blit.c +++ b/src/freedreno/vulkan/tu_meta_blit.c @@ -17,11 +17,12 @@ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. */ #include "tu_private.h" + #include "nir/nir_builder.h" void diff --git a/src/freedreno/vulkan/tu_meta_clear.c b/src/freedreno/vulkan/tu_meta_clear.c index 08a14338bc5..2beed543359 100644 --- a/src/freedreno/vulkan/tu_meta_clear.c +++ b/src/freedreno/vulkan/tu_meta_clear.c @@ -17,8 +17,8 @@ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. */ #include "tu_private.h" diff --git a/src/freedreno/vulkan/tu_meta_copy.c b/src/freedreno/vulkan/tu_meta_copy.c index a6899d5db3c..86d85e7b137 100644 --- a/src/freedreno/vulkan/tu_meta_copy.c +++ b/src/freedreno/vulkan/tu_meta_copy.c @@ -17,8 +17,8 @@ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
*/ #include "tu_private.h" @@ -45,12 +45,8 @@ tu_CmdCopyBufferToImage(VkCommandBuffer commandBuffer, TU_FROM_HANDLE(tu_image, dest_image, destImage); TU_FROM_HANDLE(tu_buffer, src_buffer, srcBuffer); - meta_copy_buffer_to_image(cmd_buffer, - src_buffer, - dest_image, - destImageLayout, - regionCount, - pRegions); + meta_copy_buffer_to_image(cmd_buffer, src_buffer, dest_image, + destImageLayout, regionCount, pRegions); } static void @@ -75,8 +71,8 @@ tu_CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, TU_FROM_HANDLE(tu_image, src_image, srcImage); TU_FROM_HANDLE(tu_buffer, dst_buffer, destBuffer); - meta_copy_image_to_buffer( - cmd_buffer, dst_buffer, src_image, srcImageLayout, regionCount, pRegions); + meta_copy_image_to_buffer(cmd_buffer, dst_buffer, src_image, + srcImageLayout, regionCount, pRegions); } static void @@ -103,11 +99,6 @@ tu_CmdCopyImage(VkCommandBuffer commandBuffer, TU_FROM_HANDLE(tu_image, src_image, srcImage); TU_FROM_HANDLE(tu_image, dest_image, destImage); - meta_copy_image(cmd_buffer, - src_image, - srcImageLayout, - dest_image, - destImageLayout, - regionCount, - pRegions); + meta_copy_image(cmd_buffer, src_image, srcImageLayout, dest_image, + destImageLayout, regionCount, pRegions); } diff --git a/src/freedreno/vulkan/tu_meta_resolve.c b/src/freedreno/vulkan/tu_meta_resolve.c index 1de63a33444..4a9ebedfdab 100644 --- a/src/freedreno/vulkan/tu_meta_resolve.c +++ b/src/freedreno/vulkan/tu_meta_resolve.c @@ -17,14 +17,15 @@ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. */ +#include "tu_private.h" + #include <assert.h> #include <stdbool.h> -#include "tu_private.h" #include "nir/nir_builder.h" #include "vk_format.h" diff --git a/src/freedreno/vulkan/tu_pass.c b/src/freedreno/vulkan/tu_pass.c index 4e0895bc11a..54047055a4f 100644 --- a/src/freedreno/vulkan/tu_pass.c +++ b/src/freedreno/vulkan/tu_pass.c @@ -21,8 +21,8 @@ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
*/ #include "tu_private.h" @@ -47,24 +47,24 @@ tu_CreateRenderPass(VkDevice _device, attachments_offset = size; size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]); - pass = vk_alloc2( - &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + pass = vk_alloc2(&device->alloc, pAllocator, size, 8, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (pass == NULL) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); memset(pass, 0, size); pass->attachment_count = pCreateInfo->attachmentCount; pass->subpass_count = pCreateInfo->subpassCount; - pass->attachments = (void *)pass + attachments_offset; + pass->attachments = (void *) pass + attachments_offset; vk_foreach_struct(ext, pCreateInfo->pNext) { switch (ext->sType) { - case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO_KHR: - multiview_info = (VkRenderPassMultiviewCreateInfoKHR *)ext; - break; - default: - break; + case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO_KHR: + multiview_info = (VkRenderPassMultiviewCreateInfoKHR *) ext; + break; + default: + break; } } @@ -86,18 +86,16 @@ tu_CreateRenderPass(VkDevice _device, const VkSubpassDescription *desc = &pCreateInfo->pSubpasses[i]; subpass_attachment_count += - desc->inputAttachmentCount + desc->colorAttachmentCount + - (desc->pResolveAttachments ? desc->colorAttachmentCount : 0) + - (desc->pDepthStencilAttachment != NULL); + desc->inputAttachmentCount + desc->colorAttachmentCount + + (desc->pResolveAttachments ? desc->colorAttachmentCount : 0) + + (desc->pDepthStencilAttachment != NULL); } if (subpass_attachment_count) { pass->subpass_attachments = vk_alloc2( - &device->alloc, - pAllocator, - subpass_attachment_count * sizeof(struct tu_subpass_attachment), - 8, - VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + &device->alloc, pAllocator, + subpass_attachment_count * sizeof(struct tu_subpass_attachment), 8, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (pass->subpass_attachments == NULL) { vk_free2(&device->alloc, pAllocator, pass); return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); @@ -121,13 +119,13 @@ tu_CreateRenderPass(VkDevice _device, p += desc->inputAttachmentCount; for (uint32_t j = 0; j < desc->inputAttachmentCount; j++) { - subpass->input_attachments[j] = (struct tu_subpass_attachment){ + subpass->input_attachments[j] = (struct tu_subpass_attachment) { .attachment = desc->pInputAttachments[j].attachment, .layout = desc->pInputAttachments[j].layout, }; if (desc->pInputAttachments[j].attachment != VK_ATTACHMENT_UNUSED) pass->attachments[desc->pInputAttachments[j].attachment] - .view_mask |= subpass->view_mask; + .view_mask |= subpass->view_mask; } } @@ -136,17 +134,18 @@ tu_CreateRenderPass(VkDevice _device, p += desc->colorAttachmentCount; for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) { - subpass->color_attachments[j] = (struct tu_subpass_attachment){ + subpass->color_attachments[j] = (struct tu_subpass_attachment) { .attachment = desc->pColorAttachments[j].attachment, .layout = desc->pColorAttachments[j].layout, }; - if (desc->pColorAttachments[j].attachment != VK_ATTACHMENT_UNUSED) { + if (desc->pColorAttachments[j].attachment != + VK_ATTACHMENT_UNUSED) { pass->attachments[desc->pColorAttachments[j].attachment] - .view_mask |= subpass->view_mask; + .view_mask |= subpass->view_mask; color_sample_count = - pCreateInfo - ->pAttachments[desc->pColorAttachments[j].attachment] - .samples; + pCreateInfo + ->pAttachments[desc->pColorAttachments[j].attachment] + .samples; } } } @@ -158,55 +157,56 @@ tu_CreateRenderPass(VkDevice 
_device, for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) { uint32_t a = desc->pResolveAttachments[j].attachment; - subpass->resolve_attachments[j] = (struct tu_subpass_attachment){ + subpass->resolve_attachments[j] = (struct tu_subpass_attachment) { .attachment = desc->pResolveAttachments[j].attachment, .layout = desc->pResolveAttachments[j].layout, }; if (a != VK_ATTACHMENT_UNUSED) { subpass->has_resolve = true; pass->attachments[desc->pResolveAttachments[j].attachment] - .view_mask |= subpass->view_mask; + .view_mask |= subpass->view_mask; } } } if (desc->pDepthStencilAttachment) { - subpass->depth_stencil_attachment = (struct tu_subpass_attachment){ + subpass->depth_stencil_attachment = (struct tu_subpass_attachment) { .attachment = desc->pDepthStencilAttachment->attachment, .layout = desc->pDepthStencilAttachment->layout, }; if (desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { pass->attachments[desc->pDepthStencilAttachment->attachment] - .view_mask |= subpass->view_mask; + .view_mask |= subpass->view_mask; depth_sample_count = - pCreateInfo - ->pAttachments[desc->pDepthStencilAttachment->attachment] - .samples; + pCreateInfo + ->pAttachments[desc->pDepthStencilAttachment->attachment] + .samples; } } else { subpass->depth_stencil_attachment.attachment = VK_ATTACHMENT_UNUSED; } - subpass->max_sample_count = MAX2(color_sample_count, depth_sample_count); + subpass->max_sample_count = + MAX2(color_sample_count, depth_sample_count); } for (unsigned i = 0; i < pCreateInfo->dependencyCount; ++i) { uint32_t dst = pCreateInfo->pDependencies[i].dstSubpass; if (dst == VK_SUBPASS_EXTERNAL) { pass->end_barrier.src_stage_mask = - pCreateInfo->pDependencies[i].srcStageMask; + pCreateInfo->pDependencies[i].srcStageMask; pass->end_barrier.src_access_mask = - pCreateInfo->pDependencies[i].srcAccessMask; + pCreateInfo->pDependencies[i].srcAccessMask; pass->end_barrier.dst_access_mask = - pCreateInfo->pDependencies[i].dstAccessMask; + pCreateInfo->pDependencies[i].dstAccessMask; } else { pass->subpasses[dst].start_barrier.src_stage_mask = - pCreateInfo->pDependencies[i].srcStageMask; + pCreateInfo->pDependencies[i].srcStageMask; pass->subpasses[dst].start_barrier.src_access_mask = - pCreateInfo->pDependencies[i].srcAccessMask; + pCreateInfo->pDependencies[i].srcAccessMask; pass->subpasses[dst].start_barrier.dst_access_mask = - pCreateInfo->pDependencies[i].dstAccessMask; + pCreateInfo->pDependencies[i].dstAccessMask; } } @@ -234,15 +234,15 @@ tu_CreateRenderPass2KHR(VkDevice _device, attachments_offset = size; size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]); - pass = vk_alloc2( - &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + pass = vk_alloc2(&device->alloc, pAllocator, size, 8, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (pass == NULL) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); memset(pass, 0, size); pass->attachment_count = pCreateInfo->attachmentCount; pass->subpass_count = pCreateInfo->subpassCount; - pass->attachments = (void *)pass + attachments_offset; + pass->attachments = (void *) pass + attachments_offset; for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) { struct tu_render_pass_attachment *att = &pass->attachments[i]; @@ -262,18 +262,16 @@ tu_CreateRenderPass2KHR(VkDevice _device, const VkSubpassDescription2KHR *desc = &pCreateInfo->pSubpasses[i]; subpass_attachment_count += - desc->inputAttachmentCount + desc->colorAttachmentCount + - (desc->pResolveAttachments ? 
desc->colorAttachmentCount : 0) + - (desc->pDepthStencilAttachment != NULL); + desc->inputAttachmentCount + desc->colorAttachmentCount + + (desc->pResolveAttachments ? desc->colorAttachmentCount : 0) + + (desc->pDepthStencilAttachment != NULL); } if (subpass_attachment_count) { pass->subpass_attachments = vk_alloc2( - &device->alloc, - pAllocator, - subpass_attachment_count * sizeof(struct tu_subpass_attachment), - 8, - VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + &device->alloc, pAllocator, + subpass_attachment_count * sizeof(struct tu_subpass_attachment), 8, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (pass->subpass_attachments == NULL) { vk_free2(&device->alloc, pAllocator, pass); return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); @@ -296,13 +294,13 @@ tu_CreateRenderPass2KHR(VkDevice _device, p += desc->inputAttachmentCount; for (uint32_t j = 0; j < desc->inputAttachmentCount; j++) { - subpass->input_attachments[j] = (struct tu_subpass_attachment){ + subpass->input_attachments[j] = (struct tu_subpass_attachment) { .attachment = desc->pInputAttachments[j].attachment, .layout = desc->pInputAttachments[j].layout, }; if (desc->pInputAttachments[j].attachment != VK_ATTACHMENT_UNUSED) pass->attachments[desc->pInputAttachments[j].attachment] - .view_mask |= subpass->view_mask; + .view_mask |= subpass->view_mask; } } @@ -311,17 +309,18 @@ tu_CreateRenderPass2KHR(VkDevice _device, p += desc->colorAttachmentCount; for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) { - subpass->color_attachments[j] = (struct tu_subpass_attachment){ + subpass->color_attachments[j] = (struct tu_subpass_attachment) { .attachment = desc->pColorAttachments[j].attachment, .layout = desc->pColorAttachments[j].layout, }; - if (desc->pColorAttachments[j].attachment != VK_ATTACHMENT_UNUSED) { + if (desc->pColorAttachments[j].attachment != + VK_ATTACHMENT_UNUSED) { pass->attachments[desc->pColorAttachments[j].attachment] - .view_mask |= subpass->view_mask; + .view_mask |= subpass->view_mask; color_sample_count = - pCreateInfo - ->pAttachments[desc->pColorAttachments[j].attachment] - .samples; + pCreateInfo + ->pAttachments[desc->pColorAttachments[j].attachment] + .samples; } } } @@ -333,55 +332,56 @@ tu_CreateRenderPass2KHR(VkDevice _device, for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) { uint32_t a = desc->pResolveAttachments[j].attachment; - subpass->resolve_attachments[j] = (struct tu_subpass_attachment){ + subpass->resolve_attachments[j] = (struct tu_subpass_attachment) { .attachment = desc->pResolveAttachments[j].attachment, .layout = desc->pResolveAttachments[j].layout, }; if (a != VK_ATTACHMENT_UNUSED) { subpass->has_resolve = true; pass->attachments[desc->pResolveAttachments[j].attachment] - .view_mask |= subpass->view_mask; + .view_mask |= subpass->view_mask; } } } if (desc->pDepthStencilAttachment) { - subpass->depth_stencil_attachment = (struct tu_subpass_attachment){ + subpass->depth_stencil_attachment = (struct tu_subpass_attachment) { .attachment = desc->pDepthStencilAttachment->attachment, .layout = desc->pDepthStencilAttachment->layout, }; if (desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { pass->attachments[desc->pDepthStencilAttachment->attachment] - .view_mask |= subpass->view_mask; + .view_mask |= subpass->view_mask; depth_sample_count = - pCreateInfo - ->pAttachments[desc->pDepthStencilAttachment->attachment] - .samples; + pCreateInfo + ->pAttachments[desc->pDepthStencilAttachment->attachment] + .samples; } } else { subpass->depth_stencil_attachment.attachment 
= VK_ATTACHMENT_UNUSED; } - subpass->max_sample_count = MAX2(color_sample_count, depth_sample_count); + subpass->max_sample_count = + MAX2(color_sample_count, depth_sample_count); } for (unsigned i = 0; i < pCreateInfo->dependencyCount; ++i) { uint32_t dst = pCreateInfo->pDependencies[i].dstSubpass; if (dst == VK_SUBPASS_EXTERNAL) { pass->end_barrier.src_stage_mask = - pCreateInfo->pDependencies[i].srcStageMask; + pCreateInfo->pDependencies[i].srcStageMask; pass->end_barrier.src_access_mask = - pCreateInfo->pDependencies[i].srcAccessMask; + pCreateInfo->pDependencies[i].srcAccessMask; pass->end_barrier.dst_access_mask = - pCreateInfo->pDependencies[i].dstAccessMask; + pCreateInfo->pDependencies[i].dstAccessMask; } else { pass->subpasses[dst].start_barrier.src_stage_mask = - pCreateInfo->pDependencies[i].srcStageMask; + pCreateInfo->pDependencies[i].srcStageMask; pass->subpasses[dst].start_barrier.src_access_mask = - pCreateInfo->pDependencies[i].srcAccessMask; + pCreateInfo->pDependencies[i].srcAccessMask; pass->subpasses[dst].start_barrier.dst_access_mask = - pCreateInfo->pDependencies[i].dstAccessMask; + pCreateInfo->pDependencies[i].dstAccessMask; } } diff --git a/src/freedreno/vulkan/tu_pipeline.c b/src/freedreno/vulkan/tu_pipeline.c index 8179b03e89c..b7598960748 100644 --- a/src/freedreno/vulkan/tu_pipeline.c +++ b/src/freedreno/vulkan/tu_pipeline.c @@ -21,21 +21,21 @@ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. */ #include "tu_private.h" + +#include "main/menums.h" #include "nir/nir.h" #include "nir/nir_builder.h" #include "spirv/nir_spirv.h" +#include "util/debug.h" #include "util/mesa-sha1.h" #include "util/u_atomic.h" -#include "vk_util.h" - -#include "main/menums.h" -#include "util/debug.h" #include "vk_format.h" +#include "vk_util.h" VkResult tu_graphics_pipeline_create( @@ -63,12 +63,9 @@ tu_CreateGraphicsPipelines(VkDevice _device, for (; i < count; i++) { VkResult r; - r = tu_graphics_pipeline_create(_device, - pipelineCache, - &pCreateInfos[i], - NULL, - pAllocator, - &pPipelines[i]); + r = + tu_graphics_pipeline_create(_device, pipelineCache, &pCreateInfos[i], + NULL, pAllocator, &pPipelines[i]); if (r != VK_SUCCESS) { result = r; pPipelines[i] = VK_NULL_HANDLE; @@ -101,8 +98,8 @@ tu_CreateComputePipelines(VkDevice _device, unsigned i = 0; for (; i < count; i++) { VkResult r; - r = tu_compute_pipeline_create( - _device, pipelineCache, &pCreateInfos[i], pAllocator, &pPipelines[i]); + r = tu_compute_pipeline_create(_device, pipelineCache, &pCreateInfos[i], + pAllocator, &pPipelines[i]); if (r != VK_SUCCESS) { result = r; pPipelines[i] = VK_NULL_HANDLE; diff --git a/src/freedreno/vulkan/tu_pipeline_cache.c b/src/freedreno/vulkan/tu_pipeline_cache.c index efb63383793..b8b2ceda263 100644 --- a/src/freedreno/vulkan/tu_pipeline_cache.c +++ b/src/freedreno/vulkan/tu_pipeline_cache.c @@ -17,11 +17,12 @@ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. */ #include "tu_private.h" + #include "util/debug.h" #include "util/disk_cache.h" #include "util/mesa-sha1.h" @@ -33,8 +34,7 @@ struct cache_entry_variant_info struct cache_entry { - union - { + union { unsigned char sha1[20]; uint32_t sha1_dw[5]; }; @@ -83,7 +83,8 @@ entry_size(struct cache_entry *entry) size_t ret = sizeof(*entry); for (int i = 0; i < MESA_SHADER_STAGES; ++i) if (entry->code_sizes[i]) - ret += sizeof(struct cache_entry_variant_info) + entry->code_sizes[i]; + ret += + sizeof(struct cache_entry_variant_info) + entry->code_sizes[i]; return ret; } @@ -105,15 +106,15 @@ tu_hash_shaders(unsigned char *hash, for (int i = 0; i < MESA_SHADER_STAGES; ++i) { if (stages[i]) { TU_FROM_HANDLE(tu_shader_module, module, stages[i]->module); - const VkSpecializationInfo *spec_info = stages[i]->pSpecializationInfo; + const VkSpecializationInfo *spec_info = + stages[i]->pSpecializationInfo; _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1)); _mesa_sha1_update(&ctx, stages[i]->pName, strlen(stages[i]->pName)); if (spec_info) { - _mesa_sha1_update(&ctx, - spec_info->pMapEntries, - spec_info->mapEntryCount * - sizeof spec_info->pMapEntries[0]); + _mesa_sha1_update( + &ctx, spec_info->pMapEntries, + spec_info->mapEntryCount * sizeof spec_info->pMapEntries[0]); _mesa_sha1_update(&ctx, spec_info->pData, spec_info->dataSize); } } @@ -127,7 +128,7 @@ tu_pipeline_cache_search_unlocked(struct tu_pipeline_cache *cache, const unsigned char *sha1) { const uint32_t mask = cache->table_size - 1; - const uint32_t start = (*(uint32_t *)sha1); + const uint32_t start = (*(uint32_t *) sha1); if (cache->table_size == 0) return NULL; @@ -258,22 +259,22 @@ tu_pipeline_cache_load(struct tu_pipeline_cache *cache, return; if (header.device_id != 0 /* TODO */) return; - if (memcmp(header.uuid, device->physical_device->cache_uuid, VK_UUID_SIZE) != - 0) + if (memcmp(header.uuid, device->physical_device->cache_uuid, + VK_UUID_SIZE) != 0) return; - char *end = (void *)data + size; - char *p = (void *)data + header.header_size; + char *end = (void *) data + size; + char *p = (void *) data + header.header_size; while (end - p >= sizeof(struct cache_entry)) { - struct cache_entry *entry = (struct cache_entry *)p; + struct cache_entry *entry = (struct cache_entry *) p; struct cache_entry *dest_entry; size_t size = entry_size(entry); if (end - p < size) break; dest_entry = - vk_alloc(&cache->alloc, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE); + vk_alloc(&cache->alloc, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE); if (dest_entry) { memcpy(dest_entry, entry, size); for (int i = 0; i < MESA_SHADER_STAGES; ++i) @@ -296,10 +297,7 @@ tu_CreatePipelineCache(VkDevice _device, assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO); assert(pCreateInfo->flags == 0); - cache = vk_alloc2(&device->alloc, - pAllocator, - sizeof(*cache), - 8, + cache = vk_alloc2(&device->alloc, pAllocator, sizeof(*cache), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (cache == NULL) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); @@ -312,8 +310,8 @@ tu_CreatePipelineCache(VkDevice _device, tu_pipeline_cache_init(cache, device); if 
(pCreateInfo->initialDataSize > 0) { - tu_pipeline_cache_load( - cache, pCreateInfo->pInitialData, pCreateInfo->initialDataSize); + tu_pipeline_cache_load(cache, pCreateInfo->pInitialData, + pCreateInfo->initialDataSize); } *pPipelineCache = tu_pipeline_cache_to_handle(cache); @@ -382,7 +380,7 @@ tu_GetPipelineCacheData(VkDevice _device, memcpy(p, entry, size); for (int j = 0; j < MESA_SHADER_STAGES; ++j) - ((struct cache_entry *)p)->variants[j] = NULL; + ((struct cache_entry *) p)->variants[j] = NULL; p += size; } *pDataSize = p - pData; diff --git a/src/freedreno/vulkan/tu_private.h b/src/freedreno/vulkan/tu_private.h index 454a051336e..3d9219eae57 100644 --- a/src/freedreno/vulkan/tu_private.h +++ b/src/freedreno/vulkan/tu_private.h @@ -21,8 +21,8 @@ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. */ #ifndef TU_PRIVATE_H @@ -67,7 +67,6 @@ typedef uint32_t xcb_window_t; #include <vulkan/vulkan_intel.h> #include "drm/freedreno_ringbuffer.h" - #include "tu_entrypoints.h" #define MAX_VBS 32 @@ -80,7 +79,7 @@ typedef uint32_t xcb_window_t; #define MAX_PUSH_DESCRIPTORS 32 #define MAX_DYNAMIC_UNIFORM_BUFFERS 16 #define MAX_DYNAMIC_STORAGE_BUFFERS 8 -#define MAX_DYNAMIC_BUFFERS \ +#define MAX_DYNAMIC_BUFFERS \ (MAX_DYNAMIC_UNIFORM_BUFFERS + MAX_DYNAMIC_STORAGE_BUFFERS) #define MAX_SAMPLES_LOG2 4 #define NUM_META_FS_KEYS 13 @@ -193,14 +192,14 @@ tu_clear_mask(uint32_t *inout_mask, uint32_t clear_mask) } } -#define for_each_bit(b, dword) \ - for (uint32_t __dword = (dword); (b) = __builtin_ffs(__dword) - 1, __dword; \ - __dword &= ~(1 << (b))) +#define for_each_bit(b, dword) \ + for (uint32_t __dword = (dword); \ + (b) = __builtin_ffs(__dword) - 1, __dword; __dword &= ~(1 << (b))) -#define typed_memcpy(dest, src, count) \ - ({ \ - STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \ - memcpy((dest), (src), (count) * sizeof(*(src))); \ +#define typed_memcpy(dest, src, count) \ + ({ \ + STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \ + memcpy((dest), (src), (count) * sizeof(*(src))); \ }) /* Whenever we generate an error, pass it through this function. Useful for @@ -218,14 +217,14 @@ __vk_errorf(struct tu_instance *instance, const char *format, ...); -#define vk_error(instance, error) \ +#define vk_error(instance, error) \ __vk_errorf(instance, error, __FILE__, __LINE__, NULL); -#define vk_errorf(instance, error, format, ...) \ +#define vk_errorf(instance, error, format, ...) \ __vk_errorf(instance, error, __FILE__, __LINE__, format, ##__VA_ARGS__); void __tu_finishme(const char *file, int line, const char *format, ...) - tu_printflike(3, 4); + tu_printflike(3, 4); void tu_loge(const char *format, ...) tu_printflike(1, 2); void @@ -238,21 +237,21 @@ tu_logi_v(const char *format, va_list va); /** * Print a FINISHME message, including its source location. */ -#define tu_finishme(format, ...) \ - do { \ - static bool reported = false; \ - if (!reported) { \ - __tu_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__); \ - reported = true; \ - } \ +#define tu_finishme(format, ...) 
\ + do { \ + static bool reported = false; \ + if (!reported) { \ + __tu_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__); \ + reported = true; \ + } \ } while (0) /* A non-fatal assert. Useful for debugging. */ #ifdef DEBUG -#define tu_assert(x) \ - ({ \ - if (unlikely(!(x))) \ - fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \ +#define tu_assert(x) \ + ({ \ + if (unlikely(!(x))) \ + fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \ }) #else #define tu_assert(x) @@ -260,11 +259,14 @@ tu_logi_v(const char *format, va_list va); /* Suppress -Wunused in stub functions */ #define tu_use_args(...) __tu_use_args(0, ##__VA_ARGS__) -static inline void __tu_use_args(int ignore, ...) {} +static inline void +__tu_use_args(int ignore, ...) +{ +} -#define tu_stub() \ - do { \ - tu_finishme("stub %s", __func__); \ +#define tu_stub() \ + do { \ + tu_finishme("stub %s", __func__); \ } while (0) void * @@ -813,11 +815,10 @@ mesa_to_vk_shader_stage(gl_shader_stage mesa_stage) #define TU_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1) -#define tu_foreach_stage(stage, stage_bits) \ - for (gl_shader_stage stage, \ - __tmp = (gl_shader_stage)((stage_bits)&TU_STAGE_MASK); \ - stage = __builtin_ffs(__tmp) - 1, __tmp; \ - __tmp &= ~(1 << (stage))) +#define tu_foreach_stage(stage, stage_bits) \ + for (gl_shader_stage stage, \ + __tmp = (gl_shader_stage)((stage_bits) &TU_STAGE_MASK); \ + stage = __builtin_ffs(__tmp) - 1, __tmp; __tmp &= ~(1 << (stage))) struct tu_shader_module { @@ -904,11 +905,11 @@ tu_is_colorbuffer_format_supported(VkFormat format, bool *blendable); bool tu_dcc_formats_compatible(VkFormat format1, VkFormat format2); - -struct tu_image_level { - VkDeviceSize offset; - VkDeviceSize size; - uint32_t pitch; +struct tu_image_level +{ + VkDeviceSize offset; + VkDeviceSize size; + uint32_t pitch; }; struct tu_image @@ -1026,14 +1027,14 @@ tu_sanitize_image_extent(const VkImageType imageType, const struct VkExtent3D imageExtent) { switch (imageType) { - case VK_IMAGE_TYPE_1D: - return (VkExtent3D){ imageExtent.width, 1, 1 }; - case VK_IMAGE_TYPE_2D: - return (VkExtent3D){ imageExtent.width, imageExtent.height, 1 }; - case VK_IMAGE_TYPE_3D: - return imageExtent; - default: - unreachable("invalid image type"); + case VK_IMAGE_TYPE_1D: + return (VkExtent3D) { imageExtent.width, 1, 1 }; + case VK_IMAGE_TYPE_2D: + return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 }; + case VK_IMAGE_TYPE_3D: + return imageExtent; + default: + unreachable("invalid image type"); } } @@ -1042,14 +1043,14 @@ tu_sanitize_image_offset(const VkImageType imageType, const struct VkOffset3D imageOffset) { switch (imageType) { - case VK_IMAGE_TYPE_1D: - return (VkOffset3D){ imageOffset.x, 0, 0 }; - case VK_IMAGE_TYPE_2D: - return (VkOffset3D){ imageOffset.x, imageOffset.y, 0 }; - case VK_IMAGE_TYPE_3D: - return imageOffset; - default: - unreachable("invalid image type"); + case VK_IMAGE_TYPE_1D: + return (VkOffset3D) { imageOffset.x, 0, 0 }; + case VK_IMAGE_TYPE_2D: + return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 }; + case VK_IMAGE_TYPE_3D: + return imageOffset; + default: + unreachable("invalid image type"); } } @@ -1204,30 +1205,32 @@ tu_gem_info_offset(struct tu_device *dev, uint32_t gem_handle); uint64_t tu_gem_info_iova(struct tu_device *dev, uint32_t gem_handle); int -tu_drm_query_param(struct tu_physical_device *dev, uint32_t param, uint64_t *value); +tu_drm_query_param(struct tu_physical_device *dev, + uint32_t param, + uint64_t *value); #define TU_DEFINE_HANDLE_CASTS(__tu_type, 
__VkType) \ - \ + \ static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \ - { \ - return (struct __tu_type *)_handle; \ - } \ - \ + { \ + return (struct __tu_type *) _handle; \ + } \ + \ static inline __VkType __tu_type##_to_handle(struct __tu_type *_obj) \ - { \ - return (__VkType)_obj; \ + { \ + return (__VkType) _obj; \ } #define TU_DEFINE_NONDISP_HANDLE_CASTS(__tu_type, __VkType) \ - \ + \ static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \ - { \ - return (struct __tu_type *)(uintptr_t)_handle; \ - } \ - \ + { \ + return (struct __tu_type *) (uintptr_t) _handle; \ + } \ + \ static inline __VkType __tu_type##_to_handle(struct __tu_type *_obj) \ - { \ - return (__VkType)(uintptr_t)_obj; \ + { \ + return (__VkType)(uintptr_t) _obj; \ } #define TU_FROM_HANDLE(__tu_type, __name, __handle) \ @@ -1245,9 +1248,9 @@ TU_DEFINE_NONDISP_HANDLE_CASTS(tu_buffer_view, VkBufferView) TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_pool, VkDescriptorPool) TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_set, VkDescriptorSet) TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_set_layout, - VkDescriptorSetLayout) + VkDescriptorSetLayout) TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_update_template, - VkDescriptorUpdateTemplateKHR) + VkDescriptorUpdateTemplateKHR) TU_DEFINE_NONDISP_HANDLE_CASTS(tu_device_memory, VkDeviceMemory) TU_DEFINE_NONDISP_HANDLE_CASTS(tu_fence, VkFence) TU_DEFINE_NONDISP_HANDLE_CASTS(tu_event, VkEvent) diff --git a/src/freedreno/vulkan/tu_query.c b/src/freedreno/vulkan/tu_query.c index 9b2d76ba0e2..2cb710fb1ca 100644 --- a/src/freedreno/vulkan/tu_query.c +++ b/src/freedreno/vulkan/tu_query.c @@ -19,17 +19,18 @@ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. */ +#include "tu_private.h" + #include <assert.h> #include <fcntl.h> #include <stdbool.h> #include <string.h> #include <unistd.h> -#include "tu_private.h" #include "nir/nir_builder.h" VkResult @@ -39,11 +40,9 @@ tu_CreateQueryPool(VkDevice _device, VkQueryPool *pQueryPool) { TU_FROM_HANDLE(tu_device, device, _device); - struct tu_query_pool *pool = vk_alloc2(&device->alloc, - pAllocator, - sizeof(*pool), - 8, - VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + struct tu_query_pool *pool = + vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (!pool) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); diff --git a/src/freedreno/vulkan/tu_util.c b/src/freedreno/vulkan/tu_util.c index ab33fd9ab10..e630460fb33 100644 --- a/src/freedreno/vulkan/tu_util.c +++ b/src/freedreno/vulkan/tu_util.c @@ -17,10 +17,12 @@ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
*/ +#include "tu_private.h" + #include <assert.h> #include <errno.h> #include <stdarg.h> @@ -28,10 +30,8 @@ #include <stdlib.h> #include <string.h> -#include "tu_private.h" -#include "vk_enum_to_str.h" - #include "util/u_math.h" +#include "vk_enum_to_str.h" /* TODO: Add Android support to tu_log funcs */ @@ -74,7 +74,7 @@ tu_logi_v(const char *format, va_list va) } void tu_printflike(3, 4) - __tu_finishme(const char *file, int line, const char *format, ...) + __tu_finishme(const char *file, int line, const char *format, ...) { va_list ap; char buffer[256]; diff --git a/src/freedreno/vulkan/vk_format.h b/src/freedreno/vulkan/vk_format.h index 55ad379f8f3..01e116575a3 100644 --- a/src/freedreno/vulkan/vk_format.h +++ b/src/freedreno/vulkan/vk_format.h @@ -20,8 +20,8 @@ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. */ #ifndef VK_FORMAT_H @@ -29,6 +29,7 @@ #include <assert.h> #include <util/macros.h> + #include <vulkan/vulkan.h> enum vk_format_layout @@ -224,7 +225,6 @@ vk_format_get_block_count(VkFormat format, unsigned width, unsigned height) vk_format_get_block_count_height(format, height); } - /** * Return the index of the first non-void channel * -1 if no non-void channels @@ -261,24 +261,24 @@ static inline VkImageAspectFlags vk_format_aspects(VkFormat format) { switch (format) { - case VK_FORMAT_UNDEFINED: - return 0; + case VK_FORMAT_UNDEFINED: + return 0; - case VK_FORMAT_S8_UINT: - return VK_IMAGE_ASPECT_STENCIL_BIT; + case VK_FORMAT_S8_UINT: + return VK_IMAGE_ASPECT_STENCIL_BIT; - case VK_FORMAT_D16_UNORM_S8_UINT: - case VK_FORMAT_D24_UNORM_S8_UINT: - case VK_FORMAT_D32_SFLOAT_S8_UINT: - return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; + case VK_FORMAT_D16_UNORM_S8_UINT: + case VK_FORMAT_D24_UNORM_S8_UINT: + case VK_FORMAT_D32_SFLOAT_S8_UINT: + return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; - case VK_FORMAT_D16_UNORM: - case VK_FORMAT_X8_D24_UNORM_PACK32: - case VK_FORMAT_D32_SFLOAT: - return VK_IMAGE_ASPECT_DEPTH_BIT; + case VK_FORMAT_D16_UNORM: + case VK_FORMAT_X8_D24_UNORM_PACK32: + case VK_FORMAT_D32_SFLOAT: + return VK_IMAGE_ASPECT_DEPTH_BIT; - default: - return VK_IMAGE_ASPECT_COLOR_BIT; + default: + return VK_IMAGE_ASPECT_COLOR_BIT; } } @@ -292,32 +292,32 @@ tu_swizzle_conv(VkComponentSwizzle component, if (vk_swiz == VK_COMPONENT_SWIZZLE_IDENTITY) vk_swiz = component; switch (vk_swiz) { - case VK_COMPONENT_SWIZZLE_ZERO: - return VK_SWIZZLE_0; - case VK_COMPONENT_SWIZZLE_ONE: - return VK_SWIZZLE_1; - case VK_COMPONENT_SWIZZLE_R: - for (x = 0; x < 4; x++) - if (chan[x] == 0) - return x; - return VK_SWIZZLE_0; - case VK_COMPONENT_SWIZZLE_G: - for (x = 0; x < 4; x++) - if (chan[x] == 1) - return x; - return VK_SWIZZLE_0; - case VK_COMPONENT_SWIZZLE_B: - for (x = 0; x < 4; x++) - if (chan[x] == 2) - return x; - return VK_SWIZZLE_0; - case VK_COMPONENT_SWIZZLE_A: - for (x = 0; x < 4; x++) - if (chan[x] == 3) - return x; - return VK_SWIZZLE_1; - default: - unreachable("Illegal swizzle"); + case VK_COMPONENT_SWIZZLE_ZERO: + return VK_SWIZZLE_0; + case VK_COMPONENT_SWIZZLE_ONE: + return VK_SWIZZLE_1; + case VK_COMPONENT_SWIZZLE_R: + for (x = 0; x < 4; x++) + if (chan[x] == 0) 
+ return x; + return VK_SWIZZLE_0; + case VK_COMPONENT_SWIZZLE_G: + for (x = 0; x < 4; x++) + if (chan[x] == 1) + return x; + return VK_SWIZZLE_0; + case VK_COMPONENT_SWIZZLE_B: + for (x = 0; x < 4; x++) + if (chan[x] == 2) + return x; + return VK_SWIZZLE_0; + case VK_COMPONENT_SWIZZLE_A: + for (x = 0; x < 4; x++) + if (chan[x] == 3) + return x; + return VK_SWIZZLE_1; + default: + unreachable("Illegal swizzle"); } } @@ -343,15 +343,15 @@ vk_format_is_compressed(VkFormat format) } switch (desc->layout) { - case VK_FORMAT_LAYOUT_S3TC: - case VK_FORMAT_LAYOUT_RGTC: - case VK_FORMAT_LAYOUT_ETC: - case VK_FORMAT_LAYOUT_BPTC: - case VK_FORMAT_LAYOUT_ASTC: - /* XXX add other formats in the future */ - return true; - default: - return false; + case VK_FORMAT_LAYOUT_S3TC: + case VK_FORMAT_LAYOUT_RGTC: + case VK_FORMAT_LAYOUT_ETC: + case VK_FORMAT_LAYOUT_BPTC: + case VK_FORMAT_LAYOUT_ASTC: + /* XXX add other formats in the future */ + return true; + default: + return false; } } @@ -418,14 +418,14 @@ static inline VkFormat vk_format_depth_only(VkFormat format) { switch (format) { - case VK_FORMAT_D16_UNORM_S8_UINT: - return VK_FORMAT_D16_UNORM; - case VK_FORMAT_D24_UNORM_S8_UINT: - return VK_FORMAT_X8_D24_UNORM_PACK32; - case VK_FORMAT_D32_SFLOAT_S8_UINT: - return VK_FORMAT_D32_SFLOAT; - default: - return format; + case VK_FORMAT_D16_UNORM_S8_UINT: + return VK_FORMAT_D16_UNORM; + case VK_FORMAT_D24_UNORM_S8_UINT: + return VK_FORMAT_X8_D24_UNORM_PACK32; + case VK_FORMAT_D32_SFLOAT_S8_UINT: + return VK_FORMAT_D32_SFLOAT; + default: + return format; } } @@ -449,39 +449,39 @@ static inline VkFormat vk_format_no_srgb(VkFormat format) { switch (format) { - case VK_FORMAT_R8_SRGB: - return VK_FORMAT_R8_UNORM; - case VK_FORMAT_R8G8_SRGB: - return VK_FORMAT_R8G8_UNORM; - case VK_FORMAT_R8G8B8_SRGB: - return VK_FORMAT_R8G8B8_UNORM; - case VK_FORMAT_B8G8R8_SRGB: - return VK_FORMAT_B8G8R8_UNORM; - case VK_FORMAT_R8G8B8A8_SRGB: - return VK_FORMAT_R8G8B8A8_UNORM; - case VK_FORMAT_B8G8R8A8_SRGB: - return VK_FORMAT_B8G8R8A8_UNORM; - case VK_FORMAT_A8B8G8R8_SRGB_PACK32: - return VK_FORMAT_A8B8G8R8_UNORM_PACK32; - case VK_FORMAT_BC1_RGB_SRGB_BLOCK: - return VK_FORMAT_BC1_RGB_UNORM_BLOCK; - case VK_FORMAT_BC1_RGBA_SRGB_BLOCK: - return VK_FORMAT_BC1_RGBA_UNORM_BLOCK; - case VK_FORMAT_BC2_SRGB_BLOCK: - return VK_FORMAT_BC2_UNORM_BLOCK; - case VK_FORMAT_BC3_SRGB_BLOCK: - return VK_FORMAT_BC3_UNORM_BLOCK; - case VK_FORMAT_BC7_SRGB_BLOCK: - return VK_FORMAT_BC7_UNORM_BLOCK; - case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK: - return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK; - case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK: - return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK; - case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK: - return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK; - default: - assert(!vk_format_is_srgb(format)); - return format; + case VK_FORMAT_R8_SRGB: + return VK_FORMAT_R8_UNORM; + case VK_FORMAT_R8G8_SRGB: + return VK_FORMAT_R8G8_UNORM; + case VK_FORMAT_R8G8B8_SRGB: + return VK_FORMAT_R8G8B8_UNORM; + case VK_FORMAT_B8G8R8_SRGB: + return VK_FORMAT_B8G8R8_UNORM; + case VK_FORMAT_R8G8B8A8_SRGB: + return VK_FORMAT_R8G8B8A8_UNORM; + case VK_FORMAT_B8G8R8A8_SRGB: + return VK_FORMAT_B8G8R8A8_UNORM; + case VK_FORMAT_A8B8G8R8_SRGB_PACK32: + return VK_FORMAT_A8B8G8R8_UNORM_PACK32; + case VK_FORMAT_BC1_RGB_SRGB_BLOCK: + return VK_FORMAT_BC1_RGB_UNORM_BLOCK; + case VK_FORMAT_BC1_RGBA_SRGB_BLOCK: + return VK_FORMAT_BC1_RGBA_UNORM_BLOCK; + case VK_FORMAT_BC2_SRGB_BLOCK: + return VK_FORMAT_BC2_UNORM_BLOCK; + case VK_FORMAT_BC3_SRGB_BLOCK: + return 
VK_FORMAT_BC3_UNORM_BLOCK; + case VK_FORMAT_BC7_SRGB_BLOCK: + return VK_FORMAT_BC7_UNORM_BLOCK; + case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK: + return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK; + case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK: + return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK; + case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK: + return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK; + default: + assert(!vk_format_is_srgb(format)); + return format; } } @@ -521,16 +521,16 @@ vk_format_get_component_bits(VkFormat format, } switch (desc->swizzle[component]) { - case VK_SWIZZLE_X: - return desc->channel[0].size; - case VK_SWIZZLE_Y: - return desc->channel[1].size; - case VK_SWIZZLE_Z: - return desc->channel[2].size; - case VK_SWIZZLE_W: - return desc->channel[3].size; - default: - return 0; + case VK_SWIZZLE_X: + return desc->channel[0].size; + case VK_SWIZZLE_Y: + return desc->channel[1].size; + case VK_SWIZZLE_Z: + return desc->channel[2].size; + case VK_SWIZZLE_W: + return desc->channel[3].size; + default: + return 0; } } @@ -538,22 +538,22 @@ static inline VkFormat vk_to_non_srgb_format(VkFormat format) { switch (format) { - case VK_FORMAT_R8_SRGB: - return VK_FORMAT_R8_UNORM; - case VK_FORMAT_R8G8_SRGB: - return VK_FORMAT_R8G8_UNORM; - case VK_FORMAT_R8G8B8_SRGB: - return VK_FORMAT_R8G8B8_UNORM; - case VK_FORMAT_B8G8R8_SRGB: - return VK_FORMAT_B8G8R8_UNORM; - case VK_FORMAT_R8G8B8A8_SRGB: - return VK_FORMAT_R8G8B8A8_UNORM; - case VK_FORMAT_B8G8R8A8_SRGB: - return VK_FORMAT_B8G8R8A8_UNORM; - case VK_FORMAT_A8B8G8R8_SRGB_PACK32: - return VK_FORMAT_A8B8G8R8_UNORM_PACK32; - default: - return format; + case VK_FORMAT_R8_SRGB: + return VK_FORMAT_R8_UNORM; + case VK_FORMAT_R8G8_SRGB: + return VK_FORMAT_R8G8_UNORM; + case VK_FORMAT_R8G8B8_SRGB: + return VK_FORMAT_R8G8B8_UNORM; + case VK_FORMAT_B8G8R8_SRGB: + return VK_FORMAT_B8G8R8_UNORM; + case VK_FORMAT_R8G8B8A8_SRGB: + return VK_FORMAT_R8G8B8A8_UNORM; + case VK_FORMAT_B8G8R8A8_SRGB: + return VK_FORMAT_B8G8R8A8_UNORM; + case VK_FORMAT_A8B8G8R8_SRGB_PACK32: + return VK_FORMAT_A8B8G8R8_UNORM_PACK32; + default: + return format; } } |
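
A note on the handle-cast helpers reformatted in the tu_private.h hunks above: TU_DEFINE_HANDLE_CASTS and TU_DEFINE_NONDISP_HANDLE_CASTS generate paired *_from_handle / *_to_handle functions that cast between Vulkan handles and the driver's private structs (the non-dispatchable variant goes through uintptr_t because such handles may be 64-bit integers rather than pointers). The stand-alone sketch below shows roughly what one expansion provides; the demo_* names are illustrative stand-ins, not turnip definitions, and the indentation follows the 3-space style the new .clang-format enforces.

#include <stdint.h>

typedef struct demo_device_T *DemoDevice; /* stand-in for a dispatchable handle */

struct demo_device
{
   int fd;
};

/* Roughly what TU_DEFINE_HANDLE_CASTS(demo_device, DemoDevice) would expand to. */
static inline struct demo_device *
demo_device_from_handle(DemoDevice handle)
{
   return (struct demo_device *) handle;
}

static inline DemoDevice
demo_device_to_handle(struct demo_device *dev)
{
   return (DemoDevice) dev;
}

Entry points then presumably begin by casting each incoming handle this way, which is what the TU_FROM_HANDLE(...) uses throughout the files above appear to wrap.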
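
The for_each_bit macro whose continuation lines were re-aligned in tu_private.h iterates over the set bits of a 32-bit mask via __builtin_ffs. A minimal usage sketch, assuming a GCC/clang toolchain for the builtin; the macro body is copied from the hunk above, the rest is illustrative.

#include <stdint.h>
#include <stdio.h>

#define for_each_bit(b, dword)                                               \
   for (uint32_t __dword = (dword);                                          \
        (b) = __builtin_ffs(__dword) - 1, __dword; __dword &= ~(1 << (b)))

int
main(void)
{
   uint32_t dirty = 0x15; /* bits 0, 2 and 4 set */
   int b;
   for_each_bit(b, dirty)
      printf("bit %d is set\n", b); /* prints 0, 2, 4 */
   return 0;
}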
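
tu_finishme, also re-indented above, relies on a function-local static flag so each call site warns only the first time it is hit. A stand-alone sketch of that report-once pattern, with fprintf standing in for the driver's __tu_finishme; the ##__VA_ARGS__ form assumes GCC/clang, as the driver itself does.

#include <stdbool.h>
#include <stdio.h>

#define warn_once(format, ...)                                               \
   do {                                                                      \
      static bool reported = false;                                          \
      if (!reported) {                                                       \
         fprintf(stderr, "%s:%d FINISHME: " format "\n", __FILE__,           \
                 __LINE__, ##__VA_ARGS__);                                   \
         reported = true;                                                    \
      }                                                                      \
   } while (0)

static void
stubbed_entry_point(void)
{
   warn_once("stub %s", __func__); /* reported on the first call only */
}

int
main(void)
{
   stubbed_entry_point();
   stubbed_entry_point(); /* silent the second time */
   return 0;
}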
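
Finally, vk_format_aspects in vk_format.h, whose switch indentation accounts for much of the vk_format.h hunk, maps a VkFormat to the aspect bits it contains. A hedged usage sketch, assuming vk_format.h from this directory and the Vulkan headers are on the include path; full_range_for_format is an illustrative helper, not part of the driver.

#include <vulkan/vulkan.h>

#include "vk_format.h"

static VkImageSubresourceRange
full_range_for_format(VkFormat format)
{
   /* vk_format_aspects() returns DEPTH | STENCIL for e.g.
    * VK_FORMAT_D24_UNORM_S8_UINT and COLOR for ordinary color formats. */
   return (VkImageSubresourceRange) {
      .aspectMask = vk_format_aspects(format),
      .baseMipLevel = 0,
      .levelCount = VK_REMAINING_MIP_LEVELS,
      .baseArrayLayer = 0,
      .layerCount = VK_REMAINING_ARRAY_LAYERS,
   };
}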