| author    | Bas Nieuwenhuizen <[email protected]> | 2016-12-17 21:25:32 +0100 |
|-----------|---------------------------------------|---------------------------|
| committer | Bas Nieuwenhuizen <[email protected]> | 2016-12-18 20:52:41 +0100 |
| commit    | accc5fc026ec45171c458334bdee16747fbe7824 (patch) | |
| tree      | ccd0b4da28d02b75fd4d71cb8cfd1747f01e1927 /src | |
| parent    | bfee9866ea87fb0a81b3165f968ac45a4f5a25c3 (diff) | |
radv: Don't enable CMASK on compute queues.
We can't fast clear on compute queues.
Signed-off-by: Bas Nieuwenhuizen <[email protected]>
Reviewed-by: Dave Airlie <[email protected]>
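
The core of the change is visible in the radv_image.c hunk further down: a layout only counts as CMASK-compressed when every queue family that can access the image is the general (graphics) queue, because compute queues cannot execute the fast-clear path. Below is a minimal standalone sketch of that predicate; the RADV_QUEUE_* values are assumed to mirror radv's internal enum, and layout_has_cmask_sketch is an illustrative name rather than the driver function.

```c
#include <stdbool.h>
#include <vulkan/vulkan.h>

/* Queue family indices; values assumed to mirror radv_private.h. */
enum { RADV_QUEUE_GENERAL = 0, RADV_QUEUE_COMPUTE = 1, RADV_QUEUE_TRANSFER = 2 };

/* A layout is treated as CMASK-compressed only if the image is usable solely
 * on the general (graphics) queue; compute queues cannot fast clear. */
static bool
layout_has_cmask_sketch(VkImageLayout layout, unsigned queue_mask)
{
	return (layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL ||
	        layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) &&
	       queue_mask == (1u << RADV_QUEUE_GENERAL);
}
```

For an exclusive image in use on the compute queue the mask is `1u << RADV_QUEUE_COMPUTE`, so the predicate is false and emit_fast_color_clear() falls back to a slow clear instead of relying on CMASK.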
Diffstat (limited to 'src')
| -rw-r--r-- | src/amd/vulkan/radv_cmd_buffer.c | 40 |
| -rw-r--r-- | src/amd/vulkan/radv_image.c      | 18 |
| -rw-r--r-- | src/amd/vulkan/radv_meta_clear.c |  2 |
| -rw-r--r-- | src/amd/vulkan/radv_private.h    | 10 |
4 files changed, 62 insertions, 8 deletions
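
The two fields the patch adds to struct radv_image, exclusive and queue_family_mask, drive all of the hunks below. The following is a simplified sketch of how they are derived from VkImageCreateInfo and turned into a per-barrier queue mask; image_sharing is a stand-in type for illustration, not the real radv_image.

```c
#include <stdbool.h>
#include <stdint.h>
#include <vulkan/vulkan.h>

/* Stand-in for the two fields the patch adds to struct radv_image. */
struct image_sharing {
	bool     exclusive;          /* VK_SHARING_MODE_EXCLUSIVE at creation */
	unsigned queue_family_mask;  /* one bit per entry of pQueueFamilyIndices */
};

static void
image_sharing_init(struct image_sharing *s, const VkImageCreateInfo *info)
{
	s->exclusive = info->sharingMode == VK_SHARING_MODE_EXCLUSIVE;
	s->queue_family_mask = 0;
	for (uint32_t i = 0; i < info->queueFamilyIndexCount; ++i)
		s->queue_family_mask |= 1u << info->pQueueFamilyIndices[i];
}

/* For an exclusive image only the family named in the barrier matters; for a
 * concurrent image every family listed at creation can access the image. */
static unsigned
image_queue_family_mask(const struct image_sharing *s, int family)
{
	return s->exclusive ? 1u << family : s->queue_family_mask;
}
```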
diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c
index 3c5fe25ce6f..0572cb85e56 100644
--- a/src/amd/vulkan/radv_cmd_buffer.c
+++ b/src/amd/vulkan/radv_cmd_buffer.c
@@ -36,6 +36,8 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
                                          struct radv_image *image,
                                          VkImageLayout src_layout,
                                          VkImageLayout dst_layout,
+                                         int src_family,
+                                         int dst_family,
                                          VkImageSubresourceRange range,
                                          VkImageAspectFlags pending_clears);
 
@@ -1207,7 +1209,7 @@ static void radv_handle_subpass_image_transition(struct radv_cmd_buf
 	radv_handle_image_transition(cmd_buffer,
 				     view->image,
 				     cmd_buffer->state.attachments[idx].current_layout,
-				     att.layout, range,
+				     att.layout, 0, 0, range,
 				     cmd_buffer->state.attachments[idx].pending_clear_aspects);
 
 	cmd_buffer->state.attachments[idx].current_layout = att.layout;
@@ -2386,6 +2388,8 @@ static void radv_handle_cmask_image_transition(struct radv_cmd_buffe
                                                struct radv_image *image,
                                                VkImageLayout src_layout,
                                                VkImageLayout dst_layout,
+                                               unsigned src_queue_mask,
+                                               unsigned dst_queue_mask,
                                                VkImageSubresourceRange range,
                                                VkImageAspectFlags pending_clears)
 {
@@ -2394,8 +2398,8 @@ static void radv_handle_cmask_image_transition(struct radv_cmd_buffe
 			radv_initialise_cmask(cmd_buffer, image, 0xccccccccu);
 		else
 			radv_initialise_cmask(cmd_buffer, image, 0xffffffffu);
-	} else if (radv_layout_has_cmask(image, src_layout) &&
-		   !radv_layout_has_cmask(image, dst_layout)) {
+	} else if (radv_layout_has_cmask(image, src_layout, src_queue_mask) &&
+		   !radv_layout_has_cmask(image, dst_layout, dst_queue_mask)) {
 		radv_fast_clear_flush_image_inplace(cmd_buffer, image);
 	}
 }
@@ -2436,16 +2440,40 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
                                          struct radv_image *image,
                                          VkImageLayout src_layout,
                                          VkImageLayout dst_layout,
+                                         int src_family,
+                                         int dst_family,
                                          VkImageSubresourceRange range,
                                          VkImageAspectFlags pending_clears)
 {
+	if (image->exclusive && src_family != dst_family) {
+		/* This is an acquire or a release operation and there will be
+		 * a corresponding release/acquire. Do the transition in the
+		 * most flexible queue. */
+
+		assert(src_family == cmd_buffer->queue_family_index ||
+		       dst_family == cmd_buffer->queue_family_index);
+
+		if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER)
+			return;
+
+		if (cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
+		    (src_family == RADV_QUEUE_GENERAL ||
+		     dst_family == RADV_QUEUE_GENERAL))
+			return;
+	}
+
+	unsigned src_queue_mask = radv_image_queue_family_mask(image, src_family);
+	unsigned dst_queue_mask = radv_image_queue_family_mask(image, dst_family);
+
 	if (image->htile.size)
 		radv_handle_depth_image_transition(cmd_buffer, image, src_layout,
 						   dst_layout, range, pending_clears);
 
 	if (image->cmask.size)
 		radv_handle_cmask_image_transition(cmd_buffer, image, src_layout,
-						   dst_layout, range, pending_clears);
+						   dst_layout, src_queue_mask,
+						   dst_queue_mask, range,
+						   pending_clears);
 
 	if (image->surface.dcc_size)
 		radv_handle_dcc_image_transition(cmd_buffer, image, src_layout,
@@ -2509,6 +2537,8 @@ void radv_CmdPipelineBarrier(
 		radv_handle_image_transition(cmd_buffer, image,
 					     pImageMemoryBarriers[i].oldLayout,
 					     pImageMemoryBarriers[i].newLayout,
+					     pImageMemoryBarriers[i].srcQueueFamilyIndex,
+					     pImageMemoryBarriers[i].dstQueueFamilyIndex,
 					     pImageMemoryBarriers[i].subresourceRange,
 					     0);
 	}
@@ -2639,6 +2669,8 @@ void radv_CmdWaitEvents(VkCommandBuffer commandBuffer,
 		radv_handle_image_transition(cmd_buffer, image,
 					     pImageMemoryBarriers[i].oldLayout,
 					     pImageMemoryBarriers[i].newLayout,
+					     pImageMemoryBarriers[i].srcQueueFamilyIndex,
+					     pImageMemoryBarriers[i].dstQueueFamilyIndex,
 					     pImageMemoryBarriers[i].subresourceRange,
 					     0);
 	}
diff --git a/src/amd/vulkan/radv_image.c b/src/amd/vulkan/radv_image.c
index fee98ba94c6..a0287fcb3eb 100644
--- a/src/amd/vulkan/radv_image.c
+++ b/src/amd/vulkan/radv_image.c
@@ -698,6 +698,11 @@ radv_image_create(VkDevice _device,
 	image->samples = pCreateInfo->samples;
 	image->tiling = pCreateInfo->tiling;
 	image->usage = pCreateInfo->usage;
+
+	image->exclusive = pCreateInfo->sharingMode == VK_SHARING_MODE_EXCLUSIVE;
+	for (uint32_t i = 0; i < pCreateInfo->queueFamilyIndexCount; ++i)
+		image->queue_family_mask |= 1u << pCreateInfo->pQueueFamilyIndices[i];
+
 	radv_init_surface(device, &image->surface, create_info);
 
 	device->ws->surface_init(device->ws, &image->surface);
@@ -887,10 +892,19 @@ bool radv_layout_can_expclear(const struct radv_image *image,
 }
 
 bool radv_layout_has_cmask(const struct radv_image *image,
-			   VkImageLayout layout)
+			   VkImageLayout layout,
+			   unsigned queue_mask)
 {
 	return (layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL ||
-		layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+		layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) &&
+	       queue_mask == (1u << RADV_QUEUE_GENERAL);
+}
+
+
+unsigned radv_image_queue_family_mask(const struct radv_image *image, int family) {
+	if (image->exclusive)
+		return 1u <<family;
+	return image->queue_family_mask;
 }
 
 VkResult
diff --git a/src/amd/vulkan/radv_meta_clear.c b/src/amd/vulkan/radv_meta_clear.c
index d6af29187fd..b7263dde01e 100644
--- a/src/amd/vulkan/radv_meta_clear.c
+++ b/src/amd/vulkan/radv_meta_clear.c
@@ -805,7 +805,7 @@ emit_fast_color_clear(struct radv_cmd_buffer *cmd_buffer,
 	if (!cmd_buffer->device->allow_fast_clears)
 		return false;
 
-	if (!radv_layout_has_cmask(iview->image, image_layout))
+	if (!radv_layout_has_cmask(iview->image, image_layout, radv_image_queue_family_mask(iview->image, cmd_buffer->queue_family_index)))
 		goto fail;
 	if (vk_format_get_blocksizebits(iview->image->vk_format) > 64)
 		goto fail;
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index e6f6c29c919..03d295986f3 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -1006,6 +1006,9 @@ struct radv_image {
 	VkDeviceSize size;
 	uint32_t alignment;
 
+	bool exclusive;
+	unsigned queue_family_mask;
+
 	/* Set when bound */
 	struct radeon_winsys_bo *bo;
 	VkDeviceSize offset;
@@ -1027,7 +1030,12 @@ bool radv_layout_is_htile_compressed(const struct radv_image *image,
 bool radv_layout_can_expclear(const struct radv_image *image,
 			      VkImageLayout layout);
 bool radv_layout_has_cmask(const struct radv_image *image,
-			   VkImageLayout layout);
+			   VkImageLayout layout,
+			   unsigned queue_mask);
+
+
+unsigned radv_image_queue_family_mask(const struct radv_image *image, int family);
+
 static inline uint32_t
 radv_get_layerCount(const struct radv_image *image,
 		    const VkImageSubresourceRange *range)
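
One more piece worth spelling out is the acquire/release handling added to radv_handle_image_transition(): for an exclusive image whose ownership moves between queue families, the same barrier is recorded once on each side, and only the more capable queue performs the metadata transition. A sketch of that policy follows, under the same assumed RADV_QUEUE_* values as above; queue_should_transition is an illustrative helper, not driver code.

```c
#include <assert.h>
#include <stdbool.h>

/* Queue family indices; values assumed to mirror radv_private.h. */
enum { RADV_QUEUE_GENERAL = 0, RADV_QUEUE_COMPUTE = 1, RADV_QUEUE_TRANSFER = 2 };

/* Returns true if the queue recording the barrier should perform the layout
 * transition that accompanies a queue family ownership transfer. */
static bool
queue_should_transition(int cmd_queue, int src_family, int dst_family,
                        bool exclusive)
{
	if (!exclusive || src_family == dst_family)
		return true;                    /* not an acquire/release pair */

	/* The barrier must name the recording queue on one side. */
	assert(cmd_queue == src_family || cmd_queue == dst_family);

	if (cmd_queue == RADV_QUEUE_TRANSFER)
		return false;                   /* transfer queues never do it */

	if (cmd_queue == RADV_QUEUE_COMPUTE &&
	    (src_family == RADV_QUEUE_GENERAL || dst_family == RADV_QUEUE_GENERAL))
		return false;                   /* defer to the graphics side */

	return true;
}
```

Skipping the less capable side avoids attempting the CMASK flush on a queue that cannot execute it and keeps the release/acquire pair from doing the same work twice.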