author    Jason Ekstrand <[email protected]>  2015-11-30 11:48:08 -0800
committer Jason Ekstrand <[email protected]>  2015-11-30 11:48:08 -0800
commit    a89a485e79ad40793a85979d86d45760362be21a (patch)
tree      26acc2ef861f736838993a420bd1eae691266a34 /src/vulkan
parent    6a8a542610243f32ee20989778af06d66d7b5b1a (diff)
vk/0.210.0: Rename CmdBuffer to CommandBuffer
Diffstat (limited to 'src/vulkan')
-rw-r--r--  src/vulkan/anv_batch_chain.c    4
-rw-r--r--  src/vulkan/anv_cmd_buffer.c   118
-rw-r--r--  src/vulkan/anv_device.c        18
-rw-r--r--  src/vulkan/anv_dump.c          24
-rw-r--r--  src/vulkan/anv_meta.c          28
-rw-r--r--  src/vulkan/anv_meta_clear.c    16
-rw-r--r--  src/vulkan/anv_private.h        8
-rw-r--r--  src/vulkan/anv_query.c          2
-rw-r--r--  src/vulkan/gen7_cmd_buffer.c   46
-rw-r--r--  src/vulkan/gen8_cmd_buffer.c   62
-rw-r--r--  src/vulkan/genX_cmd_buffer.c    4
11 files changed, 165 insertions, 165 deletions
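
For context, the effect of the rename on callers is easiest to see in the anv_dump.c hunk further down, which allocates and begins a command buffer with the new 0.210-era names. The sketch below is a minimal caller-side illustration built only from entry points and structs that appear in this patch (anv_CreateCommandPool, anv_CreateCommandBuffer); it assumes vk_device is a valid VkDevice handle, and error handling and cleanup are omitted.

    /* Illustrative sketch of the renamed 0.210-era API, mirroring the
     * anv_dump.c hunk in this patch; vk_device is assumed to be a valid
     * VkDevice. Not a definitive usage example. */
    VkCommandPool commandPool;                         /* was VkCmdPool */
    anv_CreateCommandPool(vk_device,
       &(VkCommandPoolCreateInfo) {                    /* was VkCmdPoolCreateInfo */
          .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
          .queueFamilyIndex = 0,
          .flags = 0,
       }, &commandPool);

    VkCommandBuffer cmd;                               /* was VkCmdBuffer */
    anv_CreateCommandBuffer(vk_device,
       &(VkCommandBufferCreateInfo) {                  /* was VkCmdBufferCreateInfo */
          .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_CREATE_INFO,
          .commandPool = commandPool,                  /* was .cmdPool */
          .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,    /* was VK_CMD_BUFFER_LEVEL_PRIMARY */
          .flags = 0,
       }, &cmd);
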
diff --git a/src/vulkan/anv_batch_chain.c b/src/vulkan/anv_batch_chain.c
index 9d35da8024b..62189afec2f 100644
--- a/src/vulkan/anv_batch_chain.c
+++ b/src/vulkan/anv_batch_chain.c
@@ -641,7 +641,7 @@ anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
{
struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
- if (cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY) {
+ if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
anv_batch_emit(&cmd_buffer->batch, GEN7_MI_BATCH_BUFFER_END);
/* Round batch up to an even number of dwords. */
@@ -653,7 +653,7 @@ anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
- if (cmd_buffer->level == VK_CMD_BUFFER_LEVEL_SECONDARY) {
+ if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
/* If this is a secondary command buffer, we need to determine the
* mode in which it will be executed with vkExecuteCommands. We
* determine this statically here so that this stays in sync with the
diff --git a/src/vulkan/anv_cmd_buffer.c b/src/vulkan/anv_cmd_buffer.c
index 66b2f65e9f7..f42f6fd5183 100644
--- a/src/vulkan/anv_cmd_buffer.c
+++ b/src/vulkan/anv_cmd_buffer.c
@@ -162,11 +162,11 @@ anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
VkResult anv_CreateCommandBuffer(
VkDevice _device,
- const VkCmdBufferCreateInfo* pCreateInfo,
- VkCmdBuffer* pCmdBuffer)
+ const VkCommandBufferCreateInfo* pCreateInfo,
+ VkCommandBuffer* pCommandBuffer)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- ANV_FROM_HANDLE(anv_cmd_pool, pool, pCreateInfo->cmdPool);
+ ANV_FROM_HANDLE(anv_cmd_pool, pool, pCreateInfo->commandPool);
struct anv_cmd_buffer *cmd_buffer;
VkResult result;
@@ -201,7 +201,7 @@ VkResult anv_CreateCommandBuffer(
list_inithead(&cmd_buffer->pool_link);
}
- *pCmdBuffer = anv_cmd_buffer_to_handle(cmd_buffer);
+ *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);
return VK_SUCCESS;
@@ -212,7 +212,7 @@ VkResult anv_CreateCommandBuffer(
void anv_DestroyCommandBuffer(
VkDevice _device,
- VkCmdBuffer _cmd_buffer)
+ VkCommandBuffer _cmd_buffer)
{
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, _cmd_buffer);
@@ -227,10 +227,10 @@ void anv_DestroyCommandBuffer(
}
VkResult anv_ResetCommandBuffer(
- VkCmdBuffer cmdBuffer,
- VkCmdBufferResetFlags flags)
+ VkCommandBuffer commandBuffer,
+ VkCommandBufferResetFlags flags)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
@@ -258,16 +258,16 @@ anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
}
VkResult anv_BeginCommandBuffer(
- VkCmdBuffer cmdBuffer,
- const VkCmdBufferBeginInfo* pBeginInfo)
+ VkCommandBuffer commandBuffer,
+ const VkCommandBufferBeginInfo* pBeginInfo)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
cmd_buffer->opt_flags = pBeginInfo->flags;
- if (cmd_buffer->level == VK_CMD_BUFFER_LEVEL_SECONDARY) {
+ if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
cmd_buffer->state.framebuffer =
anv_framebuffer_from_handle(pBeginInfo->framebuffer);
cmd_buffer->state.pass =
@@ -286,14 +286,14 @@ VkResult anv_BeginCommandBuffer(
}
VkResult anv_EndCommandBuffer(
- VkCmdBuffer cmdBuffer)
+ VkCommandBuffer commandBuffer)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
struct anv_device *device = cmd_buffer->device;
anv_cmd_buffer_end_batch_buffer(cmd_buffer);
- if (cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY) {
+ if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
/* The algorithm used to compute the validate list is not threadsafe as
* it uses the bo->index field. We have to lock the device around it.
* Fortunately, the chances for contention here are probably very low.
@@ -307,11 +307,11 @@ VkResult anv_EndCommandBuffer(
}
void anv_CmdBindPipeline(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkPipelineBindPoint pipelineBindPoint,
VkPipeline _pipeline)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
switch (pipelineBindPoint) {
@@ -341,11 +341,11 @@ void anv_CmdBindPipeline(
}
void anv_CmdSetViewport(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
uint32_t viewportCount,
const VkViewport* pViewports)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
cmd_buffer->state.dynamic.viewport.count = viewportCount;
memcpy(cmd_buffer->state.dynamic.viewport.viewports,
@@ -355,11 +355,11 @@ void anv_CmdSetViewport(
}
void anv_CmdSetScissor(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
uint32_t scissorCount,
const VkRect2D* pScissors)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
cmd_buffer->state.dynamic.scissor.count = scissorCount;
memcpy(cmd_buffer->state.dynamic.scissor.scissors,
@@ -369,22 +369,22 @@ void anv_CmdSetScissor(
}
void anv_CmdSetLineWidth(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
float lineWidth)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
cmd_buffer->state.dynamic.line_width = lineWidth;
cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}
void anv_CmdSetDepthBias(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
float depthBias,
float depthBiasClamp,
float slopeScaledDepthBias)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
cmd_buffer->state.dynamic.depth_bias.bias = depthBias;
cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
@@ -394,10 +394,10 @@ void anv_CmdSetDepthBias(
}
void anv_CmdSetBlendConstants(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
const float blendConst[4])
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
memcpy(cmd_buffer->state.dynamic.blend_constants,
blendConst, sizeof(float) * 4);
@@ -406,11 +406,11 @@ void anv_CmdSetBlendConstants(
}
void anv_CmdSetDepthBounds(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
float minDepthBounds,
float maxDepthBounds)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;
@@ -419,11 +419,11 @@ void anv_CmdSetDepthBounds(
}
void anv_CmdSetStencilCompareMask(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkStencilFaceFlags faceMask,
uint32_t stencilCompareMask)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
cmd_buffer->state.dynamic.stencil_compare_mask.front = stencilCompareMask;
@@ -434,11 +434,11 @@ void anv_CmdSetStencilCompareMask(
}
void anv_CmdSetStencilWriteMask(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkStencilFaceFlags faceMask,
uint32_t stencilWriteMask)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
cmd_buffer->state.dynamic.stencil_write_mask.front = stencilWriteMask;
@@ -449,11 +449,11 @@ void anv_CmdSetStencilWriteMask(
}
void anv_CmdSetStencilReference(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkStencilFaceFlags faceMask,
uint32_t stencilReference)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
cmd_buffer->state.dynamic.stencil_reference.front = stencilReference;
@@ -464,7 +464,7 @@ void anv_CmdSetStencilReference(
}
void anv_CmdBindDescriptorSets(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkPipelineBindPoint pipelineBindPoint,
VkPipelineLayout _layout,
uint32_t firstSet,
@@ -473,7 +473,7 @@ void anv_CmdBindDescriptorSets(
uint32_t dynamicOffsetCount,
const uint32_t* pDynamicOffsets)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
struct anv_descriptor_set_layout *set_layout;
@@ -519,13 +519,13 @@ void anv_CmdBindDescriptorSets(
}
void anv_CmdBindVertexBuffers(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
uint32_t startBinding,
uint32_t bindingCount,
const VkBuffer* pBuffers,
const VkDeviceSize* pOffsets)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;
/* We have to defer setting up vertex buffer since we need the buffer
@@ -799,7 +799,7 @@ anv_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer,
}
void anv_CmdSetEvent(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkEvent event,
VkPipelineStageFlags stageMask)
{
@@ -807,7 +807,7 @@ void anv_CmdSetEvent(
}
void anv_CmdResetEvent(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkEvent event,
VkPipelineStageFlags stageMask)
{
@@ -815,7 +815,7 @@ void anv_CmdResetEvent(
}
void anv_CmdWaitEvents(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
uint32_t eventCount,
const VkEvent* pEvents,
VkPipelineStageFlags srcStageMask,
@@ -855,14 +855,14 @@ anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
}
void anv_CmdPushConstants(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkPipelineLayout layout,
VkShaderStageFlags stageFlags,
uint32_t start,
uint32_t length,
const void* values)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
VkShaderStage stage;
for_each_bit(stage, stageFlags) {
@@ -876,20 +876,20 @@ void anv_CmdPushConstants(
}
void anv_CmdExecuteCommands(
- VkCmdBuffer cmdBuffer,
- uint32_t cmdBuffersCount,
- const VkCmdBuffer* pCmdBuffers)
+ VkCommandBuffer commandBuffer,
+ uint32_t commandBuffersCount,
+ const VkCommandBuffer* pCmdBuffers)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, primary, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);
- assert(primary->level == VK_CMD_BUFFER_LEVEL_PRIMARY);
+ assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
anv_assert(primary->state.subpass == &primary->state.pass->subpasses[0]);
- for (uint32_t i = 0; i < cmdBuffersCount; i++) {
+ for (uint32_t i = 0; i < commandBuffersCount; i++) {
ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);
- assert(secondary->level == VK_CMD_BUFFER_LEVEL_SECONDARY);
+ assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
anv_cmd_buffer_add_secondary(primary, secondary);
}
@@ -897,8 +897,8 @@ void anv_CmdExecuteCommands(
VkResult anv_CreateCommandPool(
VkDevice _device,
- const VkCmdPoolCreateInfo* pCreateInfo,
- VkCmdPool* pCmdPool)
+ const VkCommandPoolCreateInfo* pCreateInfo,
+ VkCommandPool* pCmdPool)
{
ANV_FROM_HANDLE(anv_device, device, _device);
struct anv_cmd_pool *pool;
@@ -917,22 +917,22 @@ VkResult anv_CreateCommandPool(
void anv_DestroyCommandPool(
VkDevice _device,
- VkCmdPool cmdPool)
+ VkCommandPool commandPool)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- ANV_FROM_HANDLE(anv_cmd_pool, pool, cmdPool);
+ ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
- anv_ResetCommandPool(_device, cmdPool, 0);
+ anv_ResetCommandPool(_device, commandPool, 0);
anv_device_free(device, pool);
}
VkResult anv_ResetCommandPool(
VkDevice device,
- VkCmdPool cmdPool,
- VkCmdPoolResetFlags flags)
+ VkCommandPool commandPool,
+ VkCommandPoolResetFlags flags)
{
- ANV_FROM_HANDLE(anv_cmd_pool, pool, cmdPool);
+ ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
&pool->cmd_buffers, pool_link) {
diff --git a/src/vulkan/anv_device.c b/src/vulkan/anv_device.c
index ebb86c09001..15ad2107627 100644
--- a/src/vulkan/anv_device.c
+++ b/src/vulkan/anv_device.c
@@ -812,8 +812,8 @@ VkResult anv_GetDeviceQueue(
VkResult anv_QueueSubmit(
VkQueue _queue,
- uint32_t cmdBufferCount,
- const VkCmdBuffer* pCmdBuffers,
+ uint32_t commandBufferCount,
+ const VkCommandBuffer* pCommandBuffers,
VkFence _fence)
{
ANV_FROM_HANDLE(anv_queue, queue, _queue);
@@ -821,10 +821,10 @@ VkResult anv_QueueSubmit(
struct anv_device *device = queue->device;
int ret;
- for (uint32_t i = 0; i < cmdBufferCount; i++) {
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCmdBuffers[i]);
+ for (uint32_t i = 0; i < commandBufferCount; i++) {
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
- assert(cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY);
+ assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
if (ret != 0) {
@@ -1526,21 +1526,21 @@ void anv_DestroyFramebuffer(
}
void vkCmdDbgMarkerBegin(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
const char* pMarker)
__attribute__ ((visibility ("default")));
void vkCmdDbgMarkerEnd(
- VkCmdBuffer cmdBuffer)
+ VkCommandBuffer commandBuffer)
__attribute__ ((visibility ("default")));
void vkCmdDbgMarkerBegin(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
const char* pMarker)
{
}
void vkCmdDbgMarkerEnd(
- VkCmdBuffer cmdBuffer)
+ VkCommandBuffer commandBuffer)
{
}
diff --git a/src/vulkan/anv_dump.c b/src/vulkan/anv_dump.c
index 3634ae68732..1c5cd4b56f9 100644
--- a/src/vulkan/anv_dump.c
+++ b/src/vulkan/anv_dump.c
@@ -72,28 +72,28 @@ anv_dump_image_to_ppm(struct anv_device *device,
result = anv_BindImageMemory(vk_device, copy_image, memory, 0);
assert(result == VK_SUCCESS);
- VkCmdPool cmdPool;
+ VkCommandPool commandPool;
result = anv_CreateCommandPool(vk_device,
- &(VkCmdPoolCreateInfo) {
- .sType = VK_STRUCTURE_TYPE_CMD_POOL_CREATE_INFO,
+ &(VkCommandPoolCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
.queueFamilyIndex = 0,
.flags = 0,
- }, &cmdPool);
+ }, &commandPool);
assert(result == VK_SUCCESS);
- VkCmdBuffer cmd;
+ VkCommandBuffer cmd;
result = anv_CreateCommandBuffer(vk_device,
- &(VkCmdBufferCreateInfo) {
- .sType = VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO,
- .cmdPool = cmdPool,
- .level = VK_CMD_BUFFER_LEVEL_PRIMARY,
+ &(VkCommandBufferCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_CREATE_INFO,
+ .commandPool = commandPool,
+ .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
.flags = 0,
}, &cmd);
assert(result == VK_SUCCESS);
result = anv_BeginCommandBuffer(cmd,
- &(VkCmdBufferBeginInfo) {
- .sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO,
+ &(VkCommandBufferBeginInfo) {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
.flags = VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT,
});
assert(result == VK_SUCCESS);
@@ -169,7 +169,7 @@ anv_dump_image_to_ppm(struct anv_device *device,
assert(result == VK_SUCCESS);
anv_DestroyFence(vk_device, fence);
- anv_DestroyCommandPool(vk_device, cmdPool);
+ anv_DestroyCommandPool(vk_device, commandPool);
uint8_t *map;
result = anv_MapMemory(vk_device, memory, 0, reqs.size, 0, (void **)&map);
diff --git a/src/vulkan/anv_meta.c b/src/vulkan/anv_meta.c
index 96792e9d6da..143d637c55a 100644
--- a/src/vulkan/anv_meta.c
+++ b/src/vulkan/anv_meta.c
@@ -575,7 +575,7 @@ meta_emit_blit(struct anv_cmd_buffer *cmd_buffer,
},
.clearValueCount = 0,
.pClearValues = NULL,
- }, VK_RENDER_PASS_CONTENTS_INLINE);
+ }, VK_SUBPASS_CONTENTS_INLINE);
VkPipeline pipeline;
@@ -753,13 +753,13 @@ do_buffer_copy(struct anv_cmd_buffer *cmd_buffer,
}
void anv_CmdCopyBuffer(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkBuffer srcBuffer,
VkBuffer destBuffer,
uint32_t regionCount,
const VkBufferCopy* pRegions)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
ANV_FROM_HANDLE(anv_buffer, dest_buffer, destBuffer);
@@ -831,7 +831,7 @@ void anv_CmdCopyBuffer(
}
void anv_CmdCopyImage(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkImage srcImage,
VkImageLayout srcImageLayout,
VkImage destImage,
@@ -839,7 +839,7 @@ void anv_CmdCopyImage(
uint32_t regionCount,
const VkImageCopy* pRegions)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_image, src_image, srcImage);
ANV_FROM_HANDLE(anv_image, dest_image, destImage);
@@ -939,7 +939,7 @@ void anv_CmdCopyImage(
}
void anv_CmdBlitImage(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkImage srcImage,
VkImageLayout srcImageLayout,
VkImage destImage,
@@ -949,7 +949,7 @@ void anv_CmdBlitImage(
VkTexFilter filter)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_image, src_image, srcImage);
ANV_FROM_HANDLE(anv_image, dest_image, destImage);
@@ -1082,14 +1082,14 @@ make_image_for_buffer(VkDevice vk_device, VkBuffer vk_buffer, VkFormat format,
}
void anv_CmdCopyBufferToImage(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkBuffer srcBuffer,
VkImage destImage,
VkImageLayout destImageLayout,
uint32_t regionCount,
const VkBufferImageCopy* pRegions)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_image, dest_image, destImage);
VkDevice vk_device = anv_device_to_handle(cmd_buffer->device);
const VkFormat orig_format = dest_image->format->vk_format;
@@ -1208,14 +1208,14 @@ void anv_CmdCopyBufferToImage(
}
void anv_CmdCopyImageToBuffer(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkImage srcImage,
VkImageLayout srcImageLayout,
VkBuffer destBuffer,
uint32_t regionCount,
const VkBufferImageCopy* pRegions)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_image, src_image, srcImage);
VkDevice vk_device = anv_device_to_handle(cmd_buffer->device);
struct anv_meta_saved_state saved_state;
@@ -1323,7 +1323,7 @@ void anv_CmdCopyImageToBuffer(
}
void anv_CmdUpdateBuffer(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkBuffer destBuffer,
VkDeviceSize destOffset,
VkDeviceSize dataSize,
@@ -1333,7 +1333,7 @@ void anv_CmdUpdateBuffer(
}
void anv_CmdFillBuffer(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkBuffer destBuffer,
VkDeviceSize destOffset,
VkDeviceSize fillSize,
@@ -1343,7 +1343,7 @@ void anv_CmdFillBuffer(
}
void anv_CmdResolveImage(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkImage srcImage,
VkImageLayout srcImageLayout,
VkImage destImage,
diff --git a/src/vulkan/anv_meta_clear.c b/src/vulkan/anv_meta_clear.c
index 6645e37d124..0709c41db46 100644
--- a/src/vulkan/anv_meta_clear.c
+++ b/src/vulkan/anv_meta_clear.c
@@ -311,7 +311,7 @@ emit_load_color_clear(struct anv_cmd_buffer *cmd_buffer,
VkClearColorValue clear_value)
{
struct anv_device *device = cmd_buffer->device;
- VkCmdBuffer cmd_buffer_h = anv_cmd_buffer_to_handle(cmd_buffer);
+ VkCommandBuffer cmd_buffer_h = anv_cmd_buffer_to_handle(cmd_buffer);
const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
VkPipeline pipeline_h =
anv_pipeline_to_handle(device->meta_state.clear.color_pipeline);
@@ -487,7 +487,7 @@ emit_load_depthstencil_clear(struct anv_cmd_buffer *cmd_buffer,
VkClearDepthStencilValue clear_value)
{
struct anv_device *device = cmd_buffer->device;
- VkCmdBuffer cmd_buffer_h = anv_cmd_buffer_to_handle(cmd_buffer);
+ VkCommandBuffer cmd_buffer_h = anv_cmd_buffer_to_handle(cmd_buffer);
const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
const struct depthstencil_clear_vattrs vertex_data[3] = {
@@ -678,14 +678,14 @@ anv_cmd_buffer_clear_attachments(struct anv_cmd_buffer *cmd_buffer,
}
void anv_CmdClearColorImage(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkImage _image,
VkImageLayout imageLayout,
const VkClearColorValue* pColor,
uint32_t rangeCount,
const VkImageSubresourceRange* pRanges)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_image, image, _image);
struct anv_meta_saved_state saved_state;
@@ -783,7 +783,7 @@ void anv_CmdClearColorImage(
.pClearValues = (VkClearValue[]) {
{ .color = *pColor },
},
- }, VK_RENDER_PASS_CONTENTS_INLINE);
+ }, VK_SUBPASS_CONTENTS_INLINE);
ANV_CALL(CmdEndRenderPass)(anv_cmd_buffer_to_handle(cmd_buffer));
}
@@ -794,7 +794,7 @@ void anv_CmdClearColorImage(
}
void anv_CmdClearDepthStencilImage(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkImage image,
VkImageLayout imageLayout,
const VkClearDepthStencilValue* pDepthStencil,
@@ -805,7 +805,7 @@ void anv_CmdClearDepthStencilImage(
}
void anv_CmdClearColorAttachment(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
uint32_t colorAttachment,
VkImageLayout imageLayout,
const VkClearColorValue* pColor,
@@ -816,7 +816,7 @@ void anv_CmdClearColorAttachment(
}
void anv_CmdClearDepthStencilAttachment(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkImageAspectFlags aspectMask,
VkImageLayout imageLayout,
const VkClearDepthStencilValue* pDepthStencil,
diff --git a/src/vulkan/anv_private.h b/src/vulkan/anv_private.h
index c99d4e0a59e..0e148b36b1c 100644
--- a/src/vulkan/anv_private.h
+++ b/src/vulkan/anv_private.h
@@ -1044,8 +1044,8 @@ struct anv_cmd_buffer {
struct anv_state_stream surface_state_stream;
struct anv_state_stream dynamic_state_stream;
- VkCmdBufferOptimizeFlags opt_flags;
- VkCmdBufferLevel level;
+ VkCommandBufferOptimizeFlags opt_flags;
+ VkCommandBufferLevel level;
struct anv_cmd_state state;
};
@@ -1549,13 +1549,13 @@ void anv_dump_image_to_ppm(struct anv_device *device,
#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
struct __anv_type *__name = __anv_type ## _from_handle(__handle)
-ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCmdBuffer)
+ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCmdPool)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
diff --git a/src/vulkan/anv_query.c b/src/vulkan/anv_query.c
index 68535b40cac..8891aa02d76 100644
--- a/src/vulkan/anv_query.c
+++ b/src/vulkan/anv_query.c
@@ -142,7 +142,7 @@ VkResult anv_GetQueryPoolResults(
}
void anv_CmdResetQueryPool(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
uint32_t startQuery,
uint32_t queryCount)
diff --git a/src/vulkan/gen7_cmd_buffer.c b/src/vulkan/gen7_cmd_buffer.c
index 4b3922d8278..b69982d6e52 100644
--- a/src/vulkan/gen7_cmd_buffer.c
+++ b/src/vulkan/gen7_cmd_buffer.c
@@ -235,12 +235,12 @@ static const uint32_t restart_index_for_type[] = {
};
void genX(CmdBindIndexBuffer)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkBuffer _buffer,
VkDeviceSize offset,
VkIndexType indexType)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
@@ -508,13 +508,13 @@ cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer)
}
void genX(CmdDraw)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
uint32_t vertexCount,
uint32_t instanceCount,
uint32_t firstVertex,
uint32_t firstInstance)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
cmd_buffer_flush_state(cmd_buffer);
@@ -530,14 +530,14 @@ void genX(CmdDraw)(
}
void genX(CmdDrawIndexed)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
uint32_t indexCount,
uint32_t instanceCount,
uint32_t firstIndex,
int32_t vertexOffset,
uint32_t firstInstance)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
cmd_buffer_flush_state(cmd_buffer);
@@ -578,13 +578,13 @@ gen7_batch_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
#define GEN7_3DPRIM_BASE_VERTEX 0x2440
void genX(CmdDrawIndirect)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkBuffer _buffer,
VkDeviceSize offset,
uint32_t count,
uint32_t stride)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
struct anv_bo *bo = buffer->bo;
@@ -605,13 +605,13 @@ void genX(CmdDrawIndirect)(
}
void genX(CmdDrawIndexedIndirect)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkBuffer _buffer,
VkDeviceSize offset,
uint32_t count,
uint32_t stride)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
struct anv_bo *bo = buffer->bo;
@@ -632,12 +632,12 @@ void genX(CmdDrawIndexedIndirect)(
}
void genX(CmdDispatch)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
uint32_t x,
uint32_t y,
uint32_t z)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
@@ -662,11 +662,11 @@ void genX(CmdDispatch)(
#define GPGPU_DISPATCHDIMZ 0x2508
void genX(CmdDispatchIndirect)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkBuffer _buffer,
VkDeviceSize offset)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
@@ -815,11 +815,11 @@ begin_render_pass(struct anv_cmd_buffer *cmd_buffer,
}
void genX(CmdBeginRenderPass)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
const VkRenderPassBeginInfo* pRenderPassBegin,
- VkRenderPassContents contents)
+ VkSubpassContents contents)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
begin_render_pass(cmd_buffer, pRenderPassBegin);
@@ -828,20 +828,20 @@ void genX(CmdBeginRenderPass)(
}
void genX(CmdNextSubpass)(
- VkCmdBuffer cmdBuffer,
- VkRenderPassContents contents)
+ VkCommandBuffer commandBuffer,
+ VkSubpassContents contents)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- assert(cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY);
+ assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
gen7_cmd_buffer_begin_subpass(cmd_buffer, cmd_buffer->state.subpass + 1);
}
void genX(CmdEndRenderPass)(
- VkCmdBuffer cmdBuffer)
+ VkCommandBuffer commandBuffer)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
/* Emit a flushing pipe control at the end of a pass. This is kind of a
* hack but it ensures that render targets always actually get written.
diff --git a/src/vulkan/gen8_cmd_buffer.c b/src/vulkan/gen8_cmd_buffer.c
index 1d1433817d9..88062a6f61b 100644
--- a/src/vulkan/gen8_cmd_buffer.c
+++ b/src/vulkan/gen8_cmd_buffer.c
@@ -364,13 +364,13 @@ cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer)
}
void genX(CmdDraw)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
uint32_t vertexCount,
uint32_t instanceCount,
uint32_t firstVertex,
uint32_t firstInstance)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
cmd_buffer_flush_state(cmd_buffer);
@@ -384,14 +384,14 @@ void genX(CmdDraw)(
}
void genX(CmdDrawIndexed)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
uint32_t indexCount,
uint32_t instanceCount,
uint32_t firstIndex,
int32_t vertexOffset,
uint32_t firstInstance)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
cmd_buffer_flush_state(cmd_buffer);
@@ -430,13 +430,13 @@ emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
#define GEN7_3DPRIM_BASE_VERTEX 0x2440
void genX(CmdDrawIndirect)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkBuffer _buffer,
VkDeviceSize offset,
uint32_t count,
uint32_t stride)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
struct anv_bo *bo = buffer->bo;
uint32_t bo_offset = buffer->offset + offset;
@@ -455,12 +455,12 @@ void genX(CmdDrawIndirect)(
}
void genX(CmdBindIndexBuffer)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkBuffer _buffer,
VkDeviceSize offset,
VkIndexType indexType)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
static const uint32_t vk_to_gen_index_type[] = {
@@ -555,13 +555,13 @@ cmd_buffer_flush_compute_state(struct anv_cmd_buffer *cmd_buffer)
}
void genX(CmdDrawIndexedIndirect)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkBuffer _buffer,
VkDeviceSize offset,
uint32_t count,
uint32_t stride)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
struct anv_bo *bo = buffer->bo;
uint32_t bo_offset = buffer->offset + offset;
@@ -580,12 +580,12 @@ void genX(CmdDrawIndexedIndirect)(
}
void genX(CmdDispatch)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
uint32_t x,
uint32_t y,
uint32_t z)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
@@ -610,11 +610,11 @@ void genX(CmdDispatch)(
#define GPGPU_DISPATCHDIMZ 0x2508
void genX(CmdDispatchIndirect)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkBuffer _buffer,
VkDeviceSize offset)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
@@ -740,11 +740,11 @@ genX(cmd_buffer_begin_subpass)(struct anv_cmd_buffer *cmd_buffer,
}
void genX(CmdBeginRenderPass)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
const VkRenderPassBeginInfo* pRenderPassBegin,
- VkRenderPassContents contents)
+ VkSubpassContents contents)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
@@ -770,20 +770,20 @@ void genX(CmdBeginRenderPass)(
}
void genX(CmdNextSubpass)(
- VkCmdBuffer cmdBuffer,
- VkRenderPassContents contents)
+ VkCommandBuffer commandBuffer,
+ VkSubpassContents contents)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- assert(cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY);
+ assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
genX(cmd_buffer_begin_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);
}
void genX(CmdEndRenderPass)(
- VkCmdBuffer cmdBuffer)
+ VkCommandBuffer commandBuffer)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
/* Emit a flushing pipe control at the end of a pass. This is kind of a
* hack but it ensures that render targets always actually get written.
@@ -811,12 +811,12 @@ emit_ps_depth_count(struct anv_batch *batch,
}
void genX(CmdBeginQuery)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
uint32_t slot,
VkQueryControlFlags flags)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
switch (pool->type) {
@@ -832,11 +832,11 @@ void genX(CmdBeginQuery)(
}
void genX(CmdEndQuery)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
uint32_t slot)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
switch (pool->type) {
@@ -854,12 +854,12 @@ void genX(CmdEndQuery)(
#define TIMESTAMP 0x2358
void genX(CmdWriteTimestamp)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkTimestampType timestampType,
VkBuffer destBuffer,
VkDeviceSize destOffset)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
struct anv_bo *bo = buffer->bo;
@@ -931,7 +931,7 @@ emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
}
void genX(CmdCopyQueryPoolResults)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
uint32_t startQuery,
uint32_t queryCount,
@@ -940,7 +940,7 @@ void genX(CmdCopyQueryPoolResults)(
VkDeviceSize destStride,
VkQueryResultFlags flags)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
uint32_t slot_offset, dst_offset;
diff --git a/src/vulkan/genX_cmd_buffer.c b/src/vulkan/genX_cmd_buffer.c
index 31dcdcd8dd5..166e335ae53 100644
--- a/src/vulkan/genX_cmd_buffer.c
+++ b/src/vulkan/genX_cmd_buffer.c
@@ -139,14 +139,14 @@ genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
}
void genX(CmdPipelineBarrier)(
- VkCmdBuffer cmdBuffer,
+ VkCommandBuffer commandBuffer,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags destStageMask,
VkBool32 byRegion,
uint32_t memBarrierCount,
const void* const* ppMemBarriers)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
uint32_t b, *dw;
struct GENX(PIPE_CONTROL) cmd = {