author    Jason Ekstrand <[email protected]>  2016-04-18 16:08:49 -0700
committer Jason Ekstrand <[email protected]>  2016-04-20 14:54:09 -0700
commit    a48f8340d95eaf468c5f79bead0c1dad9d1cb6b4 (patch)
tree      909b5febc3e018c730bff63137117c29f410b700 /src/intel/vulkan/gen8_cmd_buffer.c
parent    8a6ced83e9e936c4c29854148940025c20cd831e (diff)
anv/gen8_cmd_buffer: Use the new emit macro
Acked-by: Kristian Høgsberg <[email protected]>
Diffstat (limited to 'src/intel/vulkan/gen8_cmd_buffer.c')
-rw-r--r--  src/intel/vulkan/gen8_cmd_buffer.c | 161
1 file changed, 87 insertions(+), 74 deletions(-)
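The patch mechanically converts call sites from the variadic anv_batch_emit(batch, CMD, .Field = value, ...) form to the block-scoped anv_batch_emit_blk(batch, CMD, name) { name.Field = value; ... } form, as the hunks below show. The following sketch only illustrates the for-loop trick such a block macro typically relies on; the struct, field names, and pack_* helper are hypothetical stand-ins, not the driver's actual anv_batch_emit_blk definition.

/* Illustrative sketch only (hypothetical names, not the real anv macro):
 * a block-style emit macro can be built from a for loop that declares a
 * local struct, runs the user's braced block once to fill it in, and then
 * "packs" the struct when control leaves the block. */

#include <stdio.h>

struct pipe_control {
   int DCFlushEnable;
   int CommandStreamerStallEnable;
};

static void pack_pipe_control(const struct pipe_control *pc)
{
   /* The real driver would pack the fields into batch-buffer dwords here;
    * this stand-in just prints them. */
   printf("PIPE_CONTROL: DCFlush=%d Stall=%d\n",
          pc->DCFlushEnable, pc->CommandStreamerStallEnable);
}

/* Run the attached block exactly once, then pack on exit. */
#define emit_blk(type, name)                                        \
   for (struct type name = {0}, *name##_once = &name; name##_once;  \
        pack_##type(&name), name##_once = NULL)

int main(void)
{
   /* Usage mirrors the new call sites in the patch below. */
   emit_blk(pipe_control, pc) {
      pc.DCFlushEnable = 1;
      pc.CommandStreamerStallEnable = 1;
   }
   return 0;
}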
diff --git a/src/intel/vulkan/gen8_cmd_buffer.c b/src/intel/vulkan/gen8_cmd_buffer.c
index 3956a58d201..96ef8463ccc 100644
--- a/src/intel/vulkan/gen8_cmd_buffer.c
+++ b/src/intel/vulkan/gen8_cmd_buffer.c
@@ -80,20 +80,17 @@ gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer)
anv_state_clflush(cc_state);
}
- anv_batch_emit(&cmd_buffer->batch,
- GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC),
- .CCViewportPointer = cc_state.offset);
- anv_batch_emit(&cmd_buffer->batch,
- GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP),
- .SFClipViewportPointer = sf_clip_state.offset);
+ anv_batch_emit_blk(&cmd_buffer->batch,
+ GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), cc) {
+ cc.CCViewportPointer = cc_state.offset;
+ }
+ anv_batch_emit_blk(&cmd_buffer->batch,
+ GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), clip) {
+ clip.SFClipViewportPointer = sf_clip_state.offset;
+ }
}
#endif
-#define emit_lri(batch, reg, imm) \
- anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), \
- .RegisterOffset = __anv_reg_num(reg), \
- .DataDWord = imm)
-
void
genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
{
@@ -120,10 +117,11 @@ genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
* flushed, which involves a first PIPE_CONTROL flush which stalls the
* pipeline...
*/
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
- .DCFlushEnable = true,
- .PostSyncOperation = NoWrite,
- .CommandStreamerStallEnable = true);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.DCFlushEnable = true;
+ pc.PostSyncOperation = NoWrite;
+ pc.CommandStreamerStallEnable = true;
+ }
/* ...followed by a second pipelined PIPE_CONTROL that initiates
* invalidation of the relevant caches. Note that because RO
@@ -139,22 +137,27 @@ genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
* previous and subsequent PIPE_CONTROLs already guarantee that there is
* no concurrent GPGPU kernel execution (see SKL HSD 2132585).
*/
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
- .TextureCacheInvalidationEnable = true,
- .ConstantCacheInvalidationEnable = true,
- .InstructionCacheInvalidateEnable = true,
- .StateCacheInvalidationEnable = true,
- .PostSyncOperation = NoWrite);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.TextureCacheInvalidationEnable = true;
+ pc.ConstantCacheInvalidationEnable = true;
+ pc.InstructionCacheInvalidateEnable = true;
+ pc.StateCacheInvalidationEnable = true;
+ pc.PostSyncOperation = NoWrite;
+ }
/* Now send a third stalling flush to make sure that invalidation is
* complete when the L3 configuration registers are modified.
*/
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
- .DCFlushEnable = true,
- .PostSyncOperation = NoWrite,
- .CommandStreamerStallEnable = true);
-
- emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG), l3cr_val);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.DCFlushEnable = true;
+ pc.PostSyncOperation = NoWrite;
+ pc.CommandStreamerStallEnable = true;
+ }
+
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+ lri.RegisterOffset = GENX(L3CNTLREG_num);
+ lri.DataDWord = l3cr_val;
+ }
cmd_buffer->state.current_l3_config = l3cr_val;
}
}
@@ -247,10 +250,11 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
if (!cmd_buffer->device->info.has_llc)
anv_state_clflush(cc_state);
- anv_batch_emit(&cmd_buffer->batch,
- GENX(3DSTATE_CC_STATE_POINTERS),
- .ColorCalcStatePointer = cc_state.offset,
- .ColorCalcStatePointerValid = true);
+ anv_batch_emit_blk(&cmd_buffer->batch,
+ GENX(3DSTATE_CC_STATE_POINTERS), ccp) {
+ ccp.ColorCalcStatePointer = cc_state.offset;
+ ccp.ColorCalcStatePointerValid = true;
+ }
}
if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
@@ -291,10 +295,11 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
if (!cmd_buffer->device->info.has_llc)
anv_state_clflush(cc_state);
- anv_batch_emit(&cmd_buffer->batch,
- GEN9_3DSTATE_CC_STATE_POINTERS,
- .ColorCalcStatePointer = cc_state.offset,
- .ColorCalcStatePointerValid = true);
+ anv_batch_emit_blk(&cmd_buffer->batch,
+ GEN9_3DSTATE_CC_STATE_POINTERS, ccp) {
+ ccp.ColorCalcStatePointer = cc_state.offset;
+ ccp.ColorCalcStatePointerValid = true;
+ }
}
if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
@@ -324,10 +329,10 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
ANV_CMD_DIRTY_INDEX_BUFFER)) {
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF),
- .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
- .CutIndex = cmd_buffer->state.restart_index,
- );
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_VF), vf) {
+ vf.IndexedDrawCutIndexEnable = pipeline->primitive_restart;
+ vf.CutIndex = cmd_buffer->state.restart_index;
+ }
}
cmd_buffer->state.dirty = 0;
@@ -354,11 +359,13 @@ void genX(CmdBindIndexBuffer)(
cmd_buffer->state.restart_index = restart_index_for_type[indexType];
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER),
- .IndexFormat = vk_to_gen_index_type[indexType],
- .MemoryObjectControlState = GENX(MOCS),
- .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
- .BufferSize = buffer->size - offset);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
+ ib.IndexFormat = vk_to_gen_index_type[indexType];
+ ib.MemoryObjectControlState = GENX(MOCS);
+ ib.BufferStartingAddress =
+ (struct anv_address) { buffer->bo, buffer->offset + offset };
+ ib.BufferSize = buffer->size - offset;
+ }
cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
}
@@ -392,9 +399,10 @@ flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
unsigned push_constant_regs = reg_aligned_constant_size / 32;
if (push_state.alloc_size) {
- anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD),
- .CURBETotalDataLength = push_state.alloc_size,
- .CURBEDataStartAddress = push_state.offset);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
+ curbe.CURBETotalDataLength = push_state.alloc_size;
+ curbe.CURBEDataStartAddress = push_state.offset;
+ }
}
assert(prog_data->total_shared <= 64 * 1024);
@@ -424,9 +432,11 @@ flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
pipeline->cs_thread_width_max);
uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
- anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD),
- .InterfaceDescriptorTotalLength = size,
- .InterfaceDescriptorDataStartAddress = state.offset);
+ anv_batch_emit_blk(&cmd_buffer->batch,
+ GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), mid) {
+ mid.InterfaceDescriptorTotalLength = size;
+ mid.InterfaceDescriptorDataStartAddress = state.offset;
+ }
return VK_SUCCESS;
}
@@ -466,14 +476,15 @@ void genX(CmdSetEvent)(
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_event, event, _event);
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
- .DestinationAddressType = DAT_PPGTT,
- .PostSyncOperation = WriteImmediateData,
- .Address = {
- &cmd_buffer->device->dynamic_state_block_pool.bo,
- event->state.offset
- },
- .ImmediateData = VK_EVENT_SET);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.DestinationAddressType = DAT_PPGTT;
+ pc.PostSyncOperation = WriteImmediateData;
+ pc.Address = (struct anv_address) {
+ &cmd_buffer->device->dynamic_state_block_pool.bo,
+ event->state.offset
+ };
+ pc.ImmediateData = VK_EVENT_SET;
+ }
}
void genX(CmdResetEvent)(
@@ -484,14 +495,15 @@ void genX(CmdResetEvent)(
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_event, event, _event);
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
- .DestinationAddressType = DAT_PPGTT,
- .PostSyncOperation = WriteImmediateData,
- .Address = {
- &cmd_buffer->device->dynamic_state_block_pool.bo,
- event->state.offset
- },
- .ImmediateData = VK_EVENT_RESET);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.DestinationAddressType = DAT_PPGTT;
+ pc.PostSyncOperation = WriteImmediateData;
+ pc.Address = (struct anv_address) {
+ &cmd_buffer->device->dynamic_state_block_pool.bo,
+ event->state.offset
+ };
+ pc.ImmediateData = VK_EVENT_RESET;
+ }
}
void genX(CmdWaitEvents)(
@@ -511,14 +523,15 @@ void genX(CmdWaitEvents)(
for (uint32_t i = 0; i < eventCount; i++) {
ANV_FROM_HANDLE(anv_event, event, pEvents[i]);
- anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT),
- .WaitMode = PollingMode,
- .CompareOperation = COMPARE_SAD_EQUAL_SDD,
- .SemaphoreDataDword = VK_EVENT_SET,
- .SemaphoreAddress = {
- &cmd_buffer->device->dynamic_state_block_pool.bo,
- event->state.offset
- });
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT), sem) {
+ sem.WaitMode = PollingMode;
+ sem.CompareOperation = COMPARE_SAD_EQUAL_SDD;
+ sem.SemaphoreDataDword = VK_EVENT_SET;
+ sem.SemaphoreAddress = (struct anv_address) {
+ &cmd_buffer->device->dynamic_state_block_pool.bo,
+ event->state.offset
+ };
+ }
}
genX(CmdPipelineBarrier)(commandBuffer, srcStageMask, destStageMask,