author    | Jason Ekstrand <[email protected]> | 2016-04-18 16:33:46 -0700
committer | Jason Ekstrand <[email protected]> | 2016-04-20 14:54:09 -0700
commit    | 744e133431203145225c475264ae8a4d8ec8f7d9 (patch)
tree      | 32a91825804871ef67a819a2d57cd3f6912378c5 /src/intel/vulkan/gen7_cmd_buffer.c
parent    | cae2f14947be97a5b99848da03af5a3a4acfa8fa (diff)
anv/gen7_cmd_buffer: Use the new emit macro
Acked-by: Kristian Høgsberg <[email protected]>
Diffstat (limited to 'src/intel/vulkan/gen7_cmd_buffer.c')
-rw-r--r-- | src/intel/vulkan/gen7_cmd_buffer.c | 122
1 file changed, 70 insertions, 52 deletions
diff --git a/src/intel/vulkan/gen7_cmd_buffer.c b/src/intel/vulkan/gen7_cmd_buffer.c
index 5130a40d277..88964daa9f0 100644
--- a/src/intel/vulkan/gen7_cmd_buffer.c
+++ b/src/intel/vulkan/gen7_cmd_buffer.c
@@ -57,18 +57,20 @@ gen7_cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
    anv_foreach_stage(s, stages) {
       if (cmd_buffer->state.samplers[s].alloc_size > 0) {
-         anv_batch_emit(&cmd_buffer->batch,
-                        GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS),
-                        ._3DCommandSubOpcode = sampler_state_opcodes[s],
-                        .PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset);
+         anv_batch_emit_blk(&cmd_buffer->batch,
+                            GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) {
+            ssp._3DCommandSubOpcode = sampler_state_opcodes[s];
+            ssp.PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset;
+         }
       }
 
       /* Always emit binding table pointers if we're asked to, since on SKL
        * this is what flushes push constants. */
-      anv_batch_emit(&cmd_buffer->batch,
-                     GENX(3DSTATE_BINDING_TABLE_POINTERS_VS),
-                     ._3DCommandSubOpcode = binding_table_opcodes[s],
-                     .PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset);
+      anv_batch_emit_blk(&cmd_buffer->batch,
+                         GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) {
+         btp._3DCommandSubOpcode = binding_table_opcodes[s];
+         btp.PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset;
+      }
    }
 }
@@ -173,8 +175,10 @@ gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer)
       }
    }
 
-   anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_SCISSOR_STATE_POINTERS,
-                  .ScissorRectPointer = scissor_state.offset);
+   anv_batch_emit_blk(&cmd_buffer->batch,
+                      GEN7_3DSTATE_SCISSOR_STATE_POINTERS, ssp) {
+      ssp.ScissorRectPointer = scissor_state.offset;
+   }
 
    if (!cmd_buffer->device->info.has_llc)
       anv_state_clflush(scissor_state);
@@ -237,9 +241,10 @@ flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
    unsigned push_constant_regs = reg_aligned_constant_size / 32;
 
    if (push_state.alloc_size) {
-      anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD),
-                     .CURBETotalDataLength = push_state.alloc_size,
-                     .CURBEDataStartAddress = push_state.offset);
+      anv_batch_emit_blk(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
+         curbe.CURBETotalDataLength = push_state.alloc_size;
+         curbe.CURBEDataStartAddress = push_state.offset;
+      }
    }
 
    assert(prog_data->total_shared <= 64 * 1024);
@@ -269,18 +274,15 @@ flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
                               pipeline->cs_thread_width_max);
 
    const uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
-   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD),
-                  .InterfaceDescriptorTotalLength = size,
-                  .InterfaceDescriptorDataStartAddress = state.offset);
+   anv_batch_emit_blk(&cmd_buffer->batch,
+                      GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), idl) {
+      idl.InterfaceDescriptorTotalLength = size;
+      idl.InterfaceDescriptorDataStartAddress = state.offset;
+   }
 
    return VK_SUCCESS;
 }
 
-#define emit_lri(batch, reg, imm) \
-   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), \
-                  .RegisterOffset = __anv_reg_num(reg), \
-                  .DataDWord = imm)
-
 void
 genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
 {
@@ -310,10 +312,11 @@ genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
     * flushed, which involves a first PIPE_CONTROL flush which stalls the
     * pipeline...
     */
-   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
-                  .DCFlushEnable = true,
-                  .PostSyncOperation = NoWrite,
-                  .CommandStreamerStallEnable = true);
+   anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+      pc.DCFlushEnable = true;
+      pc.CommandStreamerStallEnable = true;
+      pc.PostSyncOperation = NoWrite;
+   }
 
    /* ...followed by a second pipelined PIPE_CONTROL that initiates
    * invalidation of the relevant caches. Note that because RO
@@ -329,23 +332,28 @@ genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
    * previous and subsequent PIPE_CONTROLs already guarantee that there is
    * no concurrent GPGPU kernel execution (see SKL HSD 2132585).
    */
-   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
-                  .TextureCacheInvalidationEnable = true,
-                  .ConstantCacheInvalidationEnable = true,
-                  .InstructionCacheInvalidateEnable = true,
-                  .StateCacheInvalidationEnable = true,
-                  .PostSyncOperation = NoWrite);
+   anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+      pc.TextureCacheInvalidationEnable = true;
+      pc.ConstantCacheInvalidationEnable = true;
+      pc.InstructionCacheInvalidateEnable = true;
+      pc.StateCacheInvalidationEnable = true;
+      pc.PostSyncOperation = NoWrite;
+   }
 
    /* Now send a third stalling flush to make sure that invalidation is
    * complete when the L3 configuration registers are modified.
    */
-   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
-                  .DCFlushEnable = true,
-                  .PostSyncOperation = NoWrite,
-                  .CommandStreamerStallEnable = true);
+   anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+      pc.DCFlushEnable = true;
+      pc.CommandStreamerStallEnable = true;
+      pc.PostSyncOperation = NoWrite;
+   }
 
    anv_finishme("write GEN7_L3SQCREG1");
-   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG2), l3cr2_val);
+   anv_batch_emit_blk(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+      lri.RegisterOffset = GENX(L3CNTLREG2_num);
+      lri.DataDWord = l3cr2_val;
+   }
 
    uint32_t l3cr3_slm, l3cr3_noslm;
    anv_pack_struct(&l3cr3_noslm, GENX(L3CNTLREG3),
@@ -357,7 +365,10 @@ genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
                    .CAllocation = 8,
                    .TAllocation = 8);
    const uint32_t l3cr3_val = enable_slm ? l3cr3_slm : l3cr3_noslm;
-   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG3), l3cr3_val);
+   anv_batch_emit_blk(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+      lri.RegisterOffset = GENX(L3CNTLREG3_num);
+      lri.DataDWord = l3cr3_val;
+   }
 
    cmd_buffer->state.current_l3_config = l3cr2_val;
 }
@@ -444,9 +455,10 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
       if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);
 
-      anv_batch_emit(&cmd_buffer->batch,
-                     GENX(3DSTATE_CC_STATE_POINTERS),
-                     .ColorCalcStatePointer = cc_state.offset);
+      anv_batch_emit_blk(&cmd_buffer->batch,
+                         GENX(3DSTATE_CC_STATE_POINTERS), ccp) {
+         ccp.ColorCalcStatePointer = cc_state.offset;
+      }
    }
 
    if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
@@ -470,9 +482,10 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
                                      pipeline->gen7.depth_stencil_state,
                                      GENX(DEPTH_STENCIL_STATE_length), 64);
 
-      anv_batch_emit(&cmd_buffer->batch,
-                     GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS),
-                     .PointertoDEPTH_STENCIL_STATE = ds_state.offset);
+      anv_batch_emit_blk(&cmd_buffer->batch,
+                         GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS), dsp) {
+         dsp.PointertoDEPTH_STENCIL_STATE = ds_state.offset;
+      }
    }
 
    if (cmd_buffer->state.gen7.index_buffer &&
@@ -482,19 +495,24 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
       uint32_t offset = cmd_buffer->state.gen7.index_offset;
 
 #if GEN_IS_HASWELL
-      anv_batch_emit(&cmd_buffer->batch, GEN75_3DSTATE_VF,
-                     .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
-                     .CutIndex = cmd_buffer->state.restart_index);
+      anv_batch_emit_blk(&cmd_buffer->batch, GEN75_3DSTATE_VF, vf) {
+         vf.IndexedDrawCutIndexEnable = pipeline->primitive_restart;
+         vf.CutIndex = cmd_buffer->state.restart_index;
+      }
 #endif
 
-      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER),
+      anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
 #if !GEN_IS_HASWELL
-                     .CutIndexEnable = pipeline->primitive_restart,
+         ib.CutIndexEnable = pipeline->primitive_restart;
#endif
-                     .IndexFormat = cmd_buffer->state.gen7.index_type,
-                     .MemoryObjectControlState = GENX(MOCS),
-                     .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
-                     .BufferEndingAddress = { buffer->bo, buffer->offset + buffer->size });
+         ib.IndexFormat = cmd_buffer->state.gen7.index_type;
+         ib.MemoryObjectControlState = GENX(MOCS);
+
+         ib.BufferStartingAddress =
+            (struct anv_address) { buffer->bo, buffer->offset + offset };
+         ib.BufferEndingAddress =
+            (struct anv_address) { buffer->bo, buffer->offset + buffer->size };
+      }
    }
 
    cmd_buffer->state.dirty = 0;
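For readers who have not seen the new macro, the sketch below models the general idea behind a block-scoped emit macro such as anv_batch_emit_blk: a single-iteration for loop declares a named command struct, the caller assigns its fields inside the braces, and the struct is packed into the batch when the block closes. Everything in the sketch (example_batch, example_emit_dwords, GEN_EXAMPLE_CMD, and the macro body itself) is an illustrative stand-in under that assumption, not Mesa's actual definition, which also handles details this sketch omits.

/* Illustrative sketch only: a simplified model of a block-scoped emit macro.
 * None of these names come from Mesa; they exist to show the single-iteration
 * for-loop trick that lets "emit(...) { field = ...; }" pack a command when
 * the block closes. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct example_batch {
   uint32_t dwords[64];   /* toy command buffer storage */
   uint32_t next;         /* index of the next free dword */
};

/* Stand-in for a genxml-generated command: a struct plus a _length constant
 * and a _pack() helper that serializes the struct into dwords. */
struct GEN_EXAMPLE_CMD {
   uint32_t RegisterOffset;
   uint32_t DataDWord;
};
#define GEN_EXAMPLE_CMD_length 2

static void
GEN_EXAMPLE_CMD_pack(uint32_t *dst, const struct GEN_EXAMPLE_CMD *values)
{
   dst[0] = values->RegisterOffset;
   dst[1] = values->DataDWord;
}

static uint32_t *
example_emit_dwords(struct example_batch *batch, uint32_t count)
{
   /* Reserve 'count' dwords in the batch and return a pointer to them. */
   uint32_t *p = &batch->dwords[batch->next];
   batch->next += count;
   return p;
}

/* The block macro: the for-loop body runs exactly once; the "increment"
 * expression packs the filled-in struct into the reserved space and then
 * clears _dst so the loop terminates. */
#define example_batch_emit_blk(batch, cmd, name)                            \
   for (struct cmd name = { 0 },                                            \
        *_dst = (struct cmd *)example_emit_dwords((batch), cmd##_length);   \
        _dst != NULL;                                                       \
        (cmd##_pack((uint32_t *)_dst, &name), _dst = NULL))

int
main(void)
{
   struct example_batch batch;
   memset(&batch, 0, sizeof(batch));

   /* Usage mirrors the converted call sites in the diff: open a block,
    * assign named fields, and the command is packed at the closing brace.
    * The field values here are arbitrary. */
   example_batch_emit_blk(&batch, GEN_EXAMPLE_CMD, lri) {
      lri.RegisterOffset = 0x1234;
      lri.DataDWord = 0xdeadbeef;
   }

   printf("dword0 = 0x%08x, dword1 = 0x%08x\n",
          (unsigned)batch.dwords[0], (unsigned)batch.dwords[1]);
   return 0;
}

The payoff of the block form, as the diff shows, is that multi-field commands read as ordinary assignments instead of long designated-initializer argument lists, and conditional fields (such as the GEN_IS_HASWELL cases above) can sit inside normal preprocessor blocks without splitting a single function call.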