diff options
author | Dave Airlie <[email protected]> | 2017-11-06 04:05:59 +0000 |
---|---|---|
committer | Dave Airlie <[email protected]> | 2017-11-06 21:45:59 +0000 |
commit | 25660499b62a60f99ad72807fcc37f9fb622a2ea (patch) | |
tree | 7ccb3da5223cf01097441586fa06664d1b95c6ad /src/amd | |
parent | 31b5da7958a8d7635d39ec160723bf6950443b32 (diff) |
radv: wrap cs_add_buffer in an inline. (v2)
The next patch will try to avoid calling the indirect function.
v2: add a missing conversion.
Reviewed-by: Samuel Pitoiset <[email protected]>
Signed-off-by: Dave Airlie <[email protected]>
Diffstat (limited to 'src/amd')
-rw-r--r-- | src/amd/vulkan/radv_cmd_buffer.c | 44 | ||||
-rw-r--r-- | src/amd/vulkan/radv_descriptor_set.c | 6 | ||||
-rw-r--r-- | src/amd/vulkan/radv_device.c | 14 | ||||
-rw-r--r-- | src/amd/vulkan/radv_meta_buffer.c | 8 | ||||
-rw-r--r-- | src/amd/vulkan/radv_query.c | 10 | ||||
-rw-r--r-- | src/amd/vulkan/radv_radeon_winsys.h | 8 |
6 files changed, 49 insertions, 41 deletions
diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c index 8f6b8682af6..9a191cda1c5 100644 --- a/src/amd/vulkan/radv_cmd_buffer.c +++ b/src/amd/vulkan/radv_cmd_buffer.c @@ -279,8 +279,8 @@ radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer) cmd_buffer->sample_positions_needed = false; if (cmd_buffer->upload.upload_bo) - cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, - cmd_buffer->upload.upload_bo, 8); + radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, + cmd_buffer->upload.upload_bo, 8); cmd_buffer->upload.offset = 0; cmd_buffer->record_result = VK_SUCCESS; @@ -321,7 +321,7 @@ radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer, return false; } - device->ws->cs_add_buffer(cmd_buffer->cs, bo, 8); + radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo, 8); if (cmd_buffer->upload.upload_bo) { upload = malloc(sizeof(*upload)); @@ -415,7 +415,7 @@ void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer) MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 7); ++cmd_buffer->state.trace_id; - device->ws->cs_add_buffer(cs, device->trace_bo, 8); + radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8); radv_emit_write_data_packet(cs, va, 1, &cmd_buffer->state.trace_id); radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id)); @@ -472,7 +472,7 @@ radv_save_pipeline(struct radv_cmd_buffer *cmd_buffer, data[0] = (uintptr_t)pipeline; data[1] = (uintptr_t)pipeline >> 32; - device->ws->cs_add_buffer(cs, device->trace_bo, 8); + radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8); radv_emit_write_data_packet(cs, va, 2, data); } @@ -508,7 +508,7 @@ radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer) data[i * 2 + 1] = (uintptr_t)set >> 32; } - device->ws->cs_add_buffer(cs, device->trace_bo, 8); + radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8); radv_emit_write_data_packet(cs, va, MAX_SETS * 2, data); } @@ 
-673,7 +673,7 @@ radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer, va = radv_buffer_get_va(shader->bo) + shader->bo_offset; - ws->cs_add_buffer(cs, shader->bo, 8); + radv_cs_add_buffer(ws, cs, shader->bo, 8); if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) si_cp_dma_prefetch(cmd_buffer, va, shader->code_size); } @@ -1310,7 +1310,7 @@ radv_set_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer, if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) ++reg_count; - cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8); + radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, image->bo, 8); radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0)); radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) | @@ -1371,7 +1371,7 @@ radv_set_dcc_need_cmask_elim_pred(struct radv_cmd_buffer *cmd_buffer, if (!image->surface.dcc_size) return; - cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8); + radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, image->bo, 8); radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0)); radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) | @@ -1395,7 +1395,7 @@ radv_set_color_clear_regs(struct radv_cmd_buffer *cmd_buffer, if (!image->cmask.size && !image->surface.dcc_size) return; - cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8); + radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, image->bo, 8); radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0)); radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) | @@ -1458,7 +1458,7 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) int idx = subpass->color_attachments[i].attachment; struct radv_attachment_info *att = &framebuffer->attachments[idx]; - cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, att->attachment->bo, 8); + radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo, 8); assert(att->attachment->aspect_mask & 
VK_IMAGE_ASPECT_COLOR_BIT); radv_emit_fb_color_state(cmd_buffer, i, &att->cb); @@ -1471,7 +1471,7 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) VkImageLayout layout = subpass->depth_stencil_attachment.layout; struct radv_attachment_info *att = &framebuffer->attachments[idx]; struct radv_image *image = att->attachment->image; - cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, att->attachment->bo, 8); + radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo, 8); MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image, cmd_buffer->queue_family_index, cmd_buffer->queue_family_index); @@ -1801,7 +1801,7 @@ radv_cmd_buffer_update_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, bo struct radv_buffer *buffer = cmd_buffer->vertex_bindings[vb].buffer; uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb]; - device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8); + radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer->bo, 8); va = radv_buffer_get_va(buffer->bo); offset = cmd_buffer->vertex_bindings[vb].offset + velems->offset[i]; @@ -2198,7 +2198,7 @@ static void emit_gfx_buffer_state(struct radv_cmd_buffer *cmd_buffer) struct radv_device *device = cmd_buffer->device; if (device->gfx_init) { uint64_t va = radv_buffer_get_va(device->gfx_init); - device->ws->cs_add_buffer(cmd_buffer->cs, device->gfx_init, 8); + radv_cs_add_buffer(device->ws, cmd_buffer->cs, device->gfx_init, 8); radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0)); radeon_emit(cmd_buffer->cs, va); radeon_emit(cmd_buffer->cs, va >> 32); @@ -2317,7 +2317,7 @@ void radv_CmdBindIndexBuffer( int index_size_shift = cmd_buffer->state.index_type ? 
2 : 1; cmd_buffer->state.max_index_count = (index_buffer->size - offset) >> index_size_shift; cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER; - cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, index_buffer->bo, 8); + radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo, 8); } @@ -2335,10 +2335,10 @@ radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer, for (unsigned j = 0; j < set->layout->buffer_count; ++j) if (set->descriptors[j]) - ws->cs_add_buffer(cmd_buffer->cs, set->descriptors[j], 7); + radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j], 7); if(set->bo) - ws->cs_add_buffer(cmd_buffer->cs, set->bo, 8); + radv_cs_add_buffer(ws, cmd_buffer->cs, set->bo, 8); } void radv_CmdBindDescriptorSets( @@ -3110,7 +3110,7 @@ radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer, va += info->indirect->offset + info->indirect_offset; - ws->cs_add_buffer(cs, info->indirect->bo, 8); + radv_cs_add_buffer(ws, cs, info->indirect->bo, 8); radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0)); radeon_emit(cs, 1); @@ -3122,7 +3122,7 @@ radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer, count_va += info->count_buffer->offset + info->count_buffer_offset; - ws->cs_add_buffer(cs, info->count_buffer->bo, 8); + radv_cs_add_buffer(ws, cs, info->count_buffer->bo, 8); } if (!state->subpass->view_mask) { @@ -3467,7 +3467,7 @@ radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer, va += info->indirect->offset + info->indirect_offset; - ws->cs_add_buffer(cs, info->indirect->bo, 8); + radv_cs_add_buffer(ws, cs, info->indirect->bo, 8); if (loc->sgpr_idx != -1) { for (unsigned i = 0; i < grid_used; ++i) { @@ -3934,7 +3934,7 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer, struct radeon_winsys_cs *cs = cmd_buffer->cs; uint64_t va = radv_buffer_get_va(event->bo); - cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8); + radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo, 8); MAYBE_UNUSED unsigned cdw_max = 
radeon_check_space(cmd_buffer->device->ws, cs, 18); @@ -3990,7 +3990,7 @@ void radv_CmdWaitEvents(VkCommandBuffer commandBuffer, RADV_FROM_HANDLE(radv_event, event, pEvents[i]); uint64_t va = radv_buffer_get_va(event->bo); - cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8); + radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo, 8); MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7); diff --git a/src/amd/vulkan/radv_descriptor_set.c b/src/amd/vulkan/radv_descriptor_set.c index 424756c13f5..a98ff37ced6 100644 --- a/src/amd/vulkan/radv_descriptor_set.c +++ b/src/amd/vulkan/radv_descriptor_set.c @@ -566,7 +566,7 @@ static void write_texel_buffer_descriptor(struct radv_device *device, memcpy(dst, buffer_view->state, 4 * 4); if (cmd_buffer) - device->ws->cs_add_buffer(cmd_buffer->cs, buffer_view->bo, 7); + radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer_view->bo, 7); else *buffer_list = buffer_view->bo; } @@ -596,7 +596,7 @@ static void write_buffer_descriptor(struct radv_device *device, S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); if (cmd_buffer) - device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 7); + radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer->bo, 7); else *buffer_list = buffer->bo; } @@ -639,7 +639,7 @@ write_image_descriptor(struct radv_device *device, } if (cmd_buffer) - device->ws->cs_add_buffer(cmd_buffer->cs, iview->bo, 7); + radv_cs_add_buffer(device->ws, cmd_buffer->cs, iview->bo, 7); else *buffer_list = iview->bo; } diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c index e34b19c648a..1ecf70d4a9d 100644 --- a/src/amd/vulkan/radv_device.c +++ b/src/amd/vulkan/radv_device.c @@ -1564,22 +1564,22 @@ radv_get_preamble_cs(struct radv_queue *queue, dest_cs[i] = cs; if (scratch_bo) - queue->device->ws->cs_add_buffer(cs, scratch_bo, 8); + radv_cs_add_buffer(queue->device->ws, cs, scratch_bo, 8); if (esgs_ring_bo) - queue->device->ws->cs_add_buffer(cs, esgs_ring_bo, 8); + 
radv_cs_add_buffer(queue->device->ws, cs, esgs_ring_bo, 8); if (gsvs_ring_bo) - queue->device->ws->cs_add_buffer(cs, gsvs_ring_bo, 8); + radv_cs_add_buffer(queue->device->ws, cs, gsvs_ring_bo, 8); if (tess_factor_ring_bo) - queue->device->ws->cs_add_buffer(cs, tess_factor_ring_bo, 8); + radv_cs_add_buffer(queue->device->ws, cs, tess_factor_ring_bo, 8); if (tess_offchip_ring_bo) - queue->device->ws->cs_add_buffer(cs, tess_offchip_ring_bo, 8); + radv_cs_add_buffer(queue->device->ws, cs, tess_offchip_ring_bo, 8); if (descriptor_bo) - queue->device->ws->cs_add_buffer(cs, descriptor_bo, 8); + radv_cs_add_buffer(queue->device->ws, cs, descriptor_bo, 8); if (descriptor_bo != queue->descriptor_bo) { uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo); @@ -1678,7 +1678,7 @@ radv_get_preamble_cs(struct radv_queue *queue, uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) | S_008F04_SWIZZLE_ENABLE(1); - queue->device->ws->cs_add_buffer(cs, compute_scratch_bo, 8); + radv_cs_add_buffer(queue->device->ws, cs, compute_scratch_bo, 8); radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2); radeon_emit(cs, scratch_va); diff --git a/src/amd/vulkan/radv_meta_buffer.c b/src/amd/vulkan/radv_meta_buffer.c index f7ffcbbc90b..41cdc76b95b 100644 --- a/src/amd/vulkan/radv_meta_buffer.c +++ b/src/amd/vulkan/radv_meta_buffer.c @@ -421,7 +421,7 @@ uint32_t radv_fill_buffer(struct radv_cmd_buffer *cmd_buffer, } else if (size) { uint64_t va = radv_buffer_get_va(bo); va += offset; - cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, bo, 8); + radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, bo, 8); si_cp_dma_clear_buffer(cmd_buffer, va, size, value); } @@ -444,8 +444,8 @@ void radv_copy_buffer(struct radv_cmd_buffer *cmd_buffer, src_va += src_offset; dst_va += dst_offset; - cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, src_bo, 8); - cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, dst_bo, 8); + radv_cs_add_buffer(cmd_buffer->device->ws, 
cmd_buffer->cs, src_bo, 8); + radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_bo, 8); si_cp_dma_buffer_copy(cmd_buffer, src_va, dst_va, size); } @@ -512,7 +512,7 @@ void radv_CmdUpdateBuffer( if (dataSize < RADV_BUFFER_OPS_CS_THRESHOLD) { si_emit_cache_flush(cmd_buffer); - cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, dst_buffer->bo, 8); + radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo, 8); radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, words + 4); diff --git a/src/amd/vulkan/radv_query.c b/src/amd/vulkan/radv_query.c index 16ffd846487..a3cae17ae11 100644 --- a/src/amd/vulkan/radv_query.c +++ b/src/amd/vulkan/radv_query.c @@ -957,8 +957,8 @@ void radv_CmdCopyQueryPoolResults( uint64_t dest_va = radv_buffer_get_va(dst_buffer->bo); dest_va += dst_buffer->offset + dstOffset; - cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, pool->bo, 8); - cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, dst_buffer->bo, 8); + radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pool->bo, 8); + radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo, 8); switch (pool->type) { case VK_QUERY_TYPE_OCCLUSION: @@ -1084,7 +1084,7 @@ void radv_CmdBeginQuery( uint64_t va = radv_buffer_get_va(pool->bo); va += pool->stride * query; - cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 8); + radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo, 8); switch (pool->type) { case VK_QUERY_TYPE_OCCLUSION: @@ -1125,7 +1125,7 @@ void radv_CmdEndQuery( uint64_t avail_va = va + pool->availability_offset + 4 * query; va += pool->stride * query; - cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 8); + radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo, 8); switch (pool->type) { case VK_QUERY_TYPE_OCCLUSION: @@ -1177,7 +1177,7 @@ void radv_CmdWriteTimestamp( uint64_t avail_va = va + pool->availability_offset + 4 * query; uint64_t query_va = va + pool->stride * query; - 
cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 5); + radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo, 5); MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28); diff --git a/src/amd/vulkan/radv_radeon_winsys.h b/src/amd/vulkan/radv_radeon_winsys.h index 395c8499b3d..bab19a6233d 100644 --- a/src/amd/vulkan/radv_radeon_winsys.h +++ b/src/amd/vulkan/radv_radeon_winsys.h @@ -279,4 +279,12 @@ static inline uint64_t radv_buffer_get_va(struct radeon_winsys_bo *bo) return bo->va; } +static inline void radv_cs_add_buffer(struct radeon_winsys *ws, + struct radeon_winsys_cs *cs, + struct radeon_winsys_bo *bo, + uint8_t priority) +{ + ws->cs_add_buffer(cs, bo, priority); +} + #endif /* RADV_RADEON_WINSYS_H */ |