summary refs log tree commit diff stats
path: root/src
diff options
context:
space:
mode:
authorSamuel Pitoiset <[email protected]>2019-09-03 17:39:23 +0200
committerSamuel Pitoiset <[email protected]>2019-09-06 15:52:03 +0200
commit83499ac765d33e8645ba9a40e9b0c15614cd85d4 (patch)
tree5d3954df4810443ec3a9d71f9331fb6fd2194340 /src
parent878439bba3c817dee70ebe5aa74238bad36ba8e3 (diff)
radv: merge radv_shader_variant_info into radv_shader_info
Having two different structs is useless.

Signed-off-by: Samuel Pitoiset <[email protected]>
Reviewed-by: Bas Nieuwenhuizen <[email protected]>
Diffstat (limited to 'src')
-rw-r--r--src/amd/vulkan/radv_cmd_buffer.c16
-rw-r--r--src/amd/vulkan/radv_nir_to_llvm.c194
-rw-r--r--src/amd/vulkan/radv_pipeline.c122
-rw-r--r--src/amd/vulkan/radv_private.h6
-rw-r--r--src/amd/vulkan/radv_shader.c84
-rw-r--r--src/amd/vulkan/radv_shader.h146
6 files changed, 275 insertions, 293 deletions
diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c
index aed2e9f8909..7baa0b3aa36 100644
--- a/src/amd/vulkan/radv_cmd_buffer.c
+++ b/src/amd/vulkan/radv_cmd_buffer.c
@@ -225,7 +225,7 @@ radv_bind_streamout_state(struct radv_cmd_buffer *cmd_buffer,
if (!pipeline->streamout_shader)
return;
- info = &pipeline->streamout_shader->info.info;
+ info = &pipeline->streamout_shader->info;
for (int i = 0; i < MAX_SO_BUFFERS; i++)
so->stride_in_dw[i] = info->so.strides[i];
@@ -863,7 +863,7 @@ radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
struct radv_multisample_state *ms = &pipeline->graphics.ms;
struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;
- if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.needs_sample_positions)
+ if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.needs_sample_positions)
cmd_buffer->sample_positions_needed = true;
if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
@@ -2312,11 +2312,11 @@ radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
if (!pipeline->shaders[stage])
continue;
- need_push_constants |= pipeline->shaders[stage]->info.info.loads_push_constants;
- need_push_constants |= pipeline->shaders[stage]->info.info.loads_dynamic_offsets;
+ need_push_constants |= pipeline->shaders[stage]->info.loads_push_constants;
+ need_push_constants |= pipeline->shaders[stage]->info.loads_dynamic_offsets;
- uint8_t base = pipeline->shaders[stage]->info.info.base_inline_push_consts;
- uint8_t count = pipeline->shaders[stage]->info.info.num_inline_push_consts;
+ uint8_t base = pipeline->shaders[stage]->info.base_inline_push_consts;
+ uint8_t count = pipeline->shaders[stage]->info.num_inline_push_consts;
radv_emit_inline_push_consts(cmd_buffer, pipeline, stage,
AC_UD_INLINE_PUSH_CONSTANTS,
@@ -2367,7 +2367,7 @@ radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer,
if ((pipeline_is_dirty ||
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
cmd_buffer->state.pipeline->num_vertex_bindings &&
- radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.has_vertex_buffers) {
+ radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.vs.has_vertex_buffers) {
struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
unsigned vb_offset;
void *vb_ptr;
@@ -4297,7 +4297,7 @@ radv_cs_emit_indirect_draw_packet(struct radv_cmd_buffer *cmd_buffer,
struct radeon_cmdbuf *cs = cmd_buffer->cs;
unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
: V_0287F0_DI_SRC_SEL_AUTO_INDEX;
- bool draw_id_enable = radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.needs_draw_id;
+ bool draw_id_enable = radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.vs.needs_draw_id;
uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
bool predicating = cmd_buffer->state.predicating;
assert(base_reg);
diff --git a/src/amd/vulkan/radv_nir_to_llvm.c b/src/amd/vulkan/radv_nir_to_llvm.c
index 27567317c8c..43b2ea9cb12 100644
--- a/src/amd/vulkan/radv_nir_to_llvm.c
+++ b/src/amd/vulkan/radv_nir_to_llvm.c
@@ -48,7 +48,7 @@
struct radv_shader_context {
struct ac_llvm_context ac;
const struct radv_nir_compiler_options *options;
- struct radv_shader_variant_info *shader_info;
+ struct radv_shader_info *shader_info;
const struct nir_shader *shader;
struct ac_shader_abi abi;
@@ -156,8 +156,8 @@ get_tcs_num_patches(struct radv_shader_context *ctx)
unsigned num_tcs_output_cp = ctx->shader->info.tess.tcs_vertices_out;
uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
- uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
- uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
+ uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
+ uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->tcs.patch_outputs_written);
uint32_t output_vertex_size = num_tcs_outputs * 16;
uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
@@ -212,8 +212,8 @@ calculate_tess_lds_size(struct radv_shader_context *ctx)
unsigned lds_size;
num_tcs_output_cp = ctx->shader->info.tess.tcs_vertices_out;
- num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
- num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
+ num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
+ num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->tcs.patch_outputs_written);
input_vertex_size = ctx->tcs_num_inputs * 16;
output_vertex_size = num_tcs_outputs * 16;
@@ -264,8 +264,8 @@ get_tcs_in_patch_stride(struct radv_shader_context *ctx)
static LLVMValueRef
get_tcs_out_patch_stride(struct radv_shader_context *ctx)
{
- uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
- uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
+ uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
+ uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->tcs.patch_outputs_written);
uint32_t output_vertex_size = num_tcs_outputs * 16;
uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
@@ -276,7 +276,7 @@ get_tcs_out_patch_stride(struct radv_shader_context *ctx)
static LLVMValueRef
get_tcs_out_vertex_stride(struct radv_shader_context *ctx)
{
- uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
+ uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
uint32_t output_vertex_size = num_tcs_outputs * 16;
output_vertex_size /= 4;
return LLVMConstInt(ctx->ac.i32, output_vertex_size, false);
@@ -304,7 +304,7 @@ get_tcs_out_patch0_patch_data_offset(struct radv_shader_context *ctx)
uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
uint32_t output_patch0_offset = input_patch_size;
- uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
+ uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
uint32_t output_vertex_size = num_tcs_outputs * 16;
uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
unsigned num_patches = ctx->tcs_num_patches;
@@ -511,17 +511,17 @@ static bool needs_view_index_sgpr(struct radv_shader_context *ctx,
{
switch (stage) {
case MESA_SHADER_VERTEX:
- if (ctx->shader_info->info.needs_multiview_view_index ||
+ if (ctx->shader_info->needs_multiview_view_index ||
(!ctx->options->key.vs_common_out.as_es && !ctx->options->key.vs_common_out.as_ls && ctx->options->key.has_multiview_view_index))
return true;
break;
case MESA_SHADER_TESS_EVAL:
- if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.vs_common_out.as_es && ctx->options->key.has_multiview_view_index))
+ if (ctx->shader_info->needs_multiview_view_index || (!ctx->options->key.vs_common_out.as_es && ctx->options->key.has_multiview_view_index))
return true;
break;
case MESA_SHADER_GEOMETRY:
case MESA_SHADER_TESS_CTRL:
- if (ctx->shader_info->info.needs_multiview_view_index)
+ if (ctx->shader_info->needs_multiview_view_index)
return true;
break;
default:
@@ -535,9 +535,9 @@ count_vs_user_sgprs(struct radv_shader_context *ctx)
{
uint8_t count = 0;
- if (ctx->shader_info->info.vs.has_vertex_buffers)
+ if (ctx->shader_info->vs.has_vertex_buffers)
count++;
- count += ctx->shader_info->info.vs.needs_draw_id ? 3 : 2;
+ count += ctx->shader_info->vs.needs_draw_id ? 3 : 2;
return count;
}
@@ -548,42 +548,42 @@ static void allocate_inline_push_consts(struct radv_shader_context *ctx,
uint8_t remaining_sgprs = user_sgpr_info->remaining_sgprs;
/* Only supported if shaders use push constants. */
- if (ctx->shader_info->info.min_push_constant_used == UINT8_MAX)
+ if (ctx->shader_info->min_push_constant_used == UINT8_MAX)
return;
/* Only supported if shaders don't have indirect push constants. */
- if (ctx->shader_info->info.has_indirect_push_constants)
+ if (ctx->shader_info->has_indirect_push_constants)
return;
/* Only supported for 32-bit push constants. */
- if (!ctx->shader_info->info.has_only_32bit_push_constants)
+ if (!ctx->shader_info->has_only_32bit_push_constants)
return;
uint8_t num_push_consts =
- (ctx->shader_info->info.max_push_constant_used -
- ctx->shader_info->info.min_push_constant_used) / 4;
+ (ctx->shader_info->max_push_constant_used -
+ ctx->shader_info->min_push_constant_used) / 4;
/* Check if the number of user SGPRs is large enough. */
if (num_push_consts < remaining_sgprs) {
- ctx->shader_info->info.num_inline_push_consts = num_push_consts;
+ ctx->shader_info->num_inline_push_consts = num_push_consts;
} else {
- ctx->shader_info->info.num_inline_push_consts = remaining_sgprs;
+ ctx->shader_info->num_inline_push_consts = remaining_sgprs;
}
/* Clamp to the maximum number of allowed inlined push constants. */
- if (ctx->shader_info->info.num_inline_push_consts > AC_MAX_INLINE_PUSH_CONSTS)
- ctx->shader_info->info.num_inline_push_consts = AC_MAX_INLINE_PUSH_CONSTS;
+ if (ctx->shader_info->num_inline_push_consts > AC_MAX_INLINE_PUSH_CONSTS)
+ ctx->shader_info->num_inline_push_consts = AC_MAX_INLINE_PUSH_CONSTS;
- if (ctx->shader_info->info.num_inline_push_consts == num_push_consts &&
- !ctx->shader_info->info.loads_dynamic_offsets) {
+ if (ctx->shader_info->num_inline_push_consts == num_push_consts &&
+ !ctx->shader_info->loads_dynamic_offsets) {
/* Disable the default push constants path if all constants are
* inlined and if shaders don't use dynamic descriptors.
*/
- ctx->shader_info->info.loads_push_constants = false;
+ ctx->shader_info->loads_push_constants = false;
}
- ctx->shader_info->info.base_inline_push_consts =
- ctx->shader_info->info.min_push_constant_used / 4;
+ ctx->shader_info->base_inline_push_consts =
+ ctx->shader_info->min_push_constant_used / 4;
}
static void allocate_user_sgprs(struct radv_shader_context *ctx,
@@ -606,7 +606,7 @@ static void allocate_user_sgprs(struct radv_shader_context *ctx,
user_sgpr_info->need_ring_offsets = true;
if (stage == MESA_SHADER_FRAGMENT &&
- ctx->shader_info->info.ps.needs_sample_positions)
+ ctx->shader_info->ps.needs_sample_positions)
user_sgpr_info->need_ring_offsets = true;
/* 2 user sgprs will nearly always be allocated for scratch/rings */
@@ -616,11 +616,11 @@ static void allocate_user_sgprs(struct radv_shader_context *ctx,
switch (stage) {
case MESA_SHADER_COMPUTE:
- if (ctx->shader_info->info.cs.uses_grid_size)
+ if (ctx->shader_info->cs.uses_grid_size)
user_sgpr_count += 3;
break;
case MESA_SHADER_FRAGMENT:
- user_sgpr_count += ctx->shader_info->info.ps.needs_sample_positions;
+ user_sgpr_count += ctx->shader_info->ps.needs_sample_positions;
break;
case MESA_SHADER_VERTEX:
if (!ctx->is_gs_copy_shader)
@@ -648,7 +648,7 @@ static void allocate_user_sgprs(struct radv_shader_context *ctx,
if (needs_view_index)
user_sgpr_count++;
- if (ctx->shader_info->info.loads_push_constants)
+ if (ctx->shader_info->loads_push_constants)
user_sgpr_count++;
if (ctx->streamout_buffers)
@@ -657,7 +657,7 @@ static void allocate_user_sgprs(struct radv_shader_context *ctx,
uint32_t available_sgprs = ctx->options->chip_class >= GFX9 && stage != MESA_SHADER_COMPUTE ? 32 : 16;
uint32_t remaining_sgprs = available_sgprs - user_sgpr_count;
uint32_t num_desc_set =
- util_bitcount(ctx->shader_info->info.desc_set_used_mask);
+ util_bitcount(ctx->shader_info->desc_set_used_mask);
if (remaining_sgprs < num_desc_set) {
user_sgpr_info->indirect_all_descriptor_sets = true;
@@ -679,7 +679,7 @@ declare_global_input_sgprs(struct radv_shader_context *ctx,
/* 1 for each descriptor set */
if (!user_sgpr_info->indirect_all_descriptor_sets) {
- uint32_t mask = ctx->shader_info->info.desc_set_used_mask;
+ uint32_t mask = ctx->shader_info->desc_set_used_mask;
while (mask) {
int i = u_bit_scan(&mask);
@@ -691,19 +691,19 @@ declare_global_input_sgprs(struct radv_shader_context *ctx,
desc_sets);
}
- if (ctx->shader_info->info.loads_push_constants) {
+ if (ctx->shader_info->loads_push_constants) {
/* 1 for push constants and dynamic descriptors */
add_arg(args, ARG_SGPR, type, &ctx->abi.push_constants);
}
- for (unsigned i = 0; i < ctx->shader_info->info.num_inline_push_consts; i++) {
+ for (unsigned i = 0; i < ctx->shader_info->num_inline_push_consts; i++) {
add_arg(args, ARG_SGPR, ctx->ac.i32,
&ctx->abi.inline_push_consts[i]);
}
- ctx->abi.num_inline_push_consts = ctx->shader_info->info.num_inline_push_consts;
- ctx->abi.base_inline_push_consts = ctx->shader_info->info.base_inline_push_consts;
+ ctx->abi.num_inline_push_consts = ctx->shader_info->num_inline_push_consts;
+ ctx->abi.base_inline_push_consts = ctx->shader_info->base_inline_push_consts;
- if (ctx->shader_info->info.so.num_outputs) {
+ if (ctx->shader_info->so.num_outputs) {
add_arg(args, ARG_SGPR,
ac_array_in_const32_addr_space(ctx->ac.v4i32),
&ctx->streamout_buffers);
@@ -720,14 +720,14 @@ declare_vs_specific_input_sgprs(struct radv_shader_context *ctx,
if (!ctx->is_gs_copy_shader &&
(stage == MESA_SHADER_VERTEX ||
(has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
- if (ctx->shader_info->info.vs.has_vertex_buffers) {
+ if (ctx->shader_info->vs.has_vertex_buffers) {
add_arg(args, ARG_SGPR,
ac_array_in_const32_addr_space(ctx->ac.v4i32),
&ctx->vertex_buffers);
}
add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.base_vertex);
add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.start_instance);
- if (ctx->shader_info->info.vs.needs_draw_id) {
+ if (ctx->shader_info->vs.needs_draw_id) {
add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.draw_id);
}
}
@@ -774,7 +774,7 @@ declare_streamout_sgprs(struct radv_shader_context *ctx, gl_shader_stage stage,
int i;
/* Streamout SGPRs. */
- if (ctx->shader_info->info.so.num_outputs) {
+ if (ctx->shader_info->so.num_outputs) {
assert(stage == MESA_SHADER_VERTEX ||
stage == MESA_SHADER_TESS_EVAL);
@@ -790,7 +790,7 @@ declare_streamout_sgprs(struct radv_shader_context *ctx, gl_shader_stage stage,
/* A streamout buffer offset is loaded if the stride is non-zero. */
for (i = 0; i < 4; i++) {
- if (!ctx->shader_info->info.so.strides[i])
+ if (!ctx->shader_info->so.strides[i])
continue;
add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_offset[i]);
@@ -811,7 +811,7 @@ set_global_input_locs(struct radv_shader_context *ctx,
const struct user_sgpr_info *user_sgpr_info,
LLVMValueRef desc_sets, uint8_t *user_sgpr_idx)
{
- uint32_t mask = ctx->shader_info->info.desc_set_used_mask;
+ uint32_t mask = ctx->shader_info->desc_set_used_mask;
if (!user_sgpr_info->indirect_all_descriptor_sets) {
while (mask) {
@@ -835,13 +835,13 @@ set_global_input_locs(struct radv_shader_context *ctx,
ctx->shader_info->need_indirect_descriptor_sets = true;
}
- if (ctx->shader_info->info.loads_push_constants) {
+ if (ctx->shader_info->loads_push_constants) {
set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx);
}
- if (ctx->shader_info->info.num_inline_push_consts) {
+ if (ctx->shader_info->num_inline_push_consts) {
set_loc_shader(ctx, AC_UD_INLINE_PUSH_CONSTANTS, user_sgpr_idx,
- ctx->shader_info->info.num_inline_push_consts);
+ ctx->shader_info->num_inline_push_consts);
}
if (ctx->streamout_buffers) {
@@ -859,13 +859,13 @@ set_vs_specific_input_locs(struct radv_shader_context *ctx,
if (!ctx->is_gs_copy_shader &&
(stage == MESA_SHADER_VERTEX ||
(has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
- if (ctx->shader_info->info.vs.has_vertex_buffers) {
+ if (ctx->shader_info->vs.has_vertex_buffers) {
set_loc_shader_ptr(ctx, AC_UD_VS_VERTEX_BUFFERS,
user_sgpr_idx);
}
unsigned vs_num = 2;
- if (ctx->shader_info->info.vs.needs_draw_id)
+ if (ctx->shader_info->vs.needs_draw_id)
vs_num++;
set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE,
@@ -941,20 +941,20 @@ static void create_function(struct radv_shader_context *ctx,
declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
&desc_sets);
- if (ctx->shader_info->info.cs.uses_grid_size) {
+ if (ctx->shader_info->cs.uses_grid_size) {
add_arg(&args, ARG_SGPR, ctx->ac.v3i32,
&ctx->abi.num_work_groups);
}
for (int i = 0; i < 3; i++) {
ctx->abi.workgroup_ids[i] = NULL;
- if (ctx->shader_info->info.cs.uses_block_id[i]) {
+ if (ctx->shader_info->cs.uses_block_id[i]) {
add_arg(&args, ARG_SGPR, ctx->ac.i32,
&ctx->abi.workgroup_ids[i]);
}
}
- if (ctx->shader_info->info.cs.uses_local_invocation_idx)
+ if (ctx->shader_info->cs.uses_local_invocation_idx)
add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.tg_size);
add_arg(&args, ARG_VGPR, ctx->ac.v3i32,
&ctx->abi.local_invocation_ids);
@@ -1189,7 +1189,7 @@ static void create_function(struct radv_shader_context *ctx,
switch (stage) {
case MESA_SHADER_COMPUTE:
- if (ctx->shader_info->info.cs.uses_grid_size) {
+ if (ctx->shader_info->cs.uses_grid_size) {
set_loc_shader(ctx, AC_UD_CS_GRID_SIZE,
&user_sgpr_idx, 3);
}
@@ -1322,7 +1322,7 @@ static LLVMValueRef get_non_vertex_index_offset(struct radv_shader_context *ctx)
uint32_t num_patches = ctx->tcs_num_patches;
uint32_t num_tcs_outputs;
if (ctx->stage == MESA_SHADER_TESS_CTRL)
- num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
+ num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
else
num_tcs_outputs = ctx->options->key.tes.tcs_num_outputs;
@@ -1709,7 +1709,7 @@ static LLVMValueRef load_sample_mask_in(struct ac_shader_abi *abi)
struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
uint8_t log2_ps_iter_samples;
- if (ctx->shader_info->info.ps.force_persample) {
+ if (ctx->shader_info->ps.force_persample) {
log2_ps_iter_samples =
util_logbase2(ctx->options->key.fs.num_samples);
} else {
@@ -1770,9 +1770,9 @@ visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addr
for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
unsigned output_usage_mask =
- ctx->shader_info->info.gs.output_usage_mask[i];
+ ctx->shader_info->gs.output_usage_mask[i];
uint8_t output_stream =
- ctx->shader_info->info.gs.output_streams[i];
+ ctx->shader_info->gs.output_streams[i];
LLVMValueRef *out_ptr = &addrs[i * 4];
int length = util_last_bit(output_usage_mask);
@@ -2116,7 +2116,7 @@ handle_vs_input_decl(struct radv_shader_context *ctx,
LLVMValueRef buffer_index;
unsigned attrib_count = glsl_count_attribute_slots(variable->type, true);
uint8_t input_usage_mask =
- ctx->shader_info->info.vs.input_usage_mask[variable->data.location];
+ ctx->shader_info->vs.input_usage_mask[variable->data.location];
unsigned num_input_channels = util_last_bit(input_usage_mask);
variable->data.driver_location = variable->data.location * 4;
@@ -2579,7 +2579,7 @@ radv_emit_streamout(struct radv_shader_context *ctx, unsigned stream)
LLVMValueRef buf_ptr = ctx->streamout_buffers;
for (i = 0; i < 4; i++) {
- uint16_t stride = ctx->shader_info->info.so.strides[i];
+ uint16_t stride = ctx->shader_info->so.strides[i];
if (!stride)
continue;
@@ -2603,10 +2603,10 @@ radv_emit_streamout(struct radv_shader_context *ctx, unsigned stream)
}
/* Write streamout data. */
- for (i = 0; i < ctx->shader_info->info.so.num_outputs; i++) {
+ for (i = 0; i < ctx->shader_info->so.num_outputs; i++) {
struct radv_shader_output_values shader_out = {};
struct radv_stream_output *output =
- &ctx->shader_info->info.so.outputs[i];
+ &ctx->shader_info->so.outputs[i];
if (stream != output->stream)
continue;
@@ -2821,7 +2821,7 @@ handle_vs_outputs_post(struct radv_shader_context *ctx,
outinfo->writes_viewport_index = true;
}
- if (ctx->shader_info->info.so.num_outputs &&
+ if (ctx->shader_info->so.num_outputs &&
!ctx->is_gs_copy_shader) {
/* The GS copy shader emission already emits streamout. */
radv_emit_streamout(ctx, 0);
@@ -2841,14 +2841,14 @@ handle_vs_outputs_post(struct radv_shader_context *ctx,
if (ctx->stage == MESA_SHADER_VERTEX &&
!ctx->is_gs_copy_shader) {
outputs[noutput].usage_mask =
- ctx->shader_info->info.vs.output_usage_mask[i];
+ ctx->shader_info->vs.output_usage_mask[i];
} else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
outputs[noutput].usage_mask =
- ctx->shader_info->info.tes.output_usage_mask[i];
+ ctx->shader_info->tes.output_usage_mask[i];
} else {
assert(ctx->is_gs_copy_shader);
outputs[noutput].usage_mask =
- ctx->shader_info->info.gs.output_usage_mask[i];
+ ctx->shader_info->gs.output_usage_mask[i];
}
for (unsigned j = 0; j < 4; j++) {
@@ -2921,11 +2921,11 @@ handle_es_outputs_post(struct radv_shader_context *ctx,
if (ctx->stage == MESA_SHADER_VERTEX) {
output_usage_mask =
- ctx->shader_info->info.vs.output_usage_mask[i];
+ ctx->shader_info->vs.output_usage_mask[i];
} else {
assert(ctx->stage == MESA_SHADER_TESS_EVAL);
output_usage_mask =
- ctx->shader_info->info.tes.output_usage_mask[i];
+ ctx->shader_info->tes.output_usage_mask[i];
}
param_index = shader_io_get_unique_index(i);
@@ -2967,7 +2967,7 @@ static void
handle_ls_outputs_post(struct radv_shader_context *ctx)
{
LLVMValueRef vertex_id = ctx->rel_auto_id;
- uint32_t num_tcs_inputs = util_last_bit64(ctx->shader_info->info.vs.ls_outputs_written);
+ uint32_t num_tcs_inputs = util_last_bit64(ctx->shader_info->vs.ls_outputs_written);
LLVMValueRef vertex_dw_stride = LLVMConstInt(ctx->ac.i32, num_tcs_inputs * 4, false);
LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
vertex_dw_stride, "");
@@ -3341,7 +3341,7 @@ static void gfx10_ngg_gs_emit_epilogue_1(struct radv_shader_context *ctx)
unsigned num_components;
num_components =
- ctx->shader_info->info.gs.num_stream_output_components[stream];
+ ctx->shader_info->gs.num_stream_output_components[stream];
if (!num_components)
continue;
@@ -3556,7 +3556,7 @@ static void gfx10_ngg_gs_emit_epilogue_2(struct radv_shader_context *ctx)
outputs[noutput].slot_name = i;
outputs[noutput].slot_index = i == VARYING_SLOT_CLIP_DIST1;
- outputs[noutput].usage_mask = ctx->shader_info->info.gs.output_usage_mask[i];
+ outputs[noutput].usage_mask = ctx->shader_info->gs.output_usage_mask[i];
int length = util_last_bit(outputs[noutput].usage_mask);
for (unsigned j = 0; j < length; j++, out_idx++) {
@@ -3626,9 +3626,9 @@ static void gfx10_ngg_gs_emit_vertex(struct radv_shader_context *ctx,
unsigned out_idx = 0;
for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
unsigned output_usage_mask =
- ctx->shader_info->info.gs.output_usage_mask[i];
+ ctx->shader_info->gs.output_usage_mask[i];
uint8_t output_stream =
- ctx->shader_info->info.gs.output_streams[i];
+ ctx->shader_info->gs.output_streams[i];
LLVMValueRef *out_ptr = &addrs[i * 4];
int length = util_last_bit(output_usage_mask);
@@ -3887,15 +3887,15 @@ handle_fs_outputs_post(struct radv_shader_context *ctx)
}
/* Process depth, stencil, samplemask. */
- if (ctx->shader_info->info.ps.writes_z) {
+ if (ctx->shader_info->ps.writes_z) {
depth = ac_to_float(&ctx->ac,
radv_load_output(ctx, FRAG_RESULT_DEPTH, 0));
}
- if (ctx->shader_info->info.ps.writes_stencil) {
+ if (ctx->shader_info->ps.writes_stencil) {
stencil = ac_to_float(&ctx->ac,
radv_load_output(ctx, FRAG_RESULT_STENCIL, 0));
}
- if (ctx->shader_info->info.ps.writes_sample_mask) {
+ if (ctx->shader_info->ps.writes_sample_mask) {
samplemask = ac_to_float(&ctx->ac,
radv_load_output(ctx, FRAG_RESULT_SAMPLE_MASK, 0));
}
@@ -3904,9 +3904,9 @@ handle_fs_outputs_post(struct radv_shader_context *ctx)
* exported.
*/
if (index > 0 &&
- !ctx->shader_info->info.ps.writes_z &&
- !ctx->shader_info->info.ps.writes_stencil &&
- !ctx->shader_info->info.ps.writes_sample_mask) {
+ !ctx->shader_info->ps.writes_z &&
+ !ctx->shader_info->ps.writes_stencil &&
+ !ctx->shader_info->ps.writes_sample_mask) {
unsigned last = index - 1;
color_args[last].valid_mask = 1; /* whether the EXEC mask is valid */
@@ -4069,7 +4069,7 @@ ac_setup_rings(struct radv_shader_context *ctx)
LLVMValueRef ring, tmp;
num_components =
- ctx->shader_info->info.gs.num_stream_output_components[stream];
+ ctx->shader_info->gs.num_stream_output_components[stream];
if (!num_components)
continue;
@@ -4172,7 +4172,7 @@ static
LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
struct nir_shader *const *shaders,
int shader_count,
- struct radv_shader_variant_info *shader_info,
+ struct radv_shader_info *shader_info,
const struct radv_nir_compiler_options *options)
{
struct radv_shader_context ctx = {0};
@@ -4188,10 +4188,10 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
options->family, float_mode, options->wave_size, 64);
ctx.context = ctx.ac.context;
- radv_nir_shader_info_init(&shader_info->info);
+ radv_nir_shader_info_init(shader_info);
for(int i = 0; i < shader_count; ++i)
- radv_nir_shader_info_pass(shaders[i], options, &shader_info->info);
+ radv_nir_shader_info_pass(shaders[i], options, shader_info);
for (i = 0; i < MAX_SETS; i++)
shader_info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
@@ -4275,7 +4275,7 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
if (shader_count == 1)
ctx.tcs_num_inputs = ctx.options->key.tcs.num_inputs;
else
- ctx.tcs_num_inputs = util_last_bit64(shader_info->info.vs.ls_outputs_written);
+ ctx.tcs_num_inputs = util_last_bit64(shader_info->vs.ls_outputs_written);
ctx.tcs_num_patches = get_tcs_num_patches(&ctx);
} else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
ctx.abi.load_tess_varyings = load_tes_input;
@@ -4499,7 +4499,7 @@ static void ac_compile_llvm_module(struct ac_llvm_compiler *ac_llvm,
}
static void
-ac_fill_shader_info(struct radv_shader_variant_info *shader_info, struct nir_shader *nir, const struct radv_nir_compiler_options *options)
+ac_fill_shader_info(struct radv_shader_info *shader_info, struct nir_shader *nir, const struct radv_nir_compiler_options *options)
{
switch (nir->info.stage) {
case MESA_SHADER_COMPUTE:
@@ -4507,9 +4507,9 @@ ac_fill_shader_info(struct radv_shader_variant_info *shader_info, struct nir_sha
shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
break;
case MESA_SHADER_FRAGMENT:
- shader_info->fs.can_discard = nir->info.fs.uses_discard;
- shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
- shader_info->fs.post_depth_coverage = nir->info.fs.post_depth_coverage;
+ shader_info->ps.can_discard = nir->info.fs.uses_discard;
+ shader_info->ps.early_fragment_test = nir->info.fs.early_fragment_tests;
+ shader_info->ps.post_depth_coverage = nir->info.fs.post_depth_coverage;
break;
case MESA_SHADER_GEOMETRY:
shader_info->gs.vertices_in = nir->info.gs.vertices_in;
@@ -4543,7 +4543,7 @@ ac_fill_shader_info(struct radv_shader_variant_info *shader_info, struct nir_sha
void
radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
struct radv_shader_binary **rbinary,
- struct radv_shader_variant_info *shader_info,
+ struct radv_shader_info *shader_info,
struct nir_shader *const *nir,
int nir_count,
const struct radv_nir_compiler_options *options)
@@ -4570,7 +4570,7 @@ radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
shader_info->gs.es_type = nir[0]->info.stage;
}
}
- shader_info->info.wave_size = options->wave_size;
+ shader_info->wave_size = options->wave_size;
}
static void
@@ -4582,7 +4582,7 @@ ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
LLVMValueRef stream_id;
/* Fetch the vertex stream ID. */
- if (ctx->shader_info->info.so.num_outputs) {
+ if (ctx->shader_info->so.num_outputs) {
stream_id =
ac_unpack_param(&ctx->ac, ctx->streamout_config, 24, 2);
} else {
@@ -4598,14 +4598,14 @@ ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
for (unsigned stream = 0; stream < 4; stream++) {
unsigned num_components =
- ctx->shader_info->info.gs.num_stream_output_components[stream];
+ ctx->shader_info->gs.num_stream_output_components[stream];
LLVMBasicBlockRef bb;
unsigned offset;
if (!num_components)
continue;
- if (stream > 0 && !ctx->shader_info->info.so.num_outputs)
+ if (stream > 0 && !ctx->shader_info->so.num_outputs)
continue;
bb = LLVMInsertBasicBlockInContext(ctx->ac.context, end_bb, "out");
@@ -4615,9 +4615,9 @@ ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
offset = 0;
for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
unsigned output_usage_mask =
- ctx->shader_info->info.gs.output_usage_mask[i];
+ ctx->shader_info->gs.output_usage_mask[i];
unsigned output_stream =
- ctx->shader_info->info.gs.output_streams[i];
+ ctx->shader_info->gs.output_streams[i];
int length = util_last_bit(output_usage_mask);
if (!(ctx->output_mask & (1ull << i)) ||
@@ -4653,7 +4653,7 @@ ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
}
}
- if (ctx->shader_info->info.so.num_outputs)
+ if (ctx->shader_info->so.num_outputs)
radv_emit_streamout(ctx, stream);
if (stream == 0) {
@@ -4671,7 +4671,7 @@ void
radv_compile_gs_copy_shader(struct ac_llvm_compiler *ac_llvm,
struct nir_shader *geom_shader,
struct radv_shader_binary **rbinary,
- struct radv_shader_variant_info *shader_info,
+ struct radv_shader_info *shader_info,
const struct radv_nir_compiler_options *options)
{
struct radv_shader_context ctx = {0};
@@ -4690,7 +4690,7 @@ radv_compile_gs_copy_shader(struct ac_llvm_compiler *ac_llvm,
ctx.stage = MESA_SHADER_VERTEX;
ctx.shader = geom_shader;
- radv_nir_shader_info_pass(geom_shader, options, &shader_info->info);
+ radv_nir_shader_info_pass(geom_shader, options, shader_info);
create_function(&ctx, MESA_SHADER_VERTEX, false, MESA_SHADER_VERTEX);
diff --git a/src/amd/vulkan/radv_pipeline.c b/src/amd/vulkan/radv_pipeline.c
index d387e56c60b..83ba4cacbf7 100644
--- a/src/amd/vulkan/radv_pipeline.c
+++ b/src/amd/vulkan/radv_pipeline.c
@@ -1077,8 +1077,8 @@ radv_pipeline_out_of_order_rast(struct radv_pipeline *pipeline,
* except when early Z/S tests are requested.
*/
if (ps &&
- ps->info.info.ps.writes_memory &&
- ps->info.fs.early_fragment_test &&
+ ps->info.ps.writes_memory &&
+ ps->info.ps.early_fragment_test &&
!dsa_order_invariant.pass_set)
return false;
@@ -1129,7 +1129,7 @@ radv_pipeline_init_multisample_state(struct radv_pipeline *pipeline,
if (vkms)
ps_iter_samples = radv_pipeline_get_ps_iter_samples(vkms);
- if (vkms && !vkms->sampleShadingEnable && pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.force_persample) {
+ if (vkms && !vkms->sampleShadingEnable && pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.force_persample) {
ps_iter_samples = ms->num_samples;
}
@@ -1515,7 +1515,7 @@ calculate_gs_info(const VkGraphicsPipelineCreateInfo *pCreateInfo,
const struct radv_pipeline *pipeline)
{
struct radv_gs_state gs = {0};
- struct radv_shader_variant_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;
+ struct radv_shader_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;
struct radv_es_output_info *es_info;
if (pipeline->device->physical_device->rad_info.chip_class >= GFX9)
es_info = radv_pipeline_has_tess(pipeline) ? &gs_info->tes.es_info : &gs_info->vs.es_info;
@@ -1669,7 +1669,7 @@ calculate_ngg_info(const VkGraphicsPipelineCreateInfo *pCreateInfo,
struct radv_pipeline *pipeline)
{
struct radv_ngg_state ngg = {0};
- struct radv_shader_variant_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;
+ struct radv_shader_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;
struct radv_es_output_info *es_info =
radv_pipeline_has_tess(pipeline) ? &gs_info->tes.es_info : &gs_info->vs.es_info;
unsigned gs_type = radv_pipeline_has_gs(pipeline) ? MESA_SHADER_GEOMETRY : MESA_SHADER_VERTEX;
@@ -1881,7 +1881,7 @@ calculate_gs_ring_sizes(struct radv_pipeline *pipeline, const struct radv_gs_sta
unsigned alignment = 256 * num_se;
/* The maximum size is 63.999 MB per SE. */
unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;
- struct radv_shader_variant_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;
+ struct radv_shader_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;
/* Calculate the minimum size. */
unsigned min_esgs_ring_size = align(gs->vgt_esgs_ring_itemsize * 4 * gs_vertex_reuse *
@@ -2597,17 +2597,17 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
/* TODO: These are no longer used as keys we should refactor this */
keys[MESA_SHADER_VERTEX].vs_common_out.export_prim_id =
- pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.prim_id_input;
+ pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.prim_id_input;
keys[MESA_SHADER_VERTEX].vs_common_out.export_layer_id =
- pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.layer_input;
+ pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.layer_input;
keys[MESA_SHADER_VERTEX].vs_common_out.export_clip_dists =
- !!pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.num_input_clips_culls;
+ !!pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.num_input_clips_culls;
keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_prim_id =
- pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.prim_id_input;
+ pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.prim_id_input;
keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_layer_id =
- pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.layer_input;
+ pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.layer_input;
keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_clip_dists =
- !!pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.num_input_clips_culls;
+ !!pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.num_input_clips_culls;
}
if (device->physical_device->rad_info.chip_class >= GFX9 && modules[MESA_SHADER_TESS_CTRL]) {
@@ -2627,7 +2627,7 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
}
modules[MESA_SHADER_VERTEX] = NULL;
keys[MESA_SHADER_TESS_EVAL].tes.num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
- keys[MESA_SHADER_TESS_EVAL].tes.tcs_num_outputs = util_last_bit64(pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.info.tcs.outputs_written);
+ keys[MESA_SHADER_TESS_EVAL].tes.tcs_num_outputs = util_last_bit64(pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.outputs_written);
}
if (device->physical_device->rad_info.chip_class >= GFX9 && modules[MESA_SHADER_GEOMETRY]) {
@@ -2650,11 +2650,11 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
if(modules[i] && !pipeline->shaders[i]) {
if (i == MESA_SHADER_TESS_CTRL) {
- keys[MESA_SHADER_TESS_CTRL].tcs.num_inputs = util_last_bit64(pipeline->shaders[MESA_SHADER_VERTEX]->info.info.vs.ls_outputs_written);
+ keys[MESA_SHADER_TESS_CTRL].tcs.num_inputs = util_last_bit64(pipeline->shaders[MESA_SHADER_VERTEX]->info.vs.ls_outputs_written);
}
if (i == MESA_SHADER_TESS_EVAL) {
keys[MESA_SHADER_TESS_EVAL].tes.num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
- keys[MESA_SHADER_TESS_EVAL].tes.tcs_num_outputs = util_last_bit64(pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.info.tcs.outputs_written);
+ keys[MESA_SHADER_TESS_EVAL].tes.tcs_num_outputs = util_last_bit64(pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.outputs_written);
}
radv_start_feedback(stage_feedbacks[i]);
@@ -3495,7 +3495,7 @@ radv_pipeline_generate_vgt_gs_mode(struct radeon_cmdbuf *ctx_cs,
vgt_gs_mode = ac_vgt_gs_mode(gs->info.gs.vertices_out,
pipeline->device->physical_device->rad_info.chip_class);
- } else if (outinfo->export_prim_id || vs->info.info.uses_prim_id) {
+ } else if (outinfo->export_prim_id || vs->info.uses_prim_id) {
vgt_gs_mode = S_028A40_MODE(V_028A40_GS_SCENARIO_A);
vgt_primitiveid_en |= S_028A84_PRIMITIVEID_EN(1);
}
@@ -3638,7 +3638,7 @@ radv_pipeline_generate_hw_ngg(struct radeon_cmdbuf *ctx_cs,
outinfo->writes_layer ||
outinfo->writes_viewport_index;
bool es_enable_prim_id = outinfo->export_prim_id ||
- (es && es->info.info.uses_prim_id);
+ (es && es->info.uses_prim_id);
bool break_wave_at_eoi = false;
unsigned ge_cntl;
unsigned nparams;
@@ -3647,7 +3647,7 @@ radv_pipeline_generate_hw_ngg(struct radeon_cmdbuf *ctx_cs,
struct radv_shader_variant *gs =
pipeline->shaders[MESA_SHADER_GEOMETRY];
- if (es_enable_prim_id || (gs && gs->info.info.uses_prim_id))
+ if (es_enable_prim_id || (gs && gs->info.uses_prim_id))
break_wave_at_eoi = true;
}
@@ -3869,8 +3869,8 @@ radv_pipeline_generate_hw_gs(struct radeon_cmdbuf *ctx_cs,
uint64_t va;
gs_max_out_vertices = gs->info.gs.vertices_out;
- max_stream = gs->info.info.gs.max_stream;
- num_components = gs->info.info.gs.num_stream_output_components;
+ max_stream = gs->info.gs.max_stream;
+ num_components = gs->info.gs.num_stream_output_components;
offset = num_components[0] * gs_max_out_vertices;
@@ -3984,7 +3984,7 @@ radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf *ctx_cs,
unsigned ps_offset = 0;
- if (ps->info.info.ps.prim_id_input) {
+ if (ps->info.ps.prim_id_input) {
unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID];
if (vs_offset != AC_EXP_PARAM_UNDEFINED) {
ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true, false);
@@ -3992,8 +3992,8 @@ radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf *ctx_cs,
}
}
- if (ps->info.info.ps.layer_input ||
- ps->info.info.needs_multiview_view_index) {
+ if (ps->info.ps.layer_input ||
+ ps->info.needs_multiview_view_index) {
unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_LAYER];
if (vs_offset != AC_EXP_PARAM_UNDEFINED)
ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true, false);
@@ -4002,14 +4002,14 @@ radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf *ctx_cs,
++ps_offset;
}
- if (ps->info.info.ps.has_pcoord) {
+ if (ps->info.ps.has_pcoord) {
unsigned val;
val = S_028644_PT_SPRITE_TEX(1) | S_028644_OFFSET(0x20);
ps_input_cntl[ps_offset] = val;
ps_offset++;
}
- if (ps->info.info.ps.num_input_clips_culls) {
+ if (ps->info.ps.num_input_clips_culls) {
unsigned vs_offset;
vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST0];
@@ -4020,17 +4020,17 @@ radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf *ctx_cs,
vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST1];
if (vs_offset != AC_EXP_PARAM_UNDEFINED &&
- ps->info.info.ps.num_input_clips_culls > 4) {
+ ps->info.ps.num_input_clips_culls > 4) {
ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, false, false);
++ps_offset;
}
}
- for (unsigned i = 0; i < 32 && (1u << i) <= ps->info.info.ps.input_mask; ++i) {
+ for (unsigned i = 0; i < 32 && (1u << i) <= ps->info.ps.input_mask; ++i) {
unsigned vs_offset;
bool flat_shade;
bool float16;
- if (!(ps->info.info.ps.input_mask & (1u << i)))
+ if (!(ps->info.ps.input_mask & (1u << i)))
continue;
vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_VAR0 + i];
@@ -4040,8 +4040,8 @@ radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf *ctx_cs,
continue;
}
- flat_shade = !!(ps->info.info.ps.flat_shaded_mask & (1u << ps_offset));
- float16 = !!(ps->info.info.ps.float16_shaded_mask & (1u << ps_offset));
+ flat_shade = !!(ps->info.ps.flat_shaded_mask & (1u << ps_offset));
+ float16 = !!(ps->info.ps.float16_shaded_mask & (1u << ps_offset));
ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, flat_shade, float16);
++ps_offset;
@@ -4061,7 +4061,7 @@ radv_compute_db_shader_control(const struct radv_device *device,
const struct radv_shader_variant *ps)
{
unsigned z_order;
- if (ps->info.fs.early_fragment_test || !ps->info.info.ps.writes_memory)
+ if (ps->info.ps.early_fragment_test || !ps->info.ps.writes_memory)
z_order = V_02880C_EARLY_Z_THEN_LATE_Z;
else
z_order = V_02880C_LATE_Z;
@@ -4073,17 +4073,17 @@ radv_compute_db_shader_control(const struct radv_device *device,
* but this appears to break Project Cars (DXVK). See
* https://bugs.freedesktop.org/show_bug.cgi?id=109401
*/
- bool mask_export_enable = ps->info.info.ps.writes_sample_mask;
+ bool mask_export_enable = ps->info.ps.writes_sample_mask;
- return S_02880C_Z_EXPORT_ENABLE(ps->info.info.ps.writes_z) |
- S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(ps->info.info.ps.writes_stencil) |
- S_02880C_KILL_ENABLE(!!ps->info.fs.can_discard) |
+ return S_02880C_Z_EXPORT_ENABLE(ps->info.ps.writes_z) |
+ S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(ps->info.ps.writes_stencil) |
+ S_02880C_KILL_ENABLE(!!ps->info.ps.can_discard) |
S_02880C_MASK_EXPORT_ENABLE(mask_export_enable) |
S_02880C_Z_ORDER(z_order) |
- S_02880C_DEPTH_BEFORE_SHADER(ps->info.fs.early_fragment_test) |
- S_02880C_PRE_SHADER_DEPTH_COVERAGE_ENABLE(ps->info.fs.post_depth_coverage) |
- S_02880C_EXEC_ON_HIER_FAIL(ps->info.info.ps.writes_memory) |
- S_02880C_EXEC_ON_NOOP(ps->info.info.ps.writes_memory) |
+ S_02880C_DEPTH_BEFORE_SHADER(ps->info.ps.early_fragment_test) |
+ S_02880C_PRE_SHADER_DEPTH_COVERAGE_ENABLE(ps->info.ps.post_depth_coverage) |
+ S_02880C_EXEC_ON_HIER_FAIL(ps->info.ps.writes_memory) |
+ S_02880C_EXEC_ON_NOOP(ps->info.ps.writes_memory) |
S_02880C_DUAL_QUAD_DISABLE(disable_rbplus);
}
@@ -4116,15 +4116,15 @@ radv_pipeline_generate_fragment_shader(struct radeon_cmdbuf *ctx_cs,
ps->config.spi_ps_input_addr);
radeon_set_context_reg(ctx_cs, R_0286D8_SPI_PS_IN_CONTROL,
- S_0286D8_NUM_INTERP(ps->info.info.ps.num_interp) |
- S_0286D8_PS_W32_EN(ps->info.info.wave_size == 32));
+ S_0286D8_NUM_INTERP(ps->info.ps.num_interp) |
+ S_0286D8_PS_W32_EN(ps->info.wave_size == 32));
radeon_set_context_reg(ctx_cs, R_0286E0_SPI_BARYC_CNTL, pipeline->graphics.spi_baryc_cntl);
radeon_set_context_reg(ctx_cs, R_028710_SPI_SHADER_Z_FORMAT,
- ac_get_spi_shader_z_format(ps->info.info.ps.writes_z,
- ps->info.info.ps.writes_stencil,
- ps->info.info.ps.writes_sample_mask));
+ ac_get_spi_shader_z_format(ps->info.ps.writes_z,
+ ps->info.ps.writes_stencil,
+ ps->info.ps.writes_sample_mask));
if (pipeline->device->dfsm_allowed) {
/* optimise this? */
@@ -4185,16 +4185,16 @@ radv_compute_vgt_shader_stages_en(const struct radv_pipeline *pipeline)
uint8_t hs_size = 64, gs_size = 64, vs_size = 64;
if (radv_pipeline_has_tess(pipeline))
- hs_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.info.wave_size;
+ hs_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.wave_size;
if (pipeline->shaders[MESA_SHADER_GEOMETRY]) {
- vs_size = gs_size = pipeline->shaders[MESA_SHADER_GEOMETRY]->info.info.wave_size;
+ vs_size = gs_size = pipeline->shaders[MESA_SHADER_GEOMETRY]->info.wave_size;
if (pipeline->gs_copy_shader)
- vs_size = pipeline->gs_copy_shader->info.info.wave_size;
+ vs_size = pipeline->gs_copy_shader->info.wave_size;
} else if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
- vs_size = pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.info.wave_size;
+ vs_size = pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.wave_size;
else if (pipeline->shaders[MESA_SHADER_VERTEX])
- vs_size = pipeline->shaders[MESA_SHADER_VERTEX]->info.info.wave_size;
+ vs_size = pipeline->shaders[MESA_SHADER_VERTEX]->info.wave_size;
if (radv_pipeline_has_ngg(pipeline))
gs_size = vs_size;
@@ -4262,8 +4262,8 @@ gfx10_pipeline_generate_ge_cntl(struct radeon_cmdbuf *ctx_cs,
}
if (radv_pipeline_has_tess(pipeline)) {
- if (pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.info.uses_prim_id ||
- radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.info.uses_prim_id)
+ if (pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.uses_prim_id ||
+ radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.uses_prim_id)
break_wave_at_eoi = true;
}
@@ -4369,15 +4369,15 @@ radv_compute_ia_multi_vgt_param_helpers(struct radv_pipeline *pipeline,
}
ia_multi_vgt_param.ia_switch_on_eoi = false;
- if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.prim_id_input)
+ if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.prim_id_input)
ia_multi_vgt_param.ia_switch_on_eoi = true;
if (radv_pipeline_has_gs(pipeline) &&
- pipeline->shaders[MESA_SHADER_GEOMETRY]->info.info.uses_prim_id)
+ pipeline->shaders[MESA_SHADER_GEOMETRY]->info.uses_prim_id)
ia_multi_vgt_param.ia_switch_on_eoi = true;
if (radv_pipeline_has_tess(pipeline)) {
/* SWITCH_ON_EOI must be set if PrimID is used. */
- if (pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.info.uses_prim_id ||
- radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.info.uses_prim_id)
+ if (pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.uses_prim_id ||
+ radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.uses_prim_id)
ia_multi_vgt_param.ia_switch_on_eoi = true;
}
@@ -4480,7 +4480,7 @@ radv_pipeline_get_streamout_shader(struct radv_pipeline *pipeline)
struct radv_shader_variant *shader =
radv_get_shader(pipeline, i);
- if (shader && shader->info.info.so.num_outputs > 0)
+ if (shader && shader->info.so.num_outputs > 0)
return shader;
}
@@ -4575,11 +4575,11 @@ radv_pipeline_init(struct radv_pipeline *pipeline,
*/
struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
if ((pipeline->device->physical_device->rad_info.chip_class <= GFX9 ||
- ps->info.fs.can_discard) &&
+ ps->info.ps.can_discard) &&
!blend.spi_shader_col_format) {
- if (!ps->info.info.ps.writes_z &&
- !ps->info.info.ps.writes_stencil &&
- !ps->info.info.ps.writes_sample_mask)
+ if (!ps->info.ps.writes_z &&
+ !ps->info.ps.writes_stencil &&
+ !ps->info.ps.writes_sample_mask)
blend.spi_shader_col_format = V_028714_SPI_SHADER_32_R;
}
@@ -4620,7 +4620,7 @@ radv_pipeline_init(struct radv_pipeline *pipeline,
if (loc->sgpr_idx != -1) {
pipeline->graphics.vtx_base_sgpr = pipeline->user_data_0[MESA_SHADER_VERTEX];
pipeline->graphics.vtx_base_sgpr += loc->sgpr_idx * 4;
- if (radv_get_shader(pipeline, MESA_SHADER_VERTEX)->info.info.vs.needs_draw_id)
+ if (radv_get_shader(pipeline, MESA_SHADER_VERTEX)->info.vs.needs_draw_id)
pipeline->graphics.vtx_emit_num = 3;
else
pipeline->graphics.vtx_emit_num = 2;
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index 219495ef3d6..a9c0fc6a97b 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -2107,18 +2107,18 @@ struct radv_fence {
};
/* radv_nir_to_llvm.c */
-struct radv_shader_variant_info;
+struct radv_shader_info;
struct radv_nir_compiler_options;
void radv_compile_gs_copy_shader(struct ac_llvm_compiler *ac_llvm,
struct nir_shader *geom_shader,
struct radv_shader_binary **rbinary,
- struct radv_shader_variant_info *shader_info,
+ struct radv_shader_info *info,
const struct radv_nir_compiler_options *option);
void radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
struct radv_shader_binary **rbinary,
- struct radv_shader_variant_info *shader_info,
+ struct radv_shader_info *info,
struct nir_shader *const *nir,
int nir_count,
const struct radv_nir_compiler_options *options);
diff --git a/src/amd/vulkan/radv_shader.c b/src/amd/vulkan/radv_shader.c
index f90689e85b5..c99e2615fca 100644
--- a/src/amd/vulkan/radv_shader.c
+++ b/src/amd/vulkan/radv_shader.c
@@ -616,7 +616,7 @@ radv_get_shader_binary_size(size_t code_size)
static void radv_postprocess_config(const struct radv_physical_device *pdevice,
const struct ac_shader_config *config_in,
- const struct radv_shader_variant_info *info,
+ const struct radv_shader_info *info,
gl_shader_stage stage,
struct ac_shader_config *config_out)
{
@@ -684,14 +684,14 @@ static void radv_postprocess_config(const struct radv_physical_device *pdevice,
config_out->rsrc2 = S_00B12C_USER_SGPR(info->num_user_sgprs) |
S_00B12C_SCRATCH_EN(scratch_enabled) |
- S_00B12C_SO_BASE0_EN(!!info->info.so.strides[0]) |
- S_00B12C_SO_BASE1_EN(!!info->info.so.strides[1]) |
- S_00B12C_SO_BASE2_EN(!!info->info.so.strides[2]) |
- S_00B12C_SO_BASE3_EN(!!info->info.so.strides[3]) |
- S_00B12C_SO_EN(!!info->info.so.num_outputs);
+ S_00B12C_SO_BASE0_EN(!!info->so.strides[0]) |
+ S_00B12C_SO_BASE1_EN(!!info->so.strides[1]) |
+ S_00B12C_SO_BASE2_EN(!!info->so.strides[2]) |
+ S_00B12C_SO_BASE3_EN(!!info->so.strides[3]) |
+ S_00B12C_SO_EN(!!info->so.num_outputs);
config_out->rsrc1 = S_00B848_VGPRS((num_vgprs - 1) /
- (info->info.wave_size == 32 ? 8 : 4)) |
+ (info->wave_size == 32 ? 8 : 4)) |
S_00B848_DX10_CLAMP(1) |
S_00B848_FLOAT_MODE(config_out->float_mode);
@@ -709,11 +709,11 @@ static void radv_postprocess_config(const struct radv_physical_device *pdevice,
config_out->rsrc2 |= S_00B22C_OC_LDS_EN(1);
} else if (info->tes.as_es) {
assert(pdevice->rad_info.chip_class <= GFX8);
- vgpr_comp_cnt = info->info.uses_prim_id ? 3 : 2;
+ vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;
config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
} else {
- bool enable_prim_id = info->tes.export_prim_id || info->info.uses_prim_id;
+ bool enable_prim_id = info->tes.export_prim_id || info->uses_prim_id;
vgpr_comp_cnt = enable_prim_id ? 3 : 2;
config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
@@ -727,9 +727,9 @@ static void radv_postprocess_config(const struct radv_physical_device *pdevice,
* StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
*/
if (pdevice->rad_info.chip_class >= GFX10) {
- vgpr_comp_cnt = info->info.vs.needs_instance_id ? 3 : 1;
+ vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 1;
} else {
- vgpr_comp_cnt = info->info.vs.needs_instance_id ? 2 : 1;
+ vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
}
} else {
config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
@@ -746,21 +746,21 @@ static void radv_postprocess_config(const struct radv_physical_device *pdevice,
* VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
* StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
*/
- vgpr_comp_cnt = info->info.vs.needs_instance_id ? 2 : 1;
+ vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
} else if (info->vs.as_es) {
assert(pdevice->rad_info.chip_class <= GFX8);
/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
- vgpr_comp_cnt = info->info.vs.needs_instance_id ? 1 : 0;
+ vgpr_comp_cnt = info->vs.needs_instance_id ? 1 : 0;
} else {
/* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID)
* If PrimID is disabled. InstanceID / StepRate1 is loaded instead.
* StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
*/
- if (info->info.vs.needs_instance_id && pdevice->rad_info.chip_class >= GFX10) {
+ if (info->vs.needs_instance_id && pdevice->rad_info.chip_class >= GFX10) {
vgpr_comp_cnt = 3;
} else if (info->vs.export_prim_id) {
vgpr_comp_cnt = 2;
- } else if (info->info.vs.needs_instance_id) {
+ } else if (info->vs.needs_instance_id) {
vgpr_comp_cnt = 1;
} else {
vgpr_comp_cnt = 0;
@@ -780,12 +780,12 @@ static void radv_postprocess_config(const struct radv_physical_device *pdevice,
config_out->rsrc1 |= S_00B848_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
config_out->rsrc2 |=
- S_00B84C_TGID_X_EN(info->info.cs.uses_block_id[0]) |
- S_00B84C_TGID_Y_EN(info->info.cs.uses_block_id[1]) |
- S_00B84C_TGID_Z_EN(info->info.cs.uses_block_id[2]) |
- S_00B84C_TIDIG_COMP_CNT(info->info.cs.uses_thread_id[2] ? 2 :
- info->info.cs.uses_thread_id[1] ? 1 : 0) |
- S_00B84C_TG_SIZE_EN(info->info.cs.uses_local_invocation_idx) |
+ S_00B84C_TGID_X_EN(info->cs.uses_block_id[0]) |
+ S_00B84C_TGID_Y_EN(info->cs.uses_block_id[1]) |
+ S_00B84C_TGID_Z_EN(info->cs.uses_block_id[2]) |
+ S_00B84C_TIDIG_COMP_CNT(info->cs.uses_thread_id[2] ? 2 :
+ info->cs.uses_thread_id[1] ? 1 : 0) |
+ S_00B84C_TG_SIZE_EN(info->cs.uses_local_invocation_idx) |
S_00B84C_LDS_SIZE(config_in->lds_size);
break;
default:
@@ -802,18 +802,18 @@ static void radv_postprocess_config(const struct radv_physical_device *pdevice,
/* VGPR5-8: (VertexID, UserVGPR0, UserVGPR1, UserVGPR2 / InstanceID) */
if (es_stage == MESA_SHADER_VERTEX) {
- es_vgpr_comp_cnt = info->info.vs.needs_instance_id ? 3 : 0;
+ es_vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 0;
} else if (es_stage == MESA_SHADER_TESS_EVAL) {
- bool enable_prim_id = info->tes.export_prim_id || info->info.uses_prim_id;
+ bool enable_prim_id = info->tes.export_prim_id || info->uses_prim_id;
es_vgpr_comp_cnt = enable_prim_id ? 3 : 2;
} else
unreachable("Unexpected ES shader stage");
bool tes_triangles = stage == MESA_SHADER_TESS_EVAL &&
info->tes.primitive_mode >= 4; /* GL_TRIANGLES */
- if (info->info.uses_invocation_id || stage == MESA_SHADER_VERTEX) {
+ if (info->uses_invocation_id || stage == MESA_SHADER_VERTEX) {
gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
- } else if (info->info.uses_prim_id) {
+ } else if (info->uses_prim_id) {
gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
} else if (info->gs.vertices_in >= 3 || tes_triangles) {
gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
@@ -833,13 +833,13 @@ static void radv_postprocess_config(const struct radv_physical_device *pdevice,
if (es_type == MESA_SHADER_VERTEX) {
/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
- if (info->info.vs.needs_instance_id) {
+ if (info->vs.needs_instance_id) {
es_vgpr_comp_cnt = pdevice->rad_info.chip_class >= GFX10 ? 3 : 1;
} else {
es_vgpr_comp_cnt = 0;
}
} else if (es_type == MESA_SHADER_TESS_EVAL) {
- es_vgpr_comp_cnt = info->info.uses_prim_id ? 3 : 2;
+ es_vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;
} else {
unreachable("invalid shader ES type");
}
@@ -847,9 +847,9 @@ static void radv_postprocess_config(const struct radv_physical_device *pdevice,
/* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
* VGPR[0:4] are always loaded.
*/
- if (info->info.uses_invocation_id) {
+ if (info->uses_invocation_id) {
gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
- } else if (info->info.uses_prim_id) {
+ } else if (info->uses_prim_id) {
gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
} else if (info->gs.vertices_in >= 3) {
gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
@@ -894,14 +894,14 @@ radv_shader_variant_create(struct radv_device *device,
esgs_ring_size = 32 * 1024;
}
- if (binary->variant_info.is_ngg) {
+ if (binary->info.is_ngg) {
/* GS stores Primitive IDs into LDS at the address
* corresponding to the ES thread of the provoking
* vertex. All ES threads load and export PrimitiveID
* for their thread.
*/
if (binary->stage == MESA_SHADER_VERTEX &&
- binary->variant_info.vs.export_prim_id) {
+ binary->info.vs.export_prim_id) {
/* TODO: Do not harcode this value */
esgs_ring_size = 256 /* max_out_verts */ * 4;
}
@@ -918,14 +918,14 @@ radv_shader_variant_create(struct radv_device *device,
/* Make sure to have LDS space for NGG scratch. */
/* TODO: Compute this correctly somehow? */
- if (binary->variant_info.is_ngg)
+ if (binary->info.is_ngg)
sym->size -= 32;
}
struct ac_rtld_open_info open_info = {
.info = &device->physical_device->rad_info,
.shader_type = binary->stage,
- .wave_size = binary->variant_info.info.wave_size,
+ .wave_size = binary->info.wave_size,
.num_parts = 1,
.elf_ptrs = &elf_data,
.elf_sizes = &elf_size,
@@ -958,8 +958,8 @@ radv_shader_variant_create(struct radv_device *device,
variant->exec_size = variant->code_size;
}
- variant->info = binary->variant_info;
- radv_postprocess_config(device->physical_device, &config, &binary->variant_info,
+ variant->info = binary->info;
+ radv_postprocess_config(device->physical_device, &config, &binary->info,
binary->stage, &variant->config);
void *dest_ptr = radv_alloc_shader_memory(device, variant);
@@ -1048,7 +1048,7 @@ shader_variant_compile(struct radv_device *device,
enum ac_target_machine_options tm_options = 0;
struct ac_llvm_compiler ac_llvm;
struct radv_shader_binary *binary = NULL;
- struct radv_shader_variant_info variant_info = {0};
+ struct radv_shader_info info = {0};
bool thread_compiler;
options->family = chip_family;
@@ -1090,12 +1090,12 @@ shader_variant_compile(struct radv_device *device,
if (gs_copy_shader) {
assert(shader_count == 1);
radv_compile_gs_copy_shader(&ac_llvm, *shaders, &binary,
- &variant_info, options);
+ &info, options);
} else {
- radv_compile_nir_shader(&ac_llvm, &binary, &variant_info,
+ radv_compile_nir_shader(&ac_llvm, &binary, &info,
shaders, shader_count, options);
}
- binary->variant_info = variant_info;
+ binary->info = info;
radv_destroy_llvm_compiler(&ac_llvm, thread_compiler);
@@ -1184,7 +1184,7 @@ radv_shader_variant_destroy(struct radv_device *device,
}
const char *
-radv_get_shader_name(struct radv_shader_variant_info *info,
+radv_get_shader_name(struct radv_shader_info *info,
gl_shader_stage stage)
{
switch (stage) {
@@ -1244,7 +1244,7 @@ radv_get_max_waves(struct radv_device *device,
{
enum chip_class chip_class = device->physical_device->rad_info.chip_class;
unsigned lds_increment = chip_class >= GFX7 ? 512 : 256;
- uint8_t wave_size = variant->info.info.wave_size;
+ uint8_t wave_size = variant->info.wave_size;
struct ac_shader_config *conf = &variant->config;
unsigned max_simd_waves;
unsigned lds_per_wave = 0;
@@ -1253,7 +1253,7 @@ radv_get_max_waves(struct radv_device *device,
if (stage == MESA_SHADER_FRAGMENT) {
lds_per_wave = conf->lds_size * lds_increment +
- align(variant->info.info.ps.num_interp * 48,
+ align(variant->info.ps.num_interp * 48,
lds_increment);
} else if (stage == MESA_SHADER_COMPUTE) {
unsigned max_workgroup_size =
diff --git a/src/amd/vulkan/radv_shader.h b/src/amd/vulkan/radv_shader.h
index 9d18d4410c1..0deb786f70e 100644
--- a/src/amd/vulkan/radv_shader.h
+++ b/src/amd/vulkan/radv_shader.h
@@ -168,6 +168,33 @@ struct radv_streamout_info {
uint32_t enabled_stream_buffers_mask;
};
+struct radv_userdata_info {
+ int8_t sgpr_idx;
+ uint8_t num_sgprs;
+};
+
+struct radv_userdata_locations {
+ struct radv_userdata_info descriptor_sets[MAX_SETS];
+ struct radv_userdata_info shader_data[AC_UD_MAX_UD];
+ uint32_t descriptor_sets_enabled;
+};
+
+struct radv_vs_output_info {
+ uint8_t vs_output_param_offset[VARYING_SLOT_MAX];
+ uint8_t clip_dist_mask;
+ uint8_t cull_dist_mask;
+ uint8_t param_exports;
+ bool writes_pointsize;
+ bool writes_layer;
+ bool writes_viewport_index;
+ bool export_prim_id;
+ unsigned pos_exports;
+};
+
+struct radv_es_output_info {
+ uint32_t esgs_itemsize;
+};
+
struct radv_shader_info {
bool loads_push_constants;
bool loads_dynamic_offsets;
@@ -182,6 +209,13 @@ struct radv_shader_info {
bool uses_invocation_id;
bool uses_prim_id;
uint8_t wave_size;
+ struct radv_userdata_locations user_sgprs_locs;
+ unsigned num_user_sgprs;
+ unsigned num_input_sgprs;
+ unsigned num_input_vgprs;
+ unsigned private_mem_vgprs;
+ bool need_indirect_descriptor_sets;
+ bool is_ngg;
struct {
uint64_t ls_outputs_written;
uint8_t input_usage_mask[VERT_ATTRIB_MAX];
@@ -189,15 +223,35 @@ struct radv_shader_info {
bool has_vertex_buffers; /* needs vertex buffers and base/start */
bool needs_draw_id;
bool needs_instance_id;
+ struct radv_vs_output_info outinfo;
+ struct radv_es_output_info es_info;
+ bool as_es;
+ bool as_ls;
+ bool export_prim_id;
} vs;
struct {
uint8_t output_usage_mask[VARYING_SLOT_VAR31 + 1];
uint8_t num_stream_output_components[4];
uint8_t output_streams[VARYING_SLOT_VAR31 + 1];
uint8_t max_stream;
+ unsigned gsvs_vertex_size;
+ unsigned max_gsvs_emit_size;
+ unsigned vertices_in;
+ unsigned vertices_out;
+ unsigned output_prim;
+ unsigned invocations;
+ unsigned es_type; /* GFX9: VS or TES */
} gs;
struct {
uint8_t output_usage_mask[VARYING_SLOT_VAR31 + 1];
+ struct radv_vs_output_info outinfo;
+ struct radv_es_output_info es_info;
+ bool as_es;
+ unsigned primitive_mode;
+ enum gl_tess_spacing spacing;
+ bool ccw;
+ bool point_mode;
+ bool export_prim_id;
} tes;
struct {
bool force_persample;
@@ -214,100 +268,28 @@ struct radv_shader_info {
uint32_t flat_shaded_mask;
uint32_t float16_shaded_mask;
uint32_t num_interp;
+ bool can_discard;
+ bool early_fragment_test;
+ bool post_depth_coverage;
} ps;
struct {
bool uses_grid_size;
bool uses_block_id[3];
bool uses_thread_id[3];
bool uses_local_invocation_idx;
+ unsigned block_size[3];
} cs;
struct {
uint64_t outputs_written;
uint64_t patch_outputs_written;
+ unsigned tcs_vertices_out;
+ uint32_t num_patches;
+ uint32_t lds_size;
} tcs;
struct radv_streamout_info so;
};
-struct radv_userdata_info {
- int8_t sgpr_idx;
- uint8_t num_sgprs;
-};
-
-struct radv_userdata_locations {
- struct radv_userdata_info descriptor_sets[MAX_SETS];
- struct radv_userdata_info shader_data[AC_UD_MAX_UD];
- uint32_t descriptor_sets_enabled;
-};
-
-struct radv_vs_output_info {
- uint8_t vs_output_param_offset[VARYING_SLOT_MAX];
- uint8_t clip_dist_mask;
- uint8_t cull_dist_mask;
- uint8_t param_exports;
- bool writes_pointsize;
- bool writes_layer;
- bool writes_viewport_index;
- bool export_prim_id;
- unsigned pos_exports;
-};
-
-struct radv_es_output_info {
- uint32_t esgs_itemsize;
-};
-
-struct radv_shader_variant_info {
- struct radv_userdata_locations user_sgprs_locs;
- struct radv_shader_info info;
- unsigned num_user_sgprs;
- unsigned num_input_sgprs;
- unsigned num_input_vgprs;
- unsigned private_mem_vgprs;
- bool need_indirect_descriptor_sets;
- bool is_ngg;
- struct {
- struct {
- struct radv_vs_output_info outinfo;
- struct radv_es_output_info es_info;
- bool as_es;
- bool as_ls;
- bool export_prim_id;
- } vs;
- struct {
- bool can_discard;
- bool early_fragment_test;
- bool post_depth_coverage;
- } fs;
- struct {
- unsigned block_size[3];
- } cs;
- struct {
- unsigned vertices_in;
- unsigned vertices_out;
- unsigned output_prim;
- unsigned invocations;
- unsigned gsvs_vertex_size;
- unsigned max_gsvs_emit_size;
- unsigned es_type; /* GFX9: VS or TES */
- } gs;
- struct {
- unsigned tcs_vertices_out;
- uint32_t num_patches;
- uint32_t lds_size;
- } tcs;
- struct {
- struct radv_vs_output_info outinfo;
- struct radv_es_output_info es_info;
- bool as_es;
- unsigned primitive_mode;
- enum gl_tess_spacing spacing;
- bool ccw;
- bool point_mode;
- bool export_prim_id;
- } tes;
- };
-};
-
enum radv_shader_binary_type {
RADV_BINARY_TYPE_LEGACY,
RADV_BINARY_TYPE_RTLD
@@ -318,7 +300,7 @@ struct radv_shader_binary {
gl_shader_stage stage;
bool is_gs_copy_shader;
- struct radv_shader_variant_info variant_info;
+ struct radv_shader_info info;
/* Self-referential size so we avoid consistency issues. */
uint32_t total_size;
@@ -351,7 +333,7 @@ struct radv_shader_variant {
struct ac_shader_config config;
uint32_t code_size;
uint32_t exec_size;
- struct radv_shader_variant_info info;
+ struct radv_shader_info info;
/* debug only */
uint32_t *spirv;
@@ -429,7 +411,7 @@ radv_get_max_workgroup_size(enum chip_class chip_class,
const unsigned *sizes);
const char *
-radv_get_shader_name(struct radv_shader_variant_info *info,
+radv_get_shader_name(struct radv_shader_info *info,
gl_shader_stage stage);
void