-rw-r--r--  src/amd/common/ac_nir_to_llvm.c  38
-rw-r--r--  src/amd/vulkan/radv_meta.c  4
-rw-r--r--  src/amd/vulkan/radv_meta_blit.c  8
-rw-r--r--  src/amd/vulkan/radv_meta_blit2d.c  8
-rw-r--r--  src/amd/vulkan/radv_meta_buffer.c  28
-rw-r--r--  src/amd/vulkan/radv_meta_bufimage.c  56
-rw-r--r--  src/amd/vulkan/radv_meta_clear.c  8
-rw-r--r--  src/amd/vulkan/radv_meta_resolve.c  2
-rw-r--r--  src/amd/vulkan/radv_meta_resolve_cs.c  14
-rw-r--r--  src/amd/vulkan/radv_meta_resolve_fs.c  4
-rw-r--r--  src/amd/vulkan/radv_pipeline.c  24
-rw-r--r--  src/amd/vulkan/radv_query.c  28
-rw-r--r--  src/compiler/glsl/glsl_to_nir.cpp  12
-rw-r--r--  src/compiler/nir/nir.c  3
-rw-r--r--  src/compiler/nir/nir.h  2
-rw-r--r--  src/compiler/nir/nir_clone.c  8
-rw-r--r--  src/compiler/nir/nir_gather_info.c  46
-rw-r--r--  src/compiler/nir/nir_lower_bitmap.c  2
-rw-r--r--  src/compiler/nir/nir_lower_clip.c  2
-rw-r--r--  src/compiler/nir/nir_lower_clip_cull_distance_arrays.c  4
-rw-r--r--  src/compiler/nir/nir_lower_gs_intrinsics.c  2
-rw-r--r--  src/compiler/nir/nir_lower_system_values.c  10
-rw-r--r--  src/compiler/nir/nir_print.c  18
-rw-r--r--  src/compiler/nir/nir_sweep.c  14
-rw-r--r--  src/compiler/spirv/spirv_to_nir.c  48
-rw-r--r--  src/compiler/spirv/vtn_variables.c  14
-rw-r--r--  src/gallium/auxiliary/nir/tgsi_to_nir.c  14
-rw-r--r--  src/gallium/drivers/freedreno/ir3/ir3_compiler_nir.c  4
-rw-r--r--  src/gallium/drivers/freedreno/ir3/ir3_shader.c  2
-rw-r--r--  src/gallium/drivers/vc4/vc4_nir_lower_blend.c  2
-rw-r--r--  src/gallium/drivers/vc4/vc4_program.c  6
-rw-r--r--  src/intel/blorp/blorp.c  6
-rw-r--r--  src/intel/blorp/blorp_blit.c  2
-rw-r--r--  src/intel/blorp/blorp_clear.c  4
-rw-r--r--  src/intel/compiler/brw_fs.cpp  86
-rw-r--r--  src/intel/compiler/brw_fs_nir.cpp  18
-rw-r--r--  src/intel/compiler/brw_fs_visitor.cpp  10
-rw-r--r--  src/intel/compiler/brw_nir.c  8
-rw-r--r--  src/intel/compiler/brw_nir_intrinsics.c  4
-rw-r--r--  src/intel/compiler/brw_shader.cpp  28
-rw-r--r--  src/intel/compiler/brw_vec4.cpp  20
-rw-r--r--  src/intel/compiler/brw_vec4_generator.cpp  4
-rw-r--r--  src/intel/compiler/brw_vec4_gs_visitor.cpp  36
-rw-r--r--  src/intel/compiler/brw_vec4_nir.cpp  8
-rw-r--r--  src/intel/compiler/brw_vec4_tcs.cpp  28
-rw-r--r--  src/intel/compiler/brw_wm_iz.cpp  2
-rw-r--r--  src/intel/compiler/gen6_gs_visitor.cpp  12
-rw-r--r--  src/intel/vulkan/anv_pipeline.c  34
-rw-r--r--  src/mesa/drivers/dri/i965/brw_link.cpp  2
-rw-r--r--  src/mesa/drivers/dri/i965/brw_program.c  16
-rw-r--r--  src/mesa/drivers/dri/i965/brw_tcs.c  14
-rw-r--r--  src/mesa/drivers/dri/i965/brw_tes.c  8
-rw-r--r--  src/mesa/drivers/dri/i965/brw_vs.c  2
-rw-r--r--  src/mesa/drivers/dri/i965/brw_wm.c  6
-rw-r--r--  src/mesa/drivers/dri/i965/brw_wm_surface_state.c  12
-rw-r--r--  src/mesa/program/prog_to_nir.c  26
56 files changed, 410 insertions, 421 deletions
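
The series is largely mechanical: shader_info becomes a member embedded by value in nir_shader rather than a separately allocated object reached through a pointer, so every nir->info->field access in the files above turns into nir->info.field. A minimal standalone sketch of that before/after pattern follows; the types are illustrative stand-ins, not Mesa's real headers.

#include <stdio.h>

/* Illustrative stand-ins only; the real shader_info/nir_shader structs live
 * elsewhere in Mesa and carry many more fields. */
struct shader_info {
   const char *name;
   unsigned gs_vertices_out;
};

struct nir_shader_model {
   struct shader_info info;    /* was: struct shader_info *info; */
};

int main(void)
{
   struct nir_shader_model s = { .info = { "demo_gs", 4 } };
   /* Old access pattern: s.info->gs_vertices_out
    * New access pattern: s.info.gs_vertices_out */
   printf("%s emits at most %u vertices\n", s.info.name, s.info.gs_vertices_out);
   return 0;
}
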
diff --git a/src/amd/common/ac_nir_to_llvm.c b/src/amd/common/ac_nir_to_llvm.c
index 535b1523366..8ae0a75fd04 100644
--- a/src/amd/common/ac_nir_to_llvm.c
+++ b/src/amd/common/ac_nir_to_llvm.c
@@ -5924,9 +5924,9 @@ LLVMModuleRef ac_translate_nir_to_llvm(LLVMTargetMachineRef tm,
} else if (nir->stage == MESA_SHADER_GEOMETRY) {
ctx.gs_next_vertex = ac_build_alloca(&ctx, ctx.i32, "gs_next_vertex");
- ctx.gs_max_out_vertices = nir->info->gs.vertices_out;
+ ctx.gs_max_out_vertices = nir->info.gs.vertices_out;
} else if (nir->stage == MESA_SHADER_TESS_EVAL) {
- ctx.tes_primitive_mode = nir->info->tess.primitive_mode;
+ ctx.tes_primitive_mode = nir->info.tess.primitive_mode;
}
ac_setup_rings(&ctx);
@@ -5937,8 +5937,8 @@ LLVMModuleRef ac_translate_nir_to_llvm(LLVMTargetMachineRef tm,
if (nir->stage == MESA_SHADER_FRAGMENT)
handle_fs_inputs_pre(&ctx, nir);
- ctx.num_output_clips = nir->info->clip_distance_array_size;
- ctx.num_output_culls = nir->info->cull_distance_array_size;
+ ctx.num_output_clips = nir->info.clip_distance_array_size;
+ ctx.num_output_culls = nir->info.cull_distance_array_size;
nir_foreach_variable(variable, &nir->outputs)
handle_shader_output_decl(&ctx, variable);
@@ -5969,7 +5969,7 @@ LLVMModuleRef ac_translate_nir_to_llvm(LLVMTargetMachineRef tm,
unsigned addclip = ctx.num_output_clips + ctx.num_output_culls > 4;
shader_info->gs.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
shader_info->gs.max_gsvs_emit_size = shader_info->gs.gsvs_vertex_size *
- nir->info->gs.vertices_out;
+ nir->info.gs.vertices_out;
} else if (nir->stage == MESA_SHADER_TESS_CTRL) {
shader_info->tcs.outputs_written = ctx.tess_outputs_written;
shader_info->tcs.patch_outputs_written = ctx.tess_patch_outputs_written;
@@ -6122,26 +6122,26 @@ void ac_compile_nir_shader(LLVMTargetMachineRef tm,
switch (nir->stage) {
case MESA_SHADER_COMPUTE:
for (int i = 0; i < 3; ++i)
- shader_info->cs.block_size[i] = nir->info->cs.local_size[i];
+ shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
break;
case MESA_SHADER_FRAGMENT:
- shader_info->fs.early_fragment_test = nir->info->fs.early_fragment_tests;
+ shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
break;
case MESA_SHADER_GEOMETRY:
- shader_info->gs.vertices_in = nir->info->gs.vertices_in;
- shader_info->gs.vertices_out = nir->info->gs.vertices_out;
- shader_info->gs.output_prim = nir->info->gs.output_primitive;
- shader_info->gs.invocations = nir->info->gs.invocations;
+ shader_info->gs.vertices_in = nir->info.gs.vertices_in;
+ shader_info->gs.vertices_out = nir->info.gs.vertices_out;
+ shader_info->gs.output_prim = nir->info.gs.output_primitive;
+ shader_info->gs.invocations = nir->info.gs.invocations;
break;
case MESA_SHADER_TESS_EVAL:
- shader_info->tes.primitive_mode = nir->info->tess.primitive_mode;
- shader_info->tes.spacing = nir->info->tess.spacing;
- shader_info->tes.ccw = nir->info->tess.ccw;
- shader_info->tes.point_mode = nir->info->tess.point_mode;
+ shader_info->tes.primitive_mode = nir->info.tess.primitive_mode;
+ shader_info->tes.spacing = nir->info.tess.spacing;
+ shader_info->tes.ccw = nir->info.tess.ccw;
+ shader_info->tes.point_mode = nir->info.tess.point_mode;
shader_info->tes.as_es = options->key.tes.as_es;
break;
case MESA_SHADER_TESS_CTRL:
- shader_info->tcs.tcs_vertices_out = nir->info->tess.tcs_vertices_out;
+ shader_info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
break;
case MESA_SHADER_VERTEX:
shader_info->vs.as_es = options->key.vs.as_es;
@@ -6231,11 +6231,11 @@ void ac_create_gs_copy_shader(LLVMTargetMachineRef tm,
create_function(&ctx);
- ctx.gs_max_out_vertices = geom_shader->info->gs.vertices_out;
+ ctx.gs_max_out_vertices = geom_shader->info.gs.vertices_out;
ac_setup_rings(&ctx);
- ctx.num_output_clips = geom_shader->info->clip_distance_array_size;
- ctx.num_output_culls = geom_shader->info->cull_distance_array_size;
+ ctx.num_output_clips = geom_shader->info.clip_distance_array_size;
+ ctx.num_output_culls = geom_shader->info.cull_distance_array_size;
nir_foreach_variable(variable, &geom_shader->outputs)
handle_shader_output_decl(&ctx, variable);
diff --git a/src/amd/vulkan/radv_meta.c b/src/amd/vulkan/radv_meta.c
index fb83576fe64..973316103aa 100644
--- a/src/amd/vulkan/radv_meta.c
+++ b/src/amd/vulkan/radv_meta.c
@@ -452,7 +452,7 @@ radv_meta_build_nir_vs_generate_vertices(void)
nir_variable *v_position;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
- b.shader->info->name = ralloc_strdup(b.shader, "meta_vs_gen_verts");
+ b.shader->info.name = ralloc_strdup(b.shader, "meta_vs_gen_verts");
nir_ssa_def *outvec = radv_meta_gen_rect_vertices(&b);
@@ -471,7 +471,7 @@ radv_meta_build_nir_fs_noop(void)
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
- b.shader->info->name = ralloc_asprintf(b.shader,
+ b.shader->info.name = ralloc_asprintf(b.shader,
"meta_noop_fs");
return b.shader;
diff --git a/src/amd/vulkan/radv_meta_blit.c b/src/amd/vulkan/radv_meta_blit.c
index c04a611f4a8..439309903d0 100644
--- a/src/amd/vulkan/radv_meta_blit.c
+++ b/src/amd/vulkan/radv_meta_blit.c
@@ -38,7 +38,7 @@ build_nir_vertex_shader(void)
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
- b.shader->info->name = ralloc_strdup(b.shader, "meta_blit_vs");
+ b.shader->info.name = ralloc_strdup(b.shader, "meta_blit_vs");
nir_variable *pos_out = nir_variable_create(b.shader, nir_var_shader_out,
vec4, "gl_Position");
@@ -109,7 +109,7 @@ build_nir_copy_fragment_shader(enum glsl_sampler_dim tex_dim)
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
sprintf(shader_name, "meta_blit_fs.%d", tex_dim);
- b.shader->info->name = ralloc_strdup(b.shader, shader_name);
+ b.shader->info.name = ralloc_strdup(b.shader, shader_name);
nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
vec4, "v_tex_pos");
@@ -163,7 +163,7 @@ build_nir_copy_fragment_shader_depth(enum glsl_sampler_dim tex_dim)
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
sprintf(shader_name, "meta_blit_depth_fs.%d", tex_dim);
- b.shader->info->name = ralloc_strdup(b.shader, shader_name);
+ b.shader->info.name = ralloc_strdup(b.shader, shader_name);
nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
vec4, "v_tex_pos");
@@ -217,7 +217,7 @@ build_nir_copy_fragment_shader_stencil(enum glsl_sampler_dim tex_dim)
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
sprintf(shader_name, "meta_blit_stencil_fs.%d", tex_dim);
- b.shader->info->name = ralloc_strdup(b.shader, shader_name);
+ b.shader->info.name = ralloc_strdup(b.shader, shader_name);
nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
vec4, "v_tex_pos");
diff --git a/src/amd/vulkan/radv_meta_blit2d.c b/src/amd/vulkan/radv_meta_blit2d.c
index ec4e52fd31e..aae35d2a79e 100644
--- a/src/amd/vulkan/radv_meta_blit2d.c
+++ b/src/amd/vulkan/radv_meta_blit2d.c
@@ -385,7 +385,7 @@ build_nir_vertex_shader(void)
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
- b.shader->info->name = ralloc_strdup(b.shader, "meta_blit2d_vs");
+ b.shader->info.name = ralloc_strdup(b.shader, "meta_blit2d_vs");
nir_variable *pos_out = nir_variable_create(b.shader, nir_var_shader_out,
vec4, "gl_Position");
@@ -527,7 +527,7 @@ build_nir_copy_fragment_shader(struct radv_device *device,
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
- b.shader->info->name = ralloc_strdup(b.shader, name);
+ b.shader->info.name = ralloc_strdup(b.shader, name);
nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
vec2, "v_tex_pos");
@@ -556,7 +556,7 @@ build_nir_copy_fragment_shader_depth(struct radv_device *device,
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
- b.shader->info->name = ralloc_strdup(b.shader, name);
+ b.shader->info.name = ralloc_strdup(b.shader, name);
nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
vec2, "v_tex_pos");
@@ -585,7 +585,7 @@ build_nir_copy_fragment_shader_stencil(struct radv_device *device,
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
- b.shader->info->name = ralloc_strdup(b.shader, name);
+ b.shader->info.name = ralloc_strdup(b.shader, name);
nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
vec2, "v_tex_pos");
diff --git a/src/amd/vulkan/radv_meta_buffer.c b/src/amd/vulkan/radv_meta_buffer.c
index 68de81e095f..a8a41e05fa3 100644
--- a/src/amd/vulkan/radv_meta_buffer.c
+++ b/src/amd/vulkan/radv_meta_buffer.c
@@ -10,17 +10,17 @@ build_buffer_fill_shader(struct radv_device *dev)
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
- b.shader->info->name = ralloc_strdup(b.shader, "meta_buffer_fill");
- b.shader->info->cs.local_size[0] = 64;
- b.shader->info->cs.local_size[1] = 1;
- b.shader->info->cs.local_size[2] = 1;
+ b.shader->info.name = ralloc_strdup(b.shader, "meta_buffer_fill");
+ b.shader->info.cs.local_size[0] = 64;
+ b.shader->info.cs.local_size[1] = 1;
+ b.shader->info.cs.local_size[2] = 1;
nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
nir_ssa_def *block_size = nir_imm_ivec4(&b,
- b.shader->info->cs.local_size[0],
- b.shader->info->cs.local_size[1],
- b.shader->info->cs.local_size[2], 0);
+ b.shader->info.cs.local_size[0],
+ b.shader->info.cs.local_size[1],
+ b.shader->info.cs.local_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
@@ -62,17 +62,17 @@ build_buffer_copy_shader(struct radv_device *dev)
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
- b.shader->info->name = ralloc_strdup(b.shader, "meta_buffer_copy");
- b.shader->info->cs.local_size[0] = 64;
- b.shader->info->cs.local_size[1] = 1;
- b.shader->info->cs.local_size[2] = 1;
+ b.shader->info.name = ralloc_strdup(b.shader, "meta_buffer_copy");
+ b.shader->info.cs.local_size[0] = 64;
+ b.shader->info.cs.local_size[1] = 1;
+ b.shader->info.cs.local_size[2] = 1;
nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
nir_ssa_def *block_size = nir_imm_ivec4(&b,
- b.shader->info->cs.local_size[0],
- b.shader->info->cs.local_size[1],
- b.shader->info->cs.local_size[2], 0);
+ b.shader->info.cs.local_size[0],
+ b.shader->info.cs.local_size[1],
+ b.shader->info.cs.local_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
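
The meta compute shaders above all derive a global invocation id the same way: multiply the workgroup id by the block size taken from info.cs.local_size and add the local invocation id. A small standalone model of that arithmetic on the x component, with purely illustrative values:

#include <stdio.h>

/* Models global_id.x = workgroup_id.x * local_size.x + local_invocation_id.x,
 * the computation the NIR builder code above emits. */
int main(void)
{
   const unsigned local_size_x = 64;   /* matches info.cs.local_size[0] above */
   for (unsigned wg = 0; wg < 2; wg++) {
      for (unsigned local = 0; local < 3; local++) {
         unsigned global = wg * local_size_x + local;
         printf("wg %u, local %u -> global %u\n", wg, local, global);
      }
   }
   return 0;
}
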
diff --git a/src/amd/vulkan/radv_meta_bufimage.c b/src/amd/vulkan/radv_meta_bufimage.c
index a40d4b430c1..1588d6b6e06 100644
--- a/src/amd/vulkan/radv_meta_bufimage.c
+++ b/src/amd/vulkan/radv_meta_bufimage.c
@@ -42,10 +42,10 @@ build_nir_itob_compute_shader(struct radv_device *dev)
false,
GLSL_TYPE_FLOAT);
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
- b.shader->info->name = ralloc_strdup(b.shader, "meta_itob_cs");
- b.shader->info->cs.local_size[0] = 16;
- b.shader->info->cs.local_size[1] = 16;
- b.shader->info->cs.local_size[2] = 1;
+ b.shader->info.name = ralloc_strdup(b.shader, "meta_itob_cs");
+ b.shader->info.cs.local_size[0] = 16;
+ b.shader->info.cs.local_size[1] = 16;
+ b.shader->info.cs.local_size[2] = 1;
nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform,
sampler_type, "s_tex");
input_img->data.descriptor_set = 0;
@@ -59,9 +59,9 @@ build_nir_itob_compute_shader(struct radv_device *dev)
nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
nir_ssa_def *block_size = nir_imm_ivec4(&b,
- b.shader->info->cs.local_size[0],
- b.shader->info->cs.local_size[1],
- b.shader->info->cs.local_size[2], 0);
+ b.shader->info.cs.local_size[0],
+ b.shader->info.cs.local_size[1],
+ b.shader->info.cs.local_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
@@ -244,10 +244,10 @@ build_nir_btoi_compute_shader(struct radv_device *dev)
false,
GLSL_TYPE_FLOAT);
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
- b.shader->info->name = ralloc_strdup(b.shader, "meta_btoi_cs");
- b.shader->info->cs.local_size[0] = 16;
- b.shader->info->cs.local_size[1] = 16;
- b.shader->info->cs.local_size[2] = 1;
+ b.shader->info.name = ralloc_strdup(b.shader, "meta_btoi_cs");
+ b.shader->info.cs.local_size[0] = 16;
+ b.shader->info.cs.local_size[1] = 16;
+ b.shader->info.cs.local_size[2] = 1;
nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform,
buf_type, "s_tex");
input_img->data.descriptor_set = 0;
@@ -261,9 +261,9 @@ build_nir_btoi_compute_shader(struct radv_device *dev)
nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
nir_ssa_def *block_size = nir_imm_ivec4(&b,
- b.shader->info->cs.local_size[0],
- b.shader->info->cs.local_size[1],
- b.shader->info->cs.local_size[2], 0);
+ b.shader->info.cs.local_size[0],
+ b.shader->info.cs.local_size[1],
+ b.shader->info.cs.local_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
@@ -444,10 +444,10 @@ build_nir_itoi_compute_shader(struct radv_device *dev)
false,
GLSL_TYPE_FLOAT);
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
- b.shader->info->name = ralloc_strdup(b.shader, "meta_itoi_cs");
- b.shader->info->cs.local_size[0] = 16;
- b.shader->info->cs.local_size[1] = 16;
- b.shader->info->cs.local_size[2] = 1;
+ b.shader->info.name = ralloc_strdup(b.shader, "meta_itoi_cs");
+ b.shader->info.cs.local_size[0] = 16;
+ b.shader->info.cs.local_size[1] = 16;
+ b.shader->info.cs.local_size[2] = 1;
nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform,
buf_type, "s_tex");
input_img->data.descriptor_set = 0;
@@ -461,9 +461,9 @@ build_nir_itoi_compute_shader(struct radv_device *dev)
nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
nir_ssa_def *block_size = nir_imm_ivec4(&b,
- b.shader->info->cs.local_size[0],
- b.shader->info->cs.local_size[1],
- b.shader->info->cs.local_size[2], 0);
+ b.shader->info.cs.local_size[0],
+ b.shader->info.cs.local_size[1],
+ b.shader->info.cs.local_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
@@ -634,10 +634,10 @@ build_nir_cleari_compute_shader(struct radv_device *dev)
false,
GLSL_TYPE_FLOAT);
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
- b.shader->info->name = ralloc_strdup(b.shader, "meta_cleari_cs");
- b.shader->info->cs.local_size[0] = 16;
- b.shader->info->cs.local_size[1] = 16;
- b.shader->info->cs.local_size[2] = 1;
+ b.shader->info.name = ralloc_strdup(b.shader, "meta_cleari_cs");
+ b.shader->info.cs.local_size[0] = 16;
+ b.shader->info.cs.local_size[1] = 16;
+ b.shader->info.cs.local_size[2] = 1;
nir_variable *output_img = nir_variable_create(b.shader, nir_var_uniform,
img_type, "out_img");
@@ -647,9 +647,9 @@ build_nir_cleari_compute_shader(struct radv_device *dev)
nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
nir_ssa_def *block_size = nir_imm_ivec4(&b,
- b.shader->info->cs.local_size[0],
- b.shader->info->cs.local_size[1],
- b.shader->info->cs.local_size[2], 0);
+ b.shader->info.cs.local_size[0],
+ b.shader->info.cs.local_size[1],
+ b.shader->info.cs.local_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
diff --git a/src/amd/vulkan/radv_meta_clear.c b/src/amd/vulkan/radv_meta_clear.c
index adabc1b9212..57b812d2b14 100644
--- a/src/amd/vulkan/radv_meta_clear.c
+++ b/src/amd/vulkan/radv_meta_clear.c
@@ -45,8 +45,8 @@ build_color_shaders(struct nir_shader **out_vs,
nir_builder_init_simple_shader(&vs_b, NULL, MESA_SHADER_VERTEX, NULL);
nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
- vs_b.shader->info->name = ralloc_strdup(vs_b.shader, "meta_clear_color_vs");
- fs_b.shader->info->name = ralloc_strdup(fs_b.shader, "meta_clear_color_fs");
+ vs_b.shader->info.name = ralloc_strdup(vs_b.shader, "meta_clear_color_vs");
+ fs_b.shader->info.name = ralloc_strdup(fs_b.shader, "meta_clear_color_fs");
const struct glsl_type *position_type = glsl_vec4_type();
const struct glsl_type *color_type = glsl_vec4_type();
@@ -412,8 +412,8 @@ build_depthstencil_shader(struct nir_shader **out_vs, struct nir_shader **out_fs
nir_builder_init_simple_shader(&vs_b, NULL, MESA_SHADER_VERTEX, NULL);
nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
- vs_b.shader->info->name = ralloc_strdup(vs_b.shader, "meta_clear_depthstencil_vs");
- fs_b.shader->info->name = ralloc_strdup(fs_b.shader, "meta_clear_depthstencil_fs");
+ vs_b.shader->info.name = ralloc_strdup(vs_b.shader, "meta_clear_depthstencil_vs");
+ fs_b.shader->info.name = ralloc_strdup(fs_b.shader, "meta_clear_depthstencil_fs");
const struct glsl_type *position_out_type = glsl_vec4_type();
nir_variable *vs_out_pos =
diff --git a/src/amd/vulkan/radv_meta_resolve.c b/src/amd/vulkan/radv_meta_resolve.c
index 6f50f198b4d..fca6005ea17 100644
--- a/src/amd/vulkan/radv_meta_resolve.c
+++ b/src/amd/vulkan/radv_meta_resolve.c
@@ -38,7 +38,7 @@ build_nir_fs(void)
nir_variable *f_color; /* vec4, fragment output color */
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
- b.shader->info->name = ralloc_asprintf(b.shader,
+ b.shader->info.name = ralloc_asprintf(b.shader,
"meta_resolve_fs");
f_color = nir_variable_create(b.shader, nir_var_shader_out, vec4,
diff --git a/src/amd/vulkan/radv_meta_resolve_cs.c b/src/amd/vulkan/radv_meta_resolve_cs.c
index fdbf51ab99a..6166e0cb8c3 100644
--- a/src/amd/vulkan/radv_meta_resolve_cs.c
+++ b/src/amd/vulkan/radv_meta_resolve_cs.c
@@ -46,10 +46,10 @@ build_resolve_compute_shader(struct radv_device *dev, bool is_integer, bool is_s
GLSL_TYPE_FLOAT);
snprintf(name, 64, "meta_resolve_cs-%d-%s", samples, is_integer ? "int" : (is_srgb ? "srgb" : "float"));
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
- b.shader->info->name = ralloc_strdup(b.shader, name);
- b.shader->info->cs.local_size[0] = 16;
- b.shader->info->cs.local_size[1] = 16;
- b.shader->info->cs.local_size[2] = 1;
+ b.shader->info.name = ralloc_strdup(b.shader, name);
+ b.shader->info.cs.local_size[0] = 16;
+ b.shader->info.cs.local_size[1] = 16;
+ b.shader->info.cs.local_size[2] = 1;
nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform,
sampler_type, "s_tex");
@@ -63,9 +63,9 @@ build_resolve_compute_shader(struct radv_device *dev, bool is_integer, bool is_s
nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
nir_ssa_def *block_size = nir_imm_ivec4(&b,
- b.shader->info->cs.local_size[0],
- b.shader->info->cs.local_size[1],
- b.shader->info->cs.local_size[2], 0);
+ b.shader->info.cs.local_size[0],
+ b.shader->info.cs.local_size[1],
+ b.shader->info.cs.local_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
diff --git a/src/amd/vulkan/radv_meta_resolve_fs.c b/src/amd/vulkan/radv_meta_resolve_fs.c
index c0345cf22fd..e583808b2d1 100644
--- a/src/amd/vulkan/radv_meta_resolve_fs.c
+++ b/src/amd/vulkan/radv_meta_resolve_fs.c
@@ -38,7 +38,7 @@ build_nir_vertex_shader(void)
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
- b.shader->info->name = ralloc_strdup(b.shader, "meta_resolve_vs");
+ b.shader->info.name = ralloc_strdup(b.shader, "meta_resolve_vs");
nir_variable *pos_out = nir_variable_create(b.shader, nir_var_shader_out,
vec4, "gl_Position");
@@ -64,7 +64,7 @@ build_resolve_fragment_shader(struct radv_device *dev, bool is_integer, bool is_
snprintf(name, 64, "meta_resolve_fs-%d-%s", samples, is_integer ? "int" : (is_srgb ? "srgb" : "float"));
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
- b.shader->info->name = ralloc_strdup(b.shader, name);
+ b.shader->info.name = ralloc_strdup(b.shader, name);
nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform,
sampler_type, "s_tex");
diff --git a/src/amd/vulkan/radv_pipeline.c b/src/amd/vulkan/radv_pipeline.c
index 2992df6ed43..3282652ddd4 100644
--- a/src/amd/vulkan/radv_pipeline.c
+++ b/src/amd/vulkan/radv_pipeline.c
@@ -264,7 +264,7 @@ radv_shader_compile_to_nir(struct radv_device *device,
}
/* Vulkan uses the separate-shader linking model */
- nir->info->separate_shader = true;
+ nir->info.separate_shader = true;
nir_shader_gather_info(nir, entry_point->impl);
@@ -539,8 +539,8 @@ radv_pipeline_compile(struct radv_pipeline *pipeline,
bool dump = (pipeline->device->debug_flags & RADV_DEBUG_DUMP_SHADERS);
if (module->nir)
- _mesa_sha1_compute(module->nir->info->name,
- strlen(module->nir->info->name),
+ _mesa_sha1_compute(module->nir->info.name,
+ strlen(module->nir->info.name),
module->sha1);
radv_hash_shader(sha1, module, entrypoint, spec_info, layout, key, 0);
@@ -644,8 +644,8 @@ radv_tess_pipeline_compile(struct radv_pipeline *pipeline,
bool dump = (pipeline->device->debug_flags & RADV_DEBUG_DUMP_SHADERS);
if (tes_module->nir)
- _mesa_sha1_compute(tes_module->nir->info->name,
- strlen(tes_module->nir->info->name),
+ _mesa_sha1_compute(tes_module->nir->info.name,
+ strlen(tes_module->nir->info.name),
tes_module->sha1);
radv_hash_shader(tes_sha1, tes_module, tes_entrypoint, tes_spec_info, layout, &tes_key, 0);
@@ -657,8 +657,8 @@ radv_tess_pipeline_compile(struct radv_pipeline *pipeline,
tcs_key = radv_compute_tcs_key(tes_variant->info.tes.primitive_mode, input_vertices);
if (tcs_module->nir)
- _mesa_sha1_compute(tcs_module->nir->info->name,
- strlen(tcs_module->nir->info->name),
+ _mesa_sha1_compute(tcs_module->nir->info.name,
+ strlen(tcs_module->nir->info.name),
tcs_module->sha1);
radv_hash_shader(tcs_sha1, tcs_module, tcs_entrypoint, tcs_spec_info, layout, &tcs_key, 0);
@@ -687,16 +687,16 @@ radv_tess_pipeline_compile(struct radv_pipeline *pipeline,
return;
nir_lower_tes_patch_vertices(tes_nir,
- tcs_nir->info->tess.tcs_vertices_out);
+ tcs_nir->info.tess.tcs_vertices_out);
tes_variant = radv_shader_variant_create(pipeline->device, tes_nir,
layout, &tes_key, &tes_code,
&tes_code_size, dump);
- tcs_key = radv_compute_tcs_key(tes_nir->info->tess.primitive_mode, input_vertices);
+ tcs_key = radv_compute_tcs_key(tes_nir->info.tess.primitive_mode, input_vertices);
if (tcs_module->nir)
- _mesa_sha1_compute(tcs_module->nir->info->name,
- strlen(tcs_module->nir->info->name),
+ _mesa_sha1_compute(tcs_module->nir->info.name,
+ strlen(tcs_module->nir->info.name),
tcs_module->sha1);
radv_hash_shader(tcs_sha1, tcs_module, tcs_entrypoint, tcs_spec_info, layout, &tcs_key, 0);
@@ -2041,7 +2041,7 @@ radv_pipeline_init(struct radv_pipeline *pipeline,
if (!modules[MESA_SHADER_FRAGMENT]) {
nir_builder fs_b;
nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
- fs_b.shader->info->name = ralloc_strdup(fs_b.shader, "noop_fs");
+ fs_b.shader->info.name = ralloc_strdup(fs_b.shader, "noop_fs");
fs_m.nir = fs_b.shader;
modules[MESA_SHADER_FRAGMENT] = &fs_m;
}
diff --git a/src/amd/vulkan/radv_query.c b/src/amd/vulkan/radv_query.c
index 8db04d465cd..88d8ccb050c 100644
--- a/src/amd/vulkan/radv_query.c
+++ b/src/amd/vulkan/radv_query.c
@@ -127,10 +127,10 @@ build_occlusion_query_shader(struct radv_device *device) {
*/
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
- b.shader->info->name = ralloc_strdup(b.shader, "occlusion_query");
- b.shader->info->cs.local_size[0] = 64;
- b.shader->info->cs.local_size[1] = 1;
- b.shader->info->cs.local_size[2] = 1;
+ b.shader->info.name = ralloc_strdup(b.shader, "occlusion_query");
+ b.shader->info.cs.local_size[0] = 64;
+ b.shader->info.cs.local_size[1] = 1;
+ b.shader->info.cs.local_size[2] = 1;
nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
nir_variable *outer_counter = nir_local_variable_create(b.impl, glsl_int_type(), "outer_counter");
@@ -160,9 +160,9 @@ build_occlusion_query_shader(struct radv_device *device) {
nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
nir_ssa_def *block_size = nir_imm_ivec4(&b,
- b.shader->info->cs.local_size[0],
- b.shader->info->cs.local_size[1],
- b.shader->info->cs.local_size[2], 0);
+ b.shader->info.cs.local_size[0],
+ b.shader->info.cs.local_size[1],
+ b.shader->info.cs.local_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
global_id = nir_channel(&b, global_id, 0); // We only care about x here.
@@ -322,10 +322,10 @@ build_pipeline_statistics_query_shader(struct radv_device *device) {
*/
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
- b.shader->info->name = ralloc_strdup(b.shader, "pipeline_statistics_query");
- b.shader->info->cs.local_size[0] = 64;
- b.shader->info->cs.local_size[1] = 1;
- b.shader->info->cs.local_size[2] = 1;
+ b.shader->info.name = ralloc_strdup(b.shader, "pipeline_statistics_query");
+ b.shader->info.cs.local_size[0] = 64;
+ b.shader->info.cs.local_size[1] = 1;
+ b.shader->info.cs.local_size[2] = 1;
nir_variable *output_offset = nir_local_variable_create(b.impl, glsl_int_type(), "output_offset");
@@ -352,9 +352,9 @@ build_pipeline_statistics_query_shader(struct radv_device *device) {
nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
nir_ssa_def *block_size = nir_imm_ivec4(&b,
- b.shader->info->cs.local_size[0],
- b.shader->info->cs.local_size[1],
- b.shader->info->cs.local_size[2], 0);
+ b.shader->info.cs.local_size[0],
+ b.shader->info.cs.local_size[1],
+ b.shader->info.cs.local_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
global_id = nir_channel(&b, global_id, 0); // We only care about x here.
diff --git a/src/compiler/glsl/glsl_to_nir.cpp b/src/compiler/glsl/glsl_to_nir.cpp
index 307276555ee..6513484fa01 100644
--- a/src/compiler/glsl/glsl_to_nir.cpp
+++ b/src/compiler/glsl/glsl_to_nir.cpp
@@ -133,13 +133,13 @@ static void
nir_remap_attributes(nir_shader *shader)
{
nir_foreach_variable(var, &shader->inputs) {
- var->data.location += _mesa_bitcount_64(shader->info->double_inputs_read &
+ var->data.location += _mesa_bitcount_64(shader->info.double_inputs_read &
BITFIELD64_MASK(var->data.location));
}
/* Once the remap is done, reset double_inputs_read, so later it will have
* which location/slots are doubles */
- shader->info->double_inputs_read = 0;
+ shader->info.double_inputs_read = 0;
}
nir_shader *
@@ -166,10 +166,10 @@ glsl_to_nir(const struct gl_shader_program *shader_prog,
if (shader->stage == MESA_SHADER_VERTEX)
nir_remap_attributes(shader);
- shader->info->name = ralloc_asprintf(shader, "GLSL%d", shader_prog->Name);
+ shader->info.name = ralloc_asprintf(shader, "GLSL%d", shader_prog->Name);
if (shader_prog->Label)
- shader->info->label = ralloc_strdup(shader, shader_prog->Label);
- shader->info->has_transform_feedback_varyings =
+ shader->info.label = ralloc_strdup(shader, shader_prog->Label);
+ shader->info.has_transform_feedback_varyings =
shader_prog->TransformFeedback.NumVarying > 0;
return shader;
@@ -368,7 +368,7 @@ nir_visitor::visit(ir_variable *ir)
if (glsl_type_is_dual_slot(glsl_without_array(var->type))) {
for (uint i = 0; i < glsl_count_attribute_slots(var->type, true); i++) {
uint64_t bitfield = BITFIELD64_BIT(var->data.location + i);
- shader->info->double_inputs_read |= bitfield;
+ shader->info.double_inputs_read |= bitfield;
}
}
break;
diff --git a/src/compiler/nir/nir.c b/src/compiler/nir/nir.c
index 8f7ed8a50f6..491b908396c 100644
--- a/src/compiler/nir/nir.c
+++ b/src/compiler/nir/nir.c
@@ -44,7 +44,8 @@ nir_shader_create(void *mem_ctx,
shader->options = options;
- shader->info = si ? si : rzalloc(shader, shader_info);
+ if (si)
+ shader->info = *si;
exec_list_make_empty(&shader->functions);
exec_list_make_empty(&shader->registers);
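
With the field embedded, nir_shader_create() copies a caller-supplied shader_info by value instead of keeping (or heap-allocating) a pointer; if none is supplied, the zero-allocated shader simply leaves info zeroed. A rough standalone model of that copy-in behavior; shader_create() and the fields used here are hypothetical stand-ins, not Mesa's API.

#include <stdlib.h>

struct shader_info { const char *name; unsigned num_ubos; };
struct shader      { struct shader_info info; };

/* Hypothetical stand-in for nir_shader_create(): zero-allocate the shader,
 * then copy the optional caller-provided info by value. */
static struct shader *shader_create(const struct shader_info *si)
{
   struct shader *s = calloc(1, sizeof(*s));
   if (s && si)
      s->info = *si;   /* struct assignment replaces the old pointer hand-off */
   return s;
}

int main(void)
{
   struct shader_info si = { .name = "vs", .num_ubos = 2 };
   struct shader *s = shader_create(&si);
   si.num_ubos = 99;    /* no longer visible through s->info: it is a copy */
   int ok = s && s->info.num_ubos == 2;
   free(s);
   return ok ? 0 : 1;
}
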
diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index 2a9ab542f6e..3b827bf7fca 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -1863,7 +1863,7 @@ typedef struct nir_shader {
const struct nir_shader_compiler_options *options;
/** Various bits of compile-time information about a given shader */
- struct shader_info *info;
+ struct shader_info info;
/** list of global variables in the shader (nir_variable) */
struct exec_list globals;
diff --git a/src/compiler/nir/nir_clone.c b/src/compiler/nir/nir_clone.c
index e2204c4e72a..c13163f25c6 100644
--- a/src/compiler/nir/nir_clone.c
+++ b/src/compiler/nir/nir_clone.c
@@ -765,10 +765,10 @@ nir_shader_clone(void *mem_ctx, const nir_shader *s)
clone_reg_list(&state, &ns->registers, &s->registers);
ns->reg_alloc = s->reg_alloc;
- *ns->info = *s->info;
- ns->info->name = ralloc_strdup(ns, ns->info->name);
- if (ns->info->label)
- ns->info->label = ralloc_strdup(ns, ns->info->label);
+ ns->info = s->info;
+ ns->info.name = ralloc_strdup(ns, ns->info.name);
+ if (ns->info.label)
+ ns->info.label = ralloc_strdup(ns, ns->info.label);
ns->num_inputs = s->num_inputs;
ns->num_uniforms = s->num_uniforms;
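
In nir_shader_clone() the struct assignment now copies every plain field of info, but the name/label strings it points at still belong to the source shader, so they are immediately re-duplicated against the clone (ralloc_strdup in the real code). A standalone model of that deep-copy step, using plain strdup for illustration; clone_info() is a hypothetical helper.

#include <stdlib.h>
#include <string.h>

struct shader_info { char *name; char *label; };

/* Value-copies src into dst, then deep-copies the strings so dst owns its
 * own allocations (the real code uses ralloc_strdup against the clone). */
static void clone_info(struct shader_info *dst, const struct shader_info *src)
{
   *dst = *src;
   dst->name  = src->name  ? strdup(src->name)  : NULL;
   dst->label = src->label ? strdup(src->label) : NULL;
}

int main(void)
{
   struct shader_info a = { strdup("clone_me"), NULL }, b;
   clone_info(&b, &a);
   free(a.name);            /* the clone's copy survives the original */
   free(b.name);
   free(b.label);
   return 0;
}
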
diff --git a/src/compiler/nir/nir_gather_info.c b/src/compiler/nir/nir_gather_info.c
index 0c70787252b..24ac74ee79d 100644
--- a/src/compiler/nir/nir_gather_info.c
+++ b/src/compiler/nir/nir_gather_info.c
@@ -49,23 +49,23 @@ set_io_mask(nir_shader *shader, nir_variable *var, int offset, int len)
if (var->data.mode == nir_var_shader_in) {
if (is_patch_generic)
- shader->info->patch_inputs_read |= bitfield;
+ shader->info.patch_inputs_read |= bitfield;
else
- shader->info->inputs_read |= bitfield;
+ shader->info.inputs_read |= bitfield;
if (shader->stage == MESA_SHADER_FRAGMENT) {
- shader->info->fs.uses_sample_qualifier |= var->data.sample;
+ shader->info.fs.uses_sample_qualifier |= var->data.sample;
}
} else {
assert(var->data.mode == nir_var_shader_out);
if (is_patch_generic) {
- shader->info->patch_outputs_written |= bitfield;
+ shader->info.patch_outputs_written |= bitfield;
} else if (!var->data.read_only) {
- shader->info->outputs_written |= bitfield;
+ shader->info.outputs_written |= bitfield;
}
if (var->data.fb_fetch_output)
- shader->info->outputs_read |= bitfield;
+ shader->info.outputs_read |= bitfield;
}
}
}
@@ -197,7 +197,7 @@ gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader)
case nir_intrinsic_discard:
case nir_intrinsic_discard_if:
assert(shader->stage == MESA_SHADER_FRAGMENT);
- shader->info->fs.uses_discard = true;
+ shader->info.fs.uses_discard = true;
break;
case nir_intrinsic_interp_var_at_centroid:
@@ -219,7 +219,7 @@ gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader)
glsl_type_is_dual_slot(glsl_without_array(var->type))) {
for (uint i = 0; i < glsl_count_attribute_slots(var->type, false); i++) {
int idx = var->data.location + i;
- shader->info->double_inputs_read |= BITFIELD64_BIT(idx);
+ shader->info.double_inputs_read |= BITFIELD64_BIT(idx);
}
}
}
@@ -245,14 +245,14 @@ gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader)
case nir_intrinsic_load_tess_coord:
case nir_intrinsic_load_tess_level_outer:
case nir_intrinsic_load_tess_level_inner:
- shader->info->system_values_read |=
+ shader->info.system_values_read |=
(1ull << nir_system_value_from_intrinsic(instr->intrinsic));
break;
case nir_intrinsic_end_primitive:
case nir_intrinsic_end_primitive_with_counter:
assert(shader->stage == MESA_SHADER_GEOMETRY);
- shader->info->gs.uses_end_primitive = 1;
+ shader->info.gs.uses_end_primitive = 1;
break;
default:
@@ -264,7 +264,7 @@ static void
gather_tex_info(nir_tex_instr *instr, nir_shader *shader)
{
if (instr->op == nir_texop_tg4)
- shader->info->uses_texture_gather = true;
+ shader->info.uses_texture_gather = true;
}
static void
@@ -290,8 +290,8 @@ gather_info_block(nir_block *block, nir_shader *shader)
void
nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint)
{
- shader->info->num_textures = 0;
- shader->info->num_images = 0;
+ shader->info.num_textures = 0;
+ shader->info.num_images = 0;
nir_foreach_variable(var, &shader->uniforms) {
const struct glsl_type *type = var->type;
unsigned count = 1;
@@ -301,21 +301,21 @@ nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint)
}
if (glsl_type_is_image(type)) {
- shader->info->num_images += count;
+ shader->info.num_images += count;
} else if (glsl_type_is_sampler(type)) {
- shader->info->num_textures += count;
+ shader->info.num_textures += count;
}
}
- shader->info->inputs_read = 0;
- shader->info->outputs_written = 0;
- shader->info->outputs_read = 0;
- shader->info->double_inputs_read = 0;
- shader->info->patch_inputs_read = 0;
- shader->info->patch_outputs_written = 0;
- shader->info->system_values_read = 0;
+ shader->info.inputs_read = 0;
+ shader->info.outputs_written = 0;
+ shader->info.outputs_read = 0;
+ shader->info.double_inputs_read = 0;
+ shader->info.patch_inputs_read = 0;
+ shader->info.patch_outputs_written = 0;
+ shader->info.system_values_read = 0;
if (shader->stage == MESA_SHADER_FRAGMENT) {
- shader->info->fs.uses_sample_qualifier = false;
+ shader->info.fs.uses_sample_qualifier = false;
}
nir_foreach_block(block, entrypoint) {
gather_info_block(block, shader);
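
nir_shader_gather_info() clears the embedded bitmasks up front and then ORs in one bit per input/output slot it encounters, which is what the shader->info.* accesses above now do directly on the embedded struct. A tiny standalone model of that gathering pattern; the slot numbers and the gather() helper are illustrative only.

#include <stdint.h>
#include <stdio.h>

struct info { uint64_t inputs_read, outputs_written; };

/* Reset the masks, then set one bit per declared slot. */
static void gather(struct info *info, const int *in_slots, int n_in,
                   const int *out_slots, int n_out)
{
   info->inputs_read = 0;
   info->outputs_written = 0;
   for (int i = 0; i < n_in; i++)
      info->inputs_read |= UINT64_C(1) << in_slots[i];
   for (int i = 0; i < n_out; i++)
      info->outputs_written |= UINT64_C(1) << out_slots[i];
}

int main(void)
{
   const int in[] = { 0, 3 }, out[] = { 1 };
   struct info info;
   gather(&info, in, 2, out, 1);
   printf("inputs 0x%llx outputs 0x%llx\n",
          (unsigned long long)info.inputs_read,
          (unsigned long long)info.outputs_written);
   return 0;
}
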
diff --git a/src/compiler/nir/nir_lower_bitmap.c b/src/compiler/nir/nir_lower_bitmap.c
index a1b4a32a5d4..9d04ae79dd8 100644
--- a/src/compiler/nir/nir_lower_bitmap.c
+++ b/src/compiler/nir/nir_lower_bitmap.c
@@ -111,7 +111,7 @@ lower_bitmap(nir_shader *shader, nir_builder *b,
discard->src[0] = nir_src_for_ssa(cond);
nir_builder_instr_insert(b, &discard->instr);
- shader->info->fs.uses_discard = true;
+ shader->info.fs.uses_discard = true;
}
static void
diff --git a/src/compiler/nir/nir_lower_clip.c b/src/compiler/nir/nir_lower_clip.c
index 7bed46b1bfc..ea12f51a7bb 100644
--- a/src/compiler/nir/nir_lower_clip.c
+++ b/src/compiler/nir/nir_lower_clip.c
@@ -289,7 +289,7 @@ lower_clip_fs(nir_function_impl *impl, unsigned ucp_enables,
discard->src[0] = nir_src_for_ssa(cond);
nir_builder_instr_insert(&b, &discard->instr);
- b.shader->info->fs.uses_discard = true;
+ b.shader->info.fs.uses_discard = true;
}
}
diff --git a/src/compiler/nir/nir_lower_clip_cull_distance_arrays.c b/src/compiler/nir/nir_lower_clip_cull_distance_arrays.c
index 6705a3c4597..ea23a604ed1 100644
--- a/src/compiler/nir/nir_lower_clip_cull_distance_arrays.c
+++ b/src/compiler/nir/nir_lower_clip_cull_distance_arrays.c
@@ -142,8 +142,8 @@ combine_clip_cull(nir_shader *nir,
const unsigned cull_array_size = get_unwrapped_array_length(nir, cull);
if (store_info) {
- nir->info->clip_distance_array_size = clip_array_size;
- nir->info->cull_distance_array_size = cull_array_size;
+ nir->info.clip_distance_array_size = clip_array_size;
+ nir->info.cull_distance_array_size = cull_array_size;
}
if (clip)
diff --git a/src/compiler/nir/nir_lower_gs_intrinsics.c b/src/compiler/nir/nir_lower_gs_intrinsics.c
index 68e20dd600f..4ddace9cf6a 100644
--- a/src/compiler/nir/nir_lower_gs_intrinsics.c
+++ b/src/compiler/nir/nir_lower_gs_intrinsics.c
@@ -77,7 +77,7 @@ rewrite_emit_vertex(nir_intrinsic_instr *intrin, struct state *state)
nir_ssa_def *count = nir_load_var(b, state->vertex_count_var);
nir_ssa_def *max_vertices =
- nir_imm_int(b, b->shader->info->gs.vertices_out);
+ nir_imm_int(b, b->shader->info.gs.vertices_out);
/* Create: if (vertex_count < max_vertices) and insert it.
*
diff --git a/src/compiler/nir/nir_lower_system_values.c b/src/compiler/nir/nir_lower_system_values.c
index 6ad5ad6940d..810100a0816 100644
--- a/src/compiler/nir/nir_lower_system_values.c
+++ b/src/compiler/nir/nir_lower_system_values.c
@@ -58,9 +58,9 @@ convert_block(nir_block *block, nir_builder *b)
*/
nir_const_value local_size;
- local_size.u32[0] = b->shader->info->cs.local_size[0];
- local_size.u32[1] = b->shader->info->cs.local_size[1];
- local_size.u32[2] = b->shader->info->cs.local_size[2];
+ local_size.u32[0] = b->shader->info.cs.local_size[0];
+ local_size.u32[1] = b->shader->info.cs.local_size[1];
+ local_size.u32[2] = b->shader->info.cs.local_size[2];
nir_ssa_def *group_id = nir_load_work_group_id(b);
nir_ssa_def *local_id = nir_load_local_invocation_id(b);
@@ -88,9 +88,9 @@ convert_block(nir_block *block, nir_builder *b)
nir_ssa_def *local_id = nir_load_local_invocation_id(b);
nir_ssa_def *size_x =
- nir_imm_int(b, b->shader->info->cs.local_size[0]);
+ nir_imm_int(b, b->shader->info.cs.local_size[0]);
nir_ssa_def *size_y =
- nir_imm_int(b, b->shader->info->cs.local_size[1]);
+ nir_imm_int(b, b->shader->info.cs.local_size[1]);
sysval = nir_imul(b, nir_channel(b, local_id, 2),
nir_imul(b, size_x, size_y));
diff --git a/src/compiler/nir/nir_print.c b/src/compiler/nir/nir_print.c
index dfdb5f36191..66c0669b594 100644
--- a/src/compiler/nir/nir_print.c
+++ b/src/compiler/nir/nir_print.c
@@ -1159,20 +1159,20 @@ nir_print_shader_annotated(nir_shader *shader, FILE *fp,
fprintf(fp, "shader: %s\n", gl_shader_stage_name(shader->stage));
- if (shader->info->name)
- fprintf(fp, "name: %s\n", shader->info->name);
+ if (shader->info.name)
+ fprintf(fp, "name: %s\n", shader->info.name);
- if (shader->info->label)
- fprintf(fp, "label: %s\n", shader->info->label);
+ if (shader->info.label)
+ fprintf(fp, "label: %s\n", shader->info.label);
switch (shader->stage) {
case MESA_SHADER_COMPUTE:
fprintf(fp, "local-size: %u, %u, %u%s\n",
- shader->info->cs.local_size[0],
- shader->info->cs.local_size[1],
- shader->info->cs.local_size[2],
- shader->info->cs.local_size_variable ? " (variable)" : "");
- fprintf(fp, "shared-size: %u\n", shader->info->cs.shared_size);
+ shader->info.cs.local_size[0],
+ shader->info.cs.local_size[1],
+ shader->info.cs.local_size[2],
+ shader->info.cs.local_size_variable ? " (variable)" : "");
+ fprintf(fp, "shared-size: %u\n", shader->info.cs.shared_size);
break;
default:
break;
diff --git a/src/compiler/nir/nir_sweep.c b/src/compiler/nir/nir_sweep.c
index e6ae298dd36..0f1debce3ad 100644
--- a/src/compiler/nir/nir_sweep.c
+++ b/src/compiler/nir/nir_sweep.c
@@ -150,20 +150,12 @@ nir_sweep(nir_shader *nir)
{
void *rubbish = ralloc_context(NULL);
- /* The shader may not own shader_info so check first */
- bool steal_info = false;
- if (nir == ralloc_parent(nir->info))
- steal_info = true;
-
/* First, move ownership of all the memory to a temporary context; assume dead. */
ralloc_adopt(rubbish, nir);
- if (steal_info)
- ralloc_steal(nir, nir->info);
-
- ralloc_steal(nir, (char *)nir->info->name);
- if (nir->info->label)
- ralloc_steal(nir, (char *)nir->info->label);
+ ralloc_steal(nir, (char *)nir->info.name);
+ if (nir->info.label)
+ ralloc_steal(nir, (char *)nir->info.label);
/* Variables and registers are not dead. Steal them back. */
steal_list(nir, nir_variable, &nir->uniforms);
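
nir_sweep() no longer has to check who owns the shader_info allocation: the struct lives inside the shader, and only the name/label strings it points to are separate allocations that must be kept alive (re-parented with ralloc_steal in the real code). A rough standalone model of that ownership picture, using plain malloc/free instead of ralloc; shader_destroy() is a hypothetical helper.

#include <stdlib.h>
#include <string.h>

struct shader_info { char *name; char *label; };
struct shader      { struct shader_info info; };

/* With info embedded, tearing a shader down only releases the strings info
 * owns; there is no separately allocated shader_info to track.  (Mesa
 * expresses the same idea with ralloc parenting rather than explicit free.) */
static void shader_destroy(struct shader *s)
{
   free(s->info.name);
   free(s->info.label);
   free(s);
}

int main(void)
{
   struct shader *s = calloc(1, sizeof(*s));
   if (!s)
      return 1;
   s->info.name = strdup("sweep_demo");
   shader_destroy(s);
   return 0;
}
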
diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index c120ad6d19d..0a5eb0eb6b0 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -1017,9 +1017,9 @@ handle_workgroup_size_decoration_cb(struct vtn_builder *b,
assert(val->const_type == glsl_vector_type(GLSL_TYPE_UINT, 3));
- b->shader->info->cs.local_size[0] = val->constant->values[0].u32[0];
- b->shader->info->cs.local_size[1] = val->constant->values[0].u32[1];
- b->shader->info->cs.local_size[2] = val->constant->values[0].u32[2];
+ b->shader->info.cs.local_size[0] = val->constant->values[0].u32[0];
+ b->shader->info.cs.local_size[1] = val->constant->values[0].u32[1];
+ b->shader->info.cs.local_size[2] = val->constant->values[0].u32[2];
}
static void
@@ -2808,36 +2808,36 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
case SpvExecutionModeEarlyFragmentTests:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info->fs.early_fragment_tests = true;
+ b->shader->info.fs.early_fragment_tests = true;
break;
case SpvExecutionModeInvocations:
assert(b->shader->stage == MESA_SHADER_GEOMETRY);
- b->shader->info->gs.invocations = MAX2(1, mode->literals[0]);
+ b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
break;
case SpvExecutionModeDepthReplacing:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
+ b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
break;
case SpvExecutionModeDepthGreater:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
+ b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
break;
case SpvExecutionModeDepthLess:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
+ b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
break;
case SpvExecutionModeDepthUnchanged:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
+ b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
break;
case SpvExecutionModeLocalSize:
assert(b->shader->stage == MESA_SHADER_COMPUTE);
- b->shader->info->cs.local_size[0] = mode->literals[0];
- b->shader->info->cs.local_size[1] = mode->literals[1];
- b->shader->info->cs.local_size[2] = mode->literals[2];
+ b->shader->info.cs.local_size[0] = mode->literals[0];
+ b->shader->info.cs.local_size[1] = mode->literals[1];
+ b->shader->info.cs.local_size[2] = mode->literals[2];
break;
case SpvExecutionModeLocalSizeHint:
break; /* Nothing to do with this */
@@ -2845,10 +2845,10 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
case SpvExecutionModeOutputVertices:
if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
b->shader->stage == MESA_SHADER_TESS_EVAL) {
- b->shader->info->tess.tcs_vertices_out = mode->literals[0];
+ b->shader->info.tess.tcs_vertices_out = mode->literals[0];
} else {
assert(b->shader->stage == MESA_SHADER_GEOMETRY);
- b->shader->info->gs.vertices_out = mode->literals[0];
+ b->shader->info.gs.vertices_out = mode->literals[0];
}
break;
@@ -2861,11 +2861,11 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
case SpvExecutionModeIsolines:
if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
b->shader->stage == MESA_SHADER_TESS_EVAL) {
- b->shader->info->tess.primitive_mode =
+ b->shader->info.tess.primitive_mode =
gl_primitive_from_spv_execution_mode(mode->exec_mode);
} else {
assert(b->shader->stage == MESA_SHADER_GEOMETRY);
- b->shader->info->gs.vertices_in =
+ b->shader->info.gs.vertices_in =
vertices_in_from_spv_execution_mode(mode->exec_mode);
}
break;
@@ -2874,24 +2874,24 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
case SpvExecutionModeOutputLineStrip:
case SpvExecutionModeOutputTriangleStrip:
assert(b->shader->stage == MESA_SHADER_GEOMETRY);
- b->shader->info->gs.output_primitive =
+ b->shader->info.gs.output_primitive =
gl_primitive_from_spv_execution_mode(mode->exec_mode);
break;
case SpvExecutionModeSpacingEqual:
assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
b->shader->stage == MESA_SHADER_TESS_EVAL);
- b->shader->info->tess.spacing = TESS_SPACING_EQUAL;
+ b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
break;
case SpvExecutionModeSpacingFractionalEven:
assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
b->shader->stage == MESA_SHADER_TESS_EVAL);
- b->shader->info->tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
+ b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
break;
case SpvExecutionModeSpacingFractionalOdd:
assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
b->shader->stage == MESA_SHADER_TESS_EVAL);
- b->shader->info->tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
+ b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
break;
case SpvExecutionModeVertexOrderCw:
assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
@@ -2900,18 +2900,18 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
* but be the opposite of OpenGL. Currently NIR follows GL semantics,
* so we set it backwards here.
*/
- b->shader->info->tess.ccw = true;
+ b->shader->info.tess.ccw = true;
break;
case SpvExecutionModeVertexOrderCcw:
assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
b->shader->stage == MESA_SHADER_TESS_EVAL);
/* Backwards; see above */
- b->shader->info->tess.ccw = false;
+ b->shader->info.tess.ccw = false;
break;
case SpvExecutionModePointMode:
assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
b->shader->stage == MESA_SHADER_TESS_EVAL);
- b->shader->info->tess.point_mode = true;
+ b->shader->info.tess.point_mode = true;
break;
case SpvExecutionModePixelCenterInteger:
@@ -3287,7 +3287,7 @@ spirv_to_nir(const uint32_t *words, size_t word_count,
b->shader = nir_shader_create(NULL, stage, options, NULL);
/* Set shader info defaults */
- b->shader->info->gs.invocations = 1;
+ b->shader->info.gs.invocations = 1;
/* Parse execution modes */
vtn_foreach_execution_mode(b, b->entry_point,
diff --git a/src/compiler/spirv/vtn_variables.c b/src/compiler/spirv/vtn_variables.c
index 365e562386f..0f0cc1cd5c4 100644
--- a/src/compiler/spirv/vtn_variables.c
+++ b/src/compiler/spirv/vtn_variables.c
@@ -1094,9 +1094,9 @@ apply_var_decoration(struct vtn_builder *b, nir_variable *nir_var,
nir_var->data.read_only = true;
nir_constant *c = rzalloc(nir_var, nir_constant);
- c->values[0].u32[0] = b->shader->info->cs.local_size[0];
- c->values[0].u32[1] = b->shader->info->cs.local_size[1];
- c->values[0].u32[2] = b->shader->info->cs.local_size[2];
+ c->values[0].u32[0] = b->shader->info.cs.local_size[0];
+ c->values[0].u32[1] = b->shader->info.cs.local_size[1];
+ c->values[0].u32[2] = b->shader->info.cs.local_size[2];
nir_var->constant_initializer = c;
break;
}
@@ -1335,18 +1335,18 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
case SpvStorageClassUniformConstant:
if (without_array->block) {
var->mode = vtn_variable_mode_ubo;
- b->shader->info->num_ubos++;
+ b->shader->info.num_ubos++;
} else if (without_array->buffer_block) {
var->mode = vtn_variable_mode_ssbo;
- b->shader->info->num_ssbos++;
+ b->shader->info.num_ssbos++;
} else if (glsl_type_is_image(without_array->type)) {
var->mode = vtn_variable_mode_image;
nir_mode = nir_var_uniform;
- b->shader->info->num_images++;
+ b->shader->info.num_images++;
} else if (glsl_type_is_sampler(without_array->type)) {
var->mode = vtn_variable_mode_sampler;
nir_mode = nir_var_uniform;
- b->shader->info->num_textures++;
+ b->shader->info.num_textures++;
} else {
assert(!"Invalid uniform variable type");
}
diff --git a/src/gallium/auxiliary/nir/tgsi_to_nir.c b/src/gallium/auxiliary/nir/tgsi_to_nir.c
index de33375cb52..1d68c220c01 100644
--- a/src/gallium/auxiliary/nir/tgsi_to_nir.c
+++ b/src/gallium/auxiliary/nir/tgsi_to_nir.c
@@ -315,8 +315,8 @@ ttn_emit_declaration(struct ttn_compile *c)
/* nothing to do for UBOs: */
if ((file == TGSI_FILE_CONSTANT) && decl->Declaration.Dimension) {
- b->shader->info->num_ubos =
- MAX2(b->shader->info->num_ubos, decl->Dim.Index2D);
+ b->shader->info.num_ubos =
+ MAX2(b->shader->info.num_ubos, decl->Dim.Index2D);
return;
}
@@ -374,7 +374,7 @@ ttn_emit_declaration(struct ttn_compile *c)
exec_list_push_tail(&b->shader->inputs, &var->node);
for (int i = 0; i < array_size; i++)
- b->shader->info->inputs_read |= 1 << (var->data.location + i);
+ b->shader->info.inputs_read |= 1 << (var->data.location + i);
break;
case TGSI_FILE_OUTPUT: {
@@ -440,7 +440,7 @@ ttn_emit_declaration(struct ttn_compile *c)
exec_list_push_tail(&b->shader->outputs, &var->node);
for (int i = 0; i < array_size; i++)
- b->shader->info->outputs_written |= 1 << (var->data.location + i);
+ b->shader->info.outputs_written |= 1 << (var->data.location + i);
}
break;
case TGSI_FILE_CONSTANT:
@@ -587,7 +587,7 @@ ttn_src_for_file_and_index(struct ttn_compile *c, unsigned file, unsigned index,
src = nir_src_for_ssa(&load->dest.ssa);
- b->shader->info->system_values_read |=
+ b->shader->info.system_values_read |=
(1 << nir_system_value_from_intrinsic(op));
break;
@@ -1068,7 +1068,7 @@ ttn_kill(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
nir_intrinsic_instr *discard =
nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard);
nir_builder_instr_insert(b, &discard->instr);
- b->shader->info->fs.uses_discard = true;
+ b->shader->info.fs.uses_discard = true;
}
static void
@@ -1081,7 +1081,7 @@ ttn_kill_if(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard_if);
discard->src[0] = nir_src_for_ssa(cmp);
nir_builder_instr_insert(b, &discard->instr);
- b->shader->info->fs.uses_discard = true;
+ b->shader->info.fs.uses_discard = true;
}
static void
diff --git a/src/gallium/drivers/freedreno/ir3/ir3_compiler_nir.c b/src/gallium/drivers/freedreno/ir3/ir3_compiler_nir.c
index 46d0525c93d..dfebb62b16b 100644
--- a/src/gallium/drivers/freedreno/ir3/ir3_compiler_nir.c
+++ b/src/gallium/drivers/freedreno/ir3/ir3_compiler_nir.c
@@ -204,7 +204,7 @@ compile_init(struct ir3_compiler *compiler,
}
so->num_uniforms = ctx->s->num_uniforms;
- so->num_ubos = ctx->s->info->num_ubos;
+ so->num_ubos = ctx->s->info.num_ubos;
/* Layout of constant registers, each section aligned to vec4. Note
* that pointer size (ubo, etc) changes depending on generation.
@@ -228,7 +228,7 @@ compile_init(struct ir3_compiler *compiler,
if (so->num_ubos > 0) {
so->constbase.ubo = constoff;
- constoff += align(ctx->s->info->num_ubos * ptrsz, 4) / 4;
+ constoff += align(ctx->s->info.num_ubos * ptrsz, 4) / 4;
}
unsigned num_driver_params = 0;
diff --git a/src/gallium/drivers/freedreno/ir3/ir3_shader.c b/src/gallium/drivers/freedreno/ir3/ir3_shader.c
index 4ca96ce3bb2..65d64abe297 100644
--- a/src/gallium/drivers/freedreno/ir3/ir3_shader.c
+++ b/src/gallium/drivers/freedreno/ir3/ir3_shader.c
@@ -514,7 +514,7 @@ ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin)
uint64_t
ir3_shader_outputs(const struct ir3_shader *so)
{
- return so->nir->info->outputs_written;
+ return so->nir->info.outputs_written;
}
/* This has to reach into the fd_context a bit more than the rest of
diff --git a/src/gallium/drivers/vc4/vc4_nir_lower_blend.c b/src/gallium/drivers/vc4/vc4_nir_lower_blend.c
index 2ed89ead55b..a28ebb5bb7c 100644
--- a/src/gallium/drivers/vc4/vc4_nir_lower_blend.c
+++ b/src/gallium/drivers/vc4/vc4_nir_lower_blend.c
@@ -494,7 +494,7 @@ vc4_nir_emit_alpha_test_discard(struct vc4_compile *c, nir_builder *b,
discard->num_components = 1;
discard->src[0] = nir_src_for_ssa(nir_inot(b, condition));
nir_builder_instr_insert(b, &discard->instr);
- c->s->info->fs.uses_discard = true;
+ c->s->info.fs.uses_discard = true;
}
static nir_ssa_def *
diff --git a/src/gallium/drivers/vc4/vc4_program.c b/src/gallium/drivers/vc4/vc4_program.c
index 59368734d08..f1c40c06a0e 100644
--- a/src/gallium/drivers/vc4/vc4_program.c
+++ b/src/gallium/drivers/vc4/vc4_program.c
@@ -1347,7 +1347,7 @@ emit_frag_end(struct vc4_compile *c)
}
uint32_t discard_cond = QPU_COND_ALWAYS;
- if (c->s->info->fs.uses_discard) {
+ if (c->s->info.fs.uses_discard) {
qir_SF(c, c->discard);
discard_cond = QPU_COND_ZS;
}
@@ -2158,7 +2158,7 @@ ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
static void
nir_to_qir(struct vc4_compile *c)
{
- if (c->stage == QSTAGE_FRAG && c->s->info->fs.uses_discard)
+ if (c->stage == QSTAGE_FRAG && c->s->info.fs.uses_discard)
c->discard = qir_MOV(c, qir_uniform_ui(c, 0));
ntq_setup_inputs(c);
@@ -2583,7 +2583,7 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
/* Note: the temporary clone in c->s has been freed. */
nir_shader *orig_shader = key->shader_state->base.ir.nir;
- if (orig_shader->info->outputs_written & (1 << FRAG_RESULT_DEPTH))
+ if (orig_shader->info.outputs_written & (1 << FRAG_RESULT_DEPTH))
shader->disable_early_z = true;
} else {
shader->num_inputs = c->num_inputs;
diff --git a/src/intel/blorp/blorp.c b/src/intel/blorp/blorp.c
index 0b2395d9f7a..98342755294 100644
--- a/src/intel/blorp/blorp.c
+++ b/src/intel/blorp/blorp.c
@@ -204,12 +204,12 @@ blorp_compile_vs(struct blorp_context *blorp, void *mem_ctx,
nir = brw_preprocess_nir(compiler, nir);
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
- vs_prog_data->inputs_read = nir->info->inputs_read;
+ vs_prog_data->inputs_read = nir->info.inputs_read;
brw_compute_vue_map(compiler->devinfo,
&vs_prog_data->base.vue_map,
- nir->info->outputs_written,
- nir->info->separate_shader);
+ nir->info.outputs_written,
+ nir->info.separate_shader);
struct brw_vs_prog_key vs_key = { 0, };
diff --git a/src/intel/blorp/blorp_blit.c b/src/intel/blorp/blorp_blit.c
index 691564c8788..04ea98aaf5b 100644
--- a/src/intel/blorp/blorp_blit.c
+++ b/src/intel/blorp/blorp_blit.c
@@ -1301,7 +1301,7 @@ brw_blorp_get_blit_kernel(struct blorp_context *blorp,
struct brw_wm_prog_data prog_data;
nir_shader *nir = brw_blorp_build_nir_shader(blorp, mem_ctx, prog_key);
- nir->info->name = ralloc_strdup(nir, "BLORP-blit");
+ nir->info.name = ralloc_strdup(nir, "BLORP-blit");
struct brw_wm_prog_key wm_key;
brw_blorp_init_wm_prog_key(&wm_key);
diff --git a/src/intel/blorp/blorp_clear.c b/src/intel/blorp/blorp_clear.c
index 4e834ba123e..a9eb6b95e45 100644
--- a/src/intel/blorp/blorp_clear.c
+++ b/src/intel/blorp/blorp_clear.c
@@ -58,7 +58,7 @@ blorp_params_get_clear_kernel(struct blorp_context *blorp,
nir_builder b;
nir_builder_init_simple_shader(&b, mem_ctx, MESA_SHADER_FRAGMENT, NULL);
- b.shader->info->name = ralloc_strdup(b.shader, "BLORP-clear");
+ b.shader->info.name = ralloc_strdup(b.shader, "BLORP-clear");
nir_variable *v_color =
BLORP_CREATE_NIR_INPUT(b.shader, clear_color, glsl_vec4_type());
@@ -120,7 +120,7 @@ blorp_params_get_layer_offset_vs(struct blorp_context *blorp,
nir_builder b;
nir_builder_init_simple_shader(&b, mem_ctx, MESA_SHADER_VERTEX, NULL);
- b.shader->info->name = ralloc_strdup(b.shader, "BLORP-layer-offset-vs");
+ b.shader->info.name = ralloc_strdup(b.shader, "BLORP-layer-offset-vs");
const struct glsl_type *uvec4_type = glsl_vector_type(GLSL_TYPE_UINT, 4);
diff --git a/src/intel/compiler/brw_fs.cpp b/src/intel/compiler/brw_fs.cpp
index 4dcdc1b46de..329c15b8b0b 100644
--- a/src/intel/compiler/brw_fs.cpp
+++ b/src/intel/compiler/brw_fs.cpp
@@ -1433,7 +1433,7 @@ fs_visitor::calculate_urb_setup()
int urb_next = 0;
/* Figure out where each of the incoming setup attributes lands. */
if (devinfo->gen >= 6) {
- if (_mesa_bitcount_64(nir->info->inputs_read &
+ if (_mesa_bitcount_64(nir->info.inputs_read &
BRW_FS_VARYING_INPUT_MASK) <= 16) {
/* The SF/SBE pipeline stage can do arbitrary rearrangement of the
* first 16 varying inputs, so we can put them wherever we want.
@@ -1445,14 +1445,14 @@ fs_visitor::calculate_urb_setup()
* a different vertex (or geometry) shader.
*/
for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
- if (nir->info->inputs_read & BRW_FS_VARYING_INPUT_MASK &
+ if (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
BITFIELD64_BIT(i)) {
prog_data->urb_setup[i] = urb_next++;
}
}
} else {
bool include_vue_header =
- nir->info->inputs_read & (VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT);
+ nir->info.inputs_read & (VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT);
/* We have enough input varyings that the SF/SBE pipeline stage can't
* arbitrarily rearrange them to suit our whim; we have to put them
@@ -1462,7 +1462,7 @@ fs_visitor::calculate_urb_setup()
struct brw_vue_map prev_stage_vue_map;
brw_compute_vue_map(devinfo, &prev_stage_vue_map,
key->input_slots_valid,
- nir->info->separate_shader);
+ nir->info.separate_shader);
int first_slot =
include_vue_header ? 0 : 2 * BRW_SF_URB_ENTRY_READ_OFFSET;
@@ -1471,7 +1471,7 @@ fs_visitor::calculate_urb_setup()
slot++) {
int varying = prev_stage_vue_map.slot_to_varying[slot];
if (varying != BRW_VARYING_SLOT_PAD &&
- (nir->info->inputs_read & BRW_FS_VARYING_INPUT_MASK &
+ (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
BITFIELD64_BIT(varying))) {
prog_data->urb_setup[varying] = slot - first_slot;
}
@@ -1504,7 +1504,7 @@ fs_visitor::calculate_urb_setup()
*
* See compile_sf_prog() for more info.
*/
- if (nir->info->inputs_read & BITFIELD64_BIT(VARYING_SLOT_PNTC))
+ if (nir->info.inputs_read & BITFIELD64_BIT(VARYING_SLOT_PNTC))
prog_data->urb_setup[VARYING_SLOT_PNTC] = urb_next++;
}
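Note: the hunks above sit in the fragment-shader URB layout logic. With 16 or fewer varyings read, the SF/SBE stage can reorder inputs arbitrarily, so each varying the shader reads simply takes the next free slot; with more than that, the layout must follow the previous stage's VUE map. A minimal standalone sketch of the compact path, using a plain 64-bit mask and an illustrative slot count rather than the Mesa types:

    /* Compact URB slot assignment: every read varying gets the next slot.
     * NUM_SLOTS and the example mask are illustrative stand-ins. */
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_SLOTS 64

    static void assign_compact_urb_setup(uint64_t inputs_read,
                                         int urb_setup[NUM_SLOTS])
    {
       int urb_next = 0;
       for (int i = 0; i < NUM_SLOTS; i++) {
          if (inputs_read & ((uint64_t)1 << i))
             urb_setup[i] = urb_next++;  /* consecutive slots, shader order */
          else
             urb_setup[i] = -1;          /* varying not read: no slot */
       }
    }

    int main(void)
    {
       int urb_setup[NUM_SLOTS];
       assign_compact_urb_setup((1ull << 3) | (1ull << 7), urb_setup);
       printf("varying 7 lands in slot %d\n", urb_setup[7]);  /* slot 1 */
       return 0;
    }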
@@ -1631,7 +1631,7 @@ fs_visitor::assign_gs_urb_setup()
struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
first_non_payload_grf +=
- 8 * vue_prog_data->urb_read_length * nir->info->gs.vertices_in;
+ 8 * vue_prog_data->urb_read_length * nir->info.gs.vertices_in;
foreach_block_and_inst(block, fs_inst, inst, cfg) {
/* Rewrite all ATTR file references to GRFs. */
@@ -5456,7 +5456,7 @@ fs_visitor::setup_fs_payload_gen6()
/* R27: interpolated depth if uses source depth */
prog_data->uses_src_depth =
- (nir->info->inputs_read & (1 << VARYING_SLOT_POS)) != 0;
+ (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
if (prog_data->uses_src_depth) {
payload.source_depth_reg = payload.num_regs;
payload.num_regs++;
@@ -5468,7 +5468,7 @@ fs_visitor::setup_fs_payload_gen6()
/* R29: interpolated W set if GEN6_WM_USES_SOURCE_W. */
prog_data->uses_src_w =
- (nir->info->inputs_read & (1 << VARYING_SLOT_POS)) != 0;
+ (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
if (prog_data->uses_src_w) {
payload.source_w_reg = payload.num_regs;
payload.num_regs++;
@@ -5480,7 +5480,7 @@ fs_visitor::setup_fs_payload_gen6()
/* R31: MSAA position offsets. */
if (prog_data->persample_dispatch &&
- (nir->info->system_values_read & SYSTEM_BIT_SAMPLE_POS)) {
+ (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS)) {
/* From the Ivy Bridge PRM documentation for 3DSTATE_PS:
*
* "MSDISPMODE_PERSAMPLE is required in order to select
@@ -5497,7 +5497,7 @@ fs_visitor::setup_fs_payload_gen6()
/* R32: MSAA input coverage mask */
prog_data->uses_sample_mask =
- (nir->info->system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN) != 0;
+ (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN) != 0;
if (prog_data->uses_sample_mask) {
assert(devinfo->gen >= 7);
payload.sample_mask_in_reg = payload.num_regs;
@@ -5511,7 +5511,7 @@ fs_visitor::setup_fs_payload_gen6()
/* R34-: bary for 32-pixel. */
/* R58-59: interp W for 32-pixel. */
- if (nir->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
+ if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
source_depth_to_render_target = true;
}
}
@@ -5548,15 +5548,15 @@ fs_visitor::setup_gs_payload()
* Note that the GS reads <URB Read Length> HWords for every vertex - so we
* have to multiply by VerticesIn to obtain the total storage requirement.
*/
- if (8 * vue_prog_data->urb_read_length * nir->info->gs.vertices_in >
+ if (8 * vue_prog_data->urb_read_length * nir->info.gs.vertices_in >
max_push_components || gs_prog_data->invocations > 1) {
gs_prog_data->base.include_vue_handles = true;
/* R3..RN: ICP Handles for each incoming vertex (when using pull model) */
- payload.num_regs += nir->info->gs.vertices_in;
+ payload.num_regs += nir->info.gs.vertices_in;
vue_prog_data->urb_read_length =
- ROUND_DOWN_TO(max_push_components / nir->info->gs.vertices_in, 8) / 8;
+ ROUND_DOWN_TO(max_push_components / nir->info.gs.vertices_in, 8) / 8;
}
}
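Note: the setup_gs_payload() hunk decides between pushing all incoming vertex data into registers and pulling it through ICP handles. If the pushed data would exceed the push limit (or the GS is instanced), the per-vertex read length is clamped to what still fits. A small arithmetic sketch of that check, assuming one read-length unit covers 8 components (as the factor of 8 suggests) and using made-up limits:

    #include <stdio.h>

    #define ROUND_DOWN_TO_8(x) ((x) / 8 * 8)

    int main(void)
    {
       unsigned urb_read_length     = 10;  /* per-vertex input, in units */
       unsigned vertices_in         = 3;   /* e.g. a triangles GS */
       unsigned max_push_components = 64;  /* illustrative limit */

       if (8 * urb_read_length * vertices_in > max_push_components) {
          /* Too much to push: switch to the pull model and clamp the
           * per-vertex read length to what still fits. */
          unsigned per_vertex_components = max_push_components / vertices_in;
          urb_read_length = ROUND_DOWN_TO_8(per_vertex_components) / 8;
       }
       printf("urb_read_length = %u\n", urb_read_length);  /* 2 */
       return 0;
    }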
@@ -5657,7 +5657,7 @@ fs_visitor::optimize()
if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \
char filename[64]; \
snprintf(filename, 64, "%s%d-%s-%02d-%02d-" #pass, \
- stage_abbrev, dispatch_width, nir->info->name, iteration, pass_num); \
+ stage_abbrev, dispatch_width, nir->info.name, iteration, pass_num); \
\
backend_shader::dump_instructions(filename); \
} \
@@ -5671,7 +5671,7 @@ fs_visitor::optimize()
if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
char filename[64];
snprintf(filename, 64, "%s%d-%s-00-00-start",
- stage_abbrev, dispatch_width, nir->info->name);
+ stage_abbrev, dispatch_width, nir->info.name);
backend_shader::dump_instructions(filename);
}
@@ -5968,15 +5968,15 @@ fs_visitor::run_tcs_single_patch()
}
/* Fix the dispatch mask */
- if (nir->info->tess.tcs_vertices_out % 8) {
+ if (nir->info.tess.tcs_vertices_out % 8) {
bld.CMP(bld.null_reg_ud(), invocation_id,
- brw_imm_ud(nir->info->tess.tcs_vertices_out), BRW_CONDITIONAL_L);
+ brw_imm_ud(nir->info.tess.tcs_vertices_out), BRW_CONDITIONAL_L);
bld.IF(BRW_PREDICATE_NORMAL);
}
emit_nir_code();
- if (nir->info->tess.tcs_vertices_out % 8) {
+ if (nir->info.tess.tcs_vertices_out % 8) {
bld.emit(BRW_OPCODE_ENDIF);
}
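Note: run_tcs_single_patch() runs eight TCS invocations per SIMD8 dispatch, so when the output vertex count is not a multiple of eight the trailing channels must be masked off, which is what the CMP/IF ... ENDIF bracket around emit_nir_code() does. A conceptual sketch in plain C of the per-channel guard the predication sets up (the scalar loop stands in for the SIMD machinery and is illustrative only):

    #include <stdio.h>

    static void run_channel(unsigned invocation_id, unsigned vertices_out)
    {
       /* Channels at or beyond vertices_out skip the shader body. */
       if (vertices_out % 8 == 0 || invocation_id < vertices_out)
          printf("channel %u executes the TCS body\n", invocation_id);
    }

    int main(void)
    {
       const unsigned vertices_out = 3;     /* e.g. layout(vertices = 3) */
       for (unsigned ch = 0; ch < 8; ch++)  /* one SIMD8 dispatch */
          run_channel(ch, vertices_out);
       return 0;
    }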
@@ -6119,8 +6119,8 @@ fs_visitor::run_fs(bool allow_spilling, bool do_rep_send)
emit_shader_time_begin();
calculate_urb_setup();
- if (nir->info->inputs_read > 0 ||
- (nir->info->outputs_read > 0 && !wm_key->coherent_fb_fetch)) {
+ if (nir->info.inputs_read > 0 ||
+ (nir->info.outputs_read > 0 && !wm_key->coherent_fb_fetch)) {
if (devinfo->gen < 6)
emit_interpolation_setup_gen4();
else
@@ -6284,8 +6284,8 @@ brw_compute_flat_inputs(struct brw_wm_prog_data *prog_data,
static uint8_t
computed_depth_mode(const nir_shader *shader)
{
- if (shader->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
- switch (shader->info->fs.depth_layout) {
+ if (shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
+ switch (shader->info.fs.depth_layout) {
case FRAG_DEPTH_LAYOUT_NONE:
case FRAG_DEPTH_LAYOUT_ANY:
return BRW_PSCDEPTH_ON;
@@ -6465,25 +6465,25 @@ brw_compile_fs(const struct brw_compiler *compiler, void *log_data,
/* key->alpha_test_func means simulating alpha testing via discards,
* so the shader definitely kills pixels.
*/
- prog_data->uses_kill = shader->info->fs.uses_discard ||
+ prog_data->uses_kill = shader->info.fs.uses_discard ||
key->alpha_test_func;
prog_data->uses_omask = key->multisample_fbo &&
- shader->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
+ shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
prog_data->computed_depth_mode = computed_depth_mode(shader);
prog_data->computed_stencil =
- shader->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL);
+ shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL);
prog_data->persample_dispatch =
key->multisample_fbo &&
(key->persample_interp ||
- (shader->info->system_values_read & (SYSTEM_BIT_SAMPLE_ID |
+ (shader->info.system_values_read & (SYSTEM_BIT_SAMPLE_ID |
SYSTEM_BIT_SAMPLE_POS)) ||
- shader->info->fs.uses_sample_qualifier ||
- shader->info->outputs_read);
+ shader->info.fs.uses_sample_qualifier ||
+ shader->info.outputs_read);
- prog_data->early_fragment_tests = shader->info->fs.early_fragment_tests;
- prog_data->post_depth_coverage = shader->info->fs.post_depth_coverage;
- prog_data->inner_coverage = shader->info->fs.inner_coverage;
+ prog_data->early_fragment_tests = shader->info.fs.early_fragment_tests;
+ prog_data->post_depth_coverage = shader->info.fs.post_depth_coverage;
+ prog_data->inner_coverage = shader->info.fs.inner_coverage;
prog_data->barycentric_interp_modes =
brw_compute_barycentric_interp_modes(compiler->devinfo, shader);
@@ -6566,9 +6566,9 @@ brw_compile_fs(const struct brw_compiler *compiler, void *log_data,
if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
g.enable_debug(ralloc_asprintf(mem_ctx, "%s fragment shader %s",
- shader->info->label ?
- shader->info->label : "unnamed",
- shader->info->name));
+ shader->info.label ?
+ shader->info.label : "unnamed",
+ shader->info.name));
}
if (simd8_cfg) {
@@ -6700,12 +6700,12 @@ brw_compile_cs(const struct brw_compiler *compiler, void *log_data,
brw_nir_lower_intrinsics(shader, &prog_data->base);
shader = brw_postprocess_nir(shader, compiler, true);
- prog_data->local_size[0] = shader->info->cs.local_size[0];
- prog_data->local_size[1] = shader->info->cs.local_size[1];
- prog_data->local_size[2] = shader->info->cs.local_size[2];
+ prog_data->local_size[0] = shader->info.cs.local_size[0];
+ prog_data->local_size[1] = shader->info.cs.local_size[1];
+ prog_data->local_size[2] = shader->info.cs.local_size[2];
unsigned local_workgroup_size =
- shader->info->cs.local_size[0] * shader->info->cs.local_size[1] *
- shader->info->cs.local_size[2];
+ shader->info.cs.local_size[0] * shader->info.cs.local_size[1] *
+ shader->info.cs.local_size[2];
unsigned max_cs_threads = compiler->devinfo->max_cs_threads;
unsigned simd_required = DIV_ROUND_UP(local_workgroup_size, max_cs_threads);
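Note: the local_size product above feeds the choice of SIMD width; the whole work group has to fit in max_cs_threads hardware threads, so each thread must cover at least local_workgroup_size / max_cs_threads invocations. A worked example with illustrative numbers:

    #include <stdio.h>

    #define DIV_ROUND_UP(a, b) (((a) + (b) - 1) / (b))

    int main(void)
    {
       const unsigned local_size[3] = {16, 16, 4};
       unsigned local_workgroup_size =
          local_size[0] * local_size[1] * local_size[2];       /* 1024 */
       unsigned max_cs_threads = 64;                            /* illustrative */
       unsigned simd_required =
          DIV_ROUND_UP(local_workgroup_size, max_cs_threads);   /* 16 */

       printf("requires at least SIMD%u\n", simd_required);
       return 0;
    }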
@@ -6795,9 +6795,9 @@ brw_compile_cs(const struct brw_compiler *compiler, void *log_data,
MESA_SHADER_COMPUTE);
if (INTEL_DEBUG & DEBUG_CS) {
char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
- shader->info->label ? shader->info->label :
+ shader->info.label ? shader->info.label :
"unnamed",
- shader->info->name);
+ shader->info.name);
g.enable_debug(name);
}
diff --git a/src/intel/compiler/brw_fs_nir.cpp b/src/intel/compiler/brw_fs_nir.cpp
index 23cd4b73f07..2ea94ab6939 100644
--- a/src/intel/compiler/brw_fs_nir.cpp
+++ b/src/intel/compiler/brw_fs_nir.cpp
@@ -1853,7 +1853,7 @@ fs_visitor::emit_gs_vertex(const nir_src &vertex_count_nir_src,
* be recorded by transform feedback, we can simply discard all geometry
* bound to these streams when transform feedback is disabled.
*/
- if (stream_id > 0 && !nir->info->has_transform_feedback_varyings)
+ if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
return;
/* If we're outputting 32 control data bits or less, then we can wait
@@ -2008,12 +2008,12 @@ fs_visitor::emit_gs_input_load(const fs_reg &dst,
/* Use first_icp_handle as the base offset. There is one register
* of URB handles per vertex, so inform the register allocator that
- * we might read up to nir->info->gs.vertices_in registers.
+ * we might read up to nir->info.gs.vertices_in registers.
*/
bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
retype(brw_vec8_grf(first_icp_handle, 0), icp_handle.type),
fs_reg(icp_offset_bytes),
- brw_imm_ud(nir->info->gs.vertices_in * REG_SIZE));
+ brw_imm_ud(nir->info.gs.vertices_in * REG_SIZE));
}
} else {
assert(gs_prog_data->invocations > 1);
@@ -2039,12 +2039,12 @@ fs_visitor::emit_gs_input_load(const fs_reg &dst,
/* Use first_icp_handle as the base offset. There is one DWord
* of URB handles per vertex, so inform the register allocator that
- * we might read up to ceil(nir->info->gs.vertices_in / 8) registers.
+ * we might read up to ceil(nir->info.gs.vertices_in / 8) registers.
*/
bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
retype(brw_vec8_grf(first_icp_handle, 0), icp_handle.type),
fs_reg(icp_offset_bytes),
- brw_imm_ud(DIV_ROUND_UP(nir->info->gs.vertices_in, 8) *
+ brw_imm_ud(DIV_ROUND_UP(nir->info.gs.vertices_in, 8) *
REG_SIZE));
}
}
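Note: the two emit_gs_input_load() hunks pass an upper bound on how far the indirect MOV may read so the register allocator keeps that range live: one full register of handles per vertex in the first case, one DWord per vertex (eight per 32-byte register) in the second. A worked example of the second bound, taking the 32-byte register size as given:

    #include <stdio.h>

    #define REG_SIZE 32   /* bytes per register, as assumed above */
    #define DIV_ROUND_UP(a, b) (((a) + (b) - 1) / (b))

    int main(void)
    {
       unsigned vertices_in = 6;   /* e.g. triangles with adjacency */
       unsigned bound_bytes = DIV_ROUND_UP(vertices_in, 8) * REG_SIZE;
       printf("indirect read bound: %u bytes\n", bound_bytes);  /* 32 */
       return 0;
    }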
@@ -3849,7 +3849,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
*/
brw_mark_surface_used(prog_data,
stage_prog_data->binding_table.ubo_start +
- nir->info->num_ubos - 1);
+ nir->info.num_ubos - 1);
}
nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
@@ -3919,7 +3919,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
*/
brw_mark_surface_used(prog_data,
stage_prog_data->binding_table.ssbo_start +
- nir->info->num_ssbos - 1);
+ nir->info.num_ssbos - 1);
}
fs_reg offset_reg;
@@ -3959,7 +3959,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
brw_mark_surface_used(prog_data,
stage_prog_data->binding_table.ssbo_start +
- nir->info->num_ssbos - 1);
+ nir->info.num_ssbos - 1);
}
/* Value */
@@ -4171,7 +4171,7 @@ fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld,
*/
brw_mark_surface_used(prog_data,
stage_prog_data->binding_table.ssbo_start +
- nir->info->num_ssbos - 1);
+ nir->info.num_ssbos - 1);
}
fs_reg offset = get_nir_src(instr->src[1]);
diff --git a/src/intel/compiler/brw_fs_visitor.cpp b/src/intel/compiler/brw_fs_visitor.cpp
index cea38d86237..cd411481d84 100644
--- a/src/intel/compiler/brw_fs_visitor.cpp
+++ b/src/intel/compiler/brw_fs_visitor.cpp
@@ -36,7 +36,7 @@ fs_reg *
fs_visitor::emit_vs_system_value(int location)
{
fs_reg *reg = new(this->mem_ctx)
- fs_reg(ATTR, 4 * _mesa_bitcount_64(nir->info->inputs_read),
+ fs_reg(ATTR, 4 * _mesa_bitcount_64(nir->info.inputs_read),
BRW_REGISTER_TYPE_D);
struct brw_vs_prog_data *vs_prog_data = brw_vs_prog_data(prog_data);
@@ -60,7 +60,7 @@ fs_visitor::emit_vs_system_value(int location)
vs_prog_data->uses_instanceid = true;
break;
case SYSTEM_VALUE_DRAW_ID:
- if (nir->info->system_values_read &
+ if (nir->info.system_values_read &
(BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX) |
BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
@@ -414,13 +414,13 @@ fs_visitor::emit_single_fb_write(const fs_builder &bld,
fs_reg src_depth, src_stencil;
if (source_depth_to_render_target) {
- if (nir->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
+ if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
src_depth = frag_depth;
else
src_depth = fs_reg(brw_vec8_grf(payload.source_depth_reg, 0));
}
- if (nir->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL))
+ if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL))
src_stencil = frag_stencil;
const fs_reg sources[] = {
@@ -459,7 +459,7 @@ fs_visitor::emit_fb_writes()
limit_dispatch_width(8, "Depth writes unsupported in SIMD16+ mode.\n");
}
- if (nir->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
+ if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
/* From the 'Render Target Write message' section of the docs:
* "Output Stencil is not supported with SIMD16 Render Target Write
* Messages."
diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c
index 3c0a7ced572..1bd6d02aaef 100644
--- a/src/intel/compiler/brw_nir.c
+++ b/src/intel/compiler/brw_nir.c
@@ -199,8 +199,8 @@ remap_patch_urb_offsets(nir_block *block, nir_builder *b,
const struct brw_vue_map *vue_map,
GLenum tes_primitive_mode)
{
- const bool is_passthrough_tcs = b->shader->info->name &&
- strcmp(b->shader->info->name, "passthrough") == 0;
+ const bool is_passthrough_tcs = b->shader->info.name &&
+ strcmp(b->shader->info.name, "passthrough") == 0;
nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
@@ -283,7 +283,7 @@ brw_nir_lower_vs_inputs(nir_shader *nir,
nir_foreach_function(function, nir) {
if (function->impl) {
nir_foreach_block(block, function->impl) {
- remap_vs_attrs(block, nir->info);
+ remap_vs_attrs(block, &nir->info);
}
}
}
@@ -337,7 +337,7 @@ brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
nir_builder_init(&b, function->impl);
nir_foreach_block(block, function->impl) {
remap_patch_urb_offsets(block, &b, vue_map,
- nir->info->tess.primitive_mode);
+ nir->info.tess.primitive_mode);
}
}
}
diff --git a/src/intel/compiler/brw_nir_intrinsics.c b/src/intel/compiler/brw_nir_intrinsics.c
index 901a1fb0ab9..d63570fa2a7 100644
--- a/src/intel/compiler/brw_nir_intrinsics.c
+++ b/src/intel/compiler/brw_nir_intrinsics.c
@@ -41,7 +41,7 @@ read_thread_local_id(struct lower_intrinsics_state *state)
{
nir_builder *b = &state->builder;
nir_shader *nir = state->nir;
- const unsigned *sizes = nir->info->cs.local_size;
+ const unsigned *sizes = nir->info.cs.local_size;
const unsigned group_size = sizes[0] * sizes[1] * sizes[2];
/* Some programs have local_size dimensions so small that the thread local
@@ -111,7 +111,7 @@ lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state,
* (gl_WorkGroupSize.x * gl_WorkGroupSize.y)) %
* gl_WorkGroupSize.z;
*/
- unsigned *size = nir->info->cs.local_size;
+ unsigned *size = nir->info.cs.local_size;
nir_ssa_def *local_index = nir_load_local_invocation_index(b);
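Note: the comment fragment above is the tail of the usual decomposition of the flat gl_LocalInvocationIndex back into gl_LocalInvocationID using cs.local_size. A standalone sketch of that arithmetic with an illustrative work-group size:

    #include <stdio.h>

    int main(void)
    {
       const unsigned size[3] = {8, 4, 2};  /* local_size x, y, z */
       unsigned index = 53;                 /* flat invocation index */

       unsigned id_x = index % size[0];                          /* 5 */
       unsigned id_y = (index / size[0]) % size[1];              /* 2 */
       unsigned id_z = (index / (size[0] * size[1])) % size[2];  /* 1 */

       /* Reconstruction check: 1*32 + 2*8 + 5 == 53. */
       printf("id = (%u, %u, %u)\n", id_x, id_y, id_z);
       return 0;
    }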
diff --git a/src/intel/compiler/brw_shader.cpp b/src/intel/compiler/brw_shader.cpp
index 304b4ecf4fa..269b8a099a4 100644
--- a/src/intel/compiler/brw_shader.cpp
+++ b/src/intel/compiler/brw_shader.cpp
@@ -1168,8 +1168,8 @@ brw_compile_tes(const struct brw_compiler *compiler,
const bool is_scalar = compiler->scalar_stage[MESA_SHADER_TESS_EVAL];
nir_shader *nir = nir_shader_clone(mem_ctx, src_shader);
- nir->info->inputs_read = key->inputs_read;
- nir->info->patch_inputs_read = key->patch_inputs_read;
+ nir->info.inputs_read = key->inputs_read;
+ nir->info.patch_inputs_read = key->patch_inputs_read;
nir = brw_nir_apply_sampler_key(nir, compiler, &key->tex, is_scalar);
brw_nir_lower_tes_inputs(nir, input_vue_map);
@@ -1177,8 +1177,8 @@ brw_compile_tes(const struct brw_compiler *compiler,
nir = brw_postprocess_nir(nir, compiler, is_scalar);
brw_compute_vue_map(devinfo, &prog_data->base.vue_map,
- nir->info->outputs_written,
- nir->info->separate_shader);
+ nir->info.outputs_written,
+ nir->info.separate_shader);
unsigned output_size_bytes = prog_data->base.vue_map.num_slots * 4 * 4;
@@ -1190,10 +1190,10 @@ brw_compile_tes(const struct brw_compiler *compiler,
}
prog_data->base.clip_distance_mask =
- ((1 << nir->info->clip_distance_array_size) - 1);
+ ((1 << nir->info.clip_distance_array_size) - 1);
prog_data->base.cull_distance_mask =
- ((1 << nir->info->cull_distance_array_size) - 1) <<
- nir->info->clip_distance_array_size;
+ ((1 << nir->info.cull_distance_array_size) - 1) <<
+ nir->info.clip_distance_array_size;
/* URB entry sizes are stored as a multiple of 64 bytes. */
prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 64) / 64;
@@ -1206,9 +1206,9 @@ brw_compile_tes(const struct brw_compiler *compiler,
TESS_SPACING_FRACTIONAL_EVEN - 1);
prog_data->partitioning =
- (enum brw_tess_partitioning) (nir->info->tess.spacing - 1);
+ (enum brw_tess_partitioning) (nir->info.tess.spacing - 1);
- switch (nir->info->tess.primitive_mode) {
+ switch (nir->info.tess.primitive_mode) {
case GL_QUADS:
prog_data->domain = BRW_TESS_DOMAIN_QUAD;
break;
@@ -1222,14 +1222,14 @@ brw_compile_tes(const struct brw_compiler *compiler,
unreachable("invalid domain shader primitive mode");
}
- if (nir->info->tess.point_mode) {
+ if (nir->info.tess.point_mode) {
prog_data->output_topology = BRW_TESS_OUTPUT_TOPOLOGY_POINT;
- } else if (nir->info->tess.primitive_mode == GL_ISOLINES) {
+ } else if (nir->info.tess.primitive_mode == GL_ISOLINES) {
prog_data->output_topology = BRW_TESS_OUTPUT_TOPOLOGY_LINE;
} else {
/* Hardware winding order is backwards from OpenGL */
prog_data->output_topology =
- nir->info->tess.ccw ? BRW_TESS_OUTPUT_TOPOLOGY_TRI_CW
+ nir->info.tess.ccw ? BRW_TESS_OUTPUT_TOPOLOGY_TRI_CW
: BRW_TESS_OUTPUT_TOPOLOGY_TRI_CCW;
}
@@ -1259,9 +1259,9 @@ brw_compile_tes(const struct brw_compiler *compiler,
if (unlikely(INTEL_DEBUG & DEBUG_TES)) {
g.enable_debug(ralloc_asprintf(mem_ctx,
"%s tessellation evaluation shader %s",
- nir->info->label ? nir->info->label
+ nir->info.label ? nir->info.label
: "unnamed",
- nir->info->name));
+ nir->info.name));
}
g.generate_code(v.cfg, 8);
diff --git a/src/intel/compiler/brw_vec4.cpp b/src/intel/compiler/brw_vec4.cpp
index 70487d3c151..9f280840091 100644
--- a/src/intel/compiler/brw_vec4.cpp
+++ b/src/intel/compiler/brw_vec4.cpp
@@ -2620,7 +2620,7 @@ vec4_visitor::run()
if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \
char filename[64]; \
snprintf(filename, 64, "%s-%s-%02d-%02d-" #pass, \
- stage_abbrev, nir->info->name, iteration, pass_num); \
+ stage_abbrev, nir->info.name, iteration, pass_num); \
\
backend_shader::dump_instructions(filename); \
} \
@@ -2633,7 +2633,7 @@ vec4_visitor::run()
if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
char filename[64];
snprintf(filename, 64, "%s-%s-00-00-start",
- stage_abbrev, nir->info->name);
+ stage_abbrev, nir->info.name);
backend_shader::dump_instructions(filename);
}
@@ -2779,17 +2779,17 @@ brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
const unsigned *assembly = NULL;
prog_data->base.clip_distance_mask =
- ((1 << shader->info->clip_distance_array_size) - 1);
+ ((1 << shader->info.clip_distance_array_size) - 1);
prog_data->base.cull_distance_mask =
- ((1 << shader->info->cull_distance_array_size) - 1) <<
- shader->info->clip_distance_array_size;
+ ((1 << shader->info.cull_distance_array_size) - 1) <<
+ shader->info.clip_distance_array_size;
unsigned nr_attribute_slots = _mesa_bitcount_64(prog_data->inputs_read);
/* gl_VertexID and gl_InstanceID are system values, but arrive via an
* incoming vertex attribute. So, add an extra slot.
*/
- if (shader->info->system_values_read &
+ if (shader->info.system_values_read &
(BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX) |
BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
@@ -2798,13 +2798,13 @@ brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
}
/* gl_DrawID has its very own vec4 */
- if (shader->info->system_values_read &
+ if (shader->info.system_values_read &
BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID)) {
nr_attribute_slots++;
}
unsigned nr_attributes = nr_attribute_slots -
- DIV_ROUND_UP(_mesa_bitcount_64(shader->info->double_inputs_read), 2);
+ DIV_ROUND_UP(_mesa_bitcount_64(shader->info.double_inputs_read), 2);
/* The 3DSTATE_VS documentation lists the lower bound on "Vertex URB Entry
* Read Length" as 1 in vec4 mode, and 0 in SIMD8 mode. Empirically, in
@@ -2858,9 +2858,9 @@ brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
if (INTEL_DEBUG & DEBUG_VS) {
const char *debug_name =
ralloc_asprintf(mem_ctx, "%s vertex shader %s",
- shader->info->label ? shader->info->label :
+ shader->info.label ? shader->info.label :
"unnamed",
- shader->info->name);
+ shader->info.name);
g.enable_debug(debug_name);
}
diff --git a/src/intel/compiler/brw_vec4_generator.cpp b/src/intel/compiler/brw_vec4_generator.cpp
index 753b00c4ed1..8505f693499 100644
--- a/src/intel/compiler/brw_vec4_generator.cpp
+++ b/src/intel/compiler/brw_vec4_generator.cpp
@@ -2192,8 +2192,8 @@ generate_code(struct brw_codegen *p,
if (unlikely(debug_flag)) {
fprintf(stderr, "Native code for %s %s shader %s:\n",
- nir->info->label ? nir->info->label : "unnamed",
- _mesa_shader_stage_to_string(nir->stage), nir->info->name);
+ nir->info.label ? nir->info.label : "unnamed",
+ _mesa_shader_stage_to_string(nir->stage), nir->info.name);
fprintf(stderr, "%s vec4 shader: %d instructions. %d loops. %u cycles. %d:%d "
"spills:fills. Compacted %d to %d bytes (%.0f%%)\n",
diff --git a/src/intel/compiler/brw_vec4_gs_visitor.cpp b/src/intel/compiler/brw_vec4_gs_visitor.cpp
index 4a8b5be30e1..9793ef50125 100644
--- a/src/intel/compiler/brw_vec4_gs_visitor.cpp
+++ b/src/intel/compiler/brw_vec4_gs_visitor.cpp
@@ -85,7 +85,7 @@ vec4_gs_visitor::setup_varying_inputs(int payload_reg, int *attribute_map,
* so the total number of input slots that will be delivered to the GS (and
* thus the stride of the input arrays) is urb_read_length * 2.
*/
- const unsigned num_input_vertices = nir->info->gs.vertices_in;
+ const unsigned num_input_vertices = nir->info.gs.vertices_in;
assert(num_input_vertices <= MAX_GS_INPUT_VERTICES);
unsigned input_array_stride = prog_data->urb_read_length * 2;
@@ -455,7 +455,7 @@ vec4_gs_visitor::gs_emit_vertex(int stream_id)
* be recorded by transform feedback, we can simply discard all geometry
* bound to these streams when transform feedback is disabled.
*/
- if (stream_id > 0 && !nir->info->has_transform_feedback_varyings)
+ if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
return;
/* If we're outputting 32 control data bits or less, then we can wait
@@ -628,10 +628,10 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
* For SSO pipelines, we use a fixed VUE map layout based on variable
* locations, so we can rely on rendezvous-by-location making this work.
*/
- GLbitfield64 inputs_read = shader->info->inputs_read;
+ GLbitfield64 inputs_read = shader->info.inputs_read;
brw_compute_vue_map(compiler->devinfo,
&c.input_vue_map, inputs_read,
- shader->info->separate_shader);
+ shader->info.separate_shader);
shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, is_scalar);
brw_nir_lower_vue_inputs(shader, is_scalar, &c.input_vue_map);
@@ -639,21 +639,21 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
shader = brw_postprocess_nir(shader, compiler, is_scalar);
prog_data->base.clip_distance_mask =
- ((1 << shader->info->clip_distance_array_size) - 1);
+ ((1 << shader->info.clip_distance_array_size) - 1);
prog_data->base.cull_distance_mask =
- ((1 << shader->info->cull_distance_array_size) - 1) <<
- shader->info->clip_distance_array_size;
+ ((1 << shader->info.cull_distance_array_size) - 1) <<
+ shader->info.clip_distance_array_size;
prog_data->include_primitive_id =
- (shader->info->system_values_read & (1 << SYSTEM_VALUE_PRIMITIVE_ID)) != 0;
+ (shader->info.system_values_read & (1 << SYSTEM_VALUE_PRIMITIVE_ID)) != 0;
- prog_data->invocations = shader->info->gs.invocations;
+ prog_data->invocations = shader->info.gs.invocations;
if (compiler->devinfo->gen >= 8)
prog_data->static_vertex_count = nir_gs_count_vertices(shader);
if (compiler->devinfo->gen >= 7) {
- if (shader->info->gs.output_primitive == GL_POINTS) {
+ if (shader->info.gs.output_primitive == GL_POINTS) {
/* When the output type is points, the geometry shader may output data
* to multiple streams, and EndPrimitive() has no effect. So we
* configure the hardware to interpret the control data as stream ID.
@@ -678,14 +678,14 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
* EndPrimitive().
*/
c.control_data_bits_per_vertex =
- shader->info->gs.uses_end_primitive ? 1 : 0;
+ shader->info.gs.uses_end_primitive ? 1 : 0;
}
} else {
/* There are no control data bits in gen6. */
c.control_data_bits_per_vertex = 0;
}
c.control_data_header_size_bits =
- shader->info->gs.vertices_out * c.control_data_bits_per_vertex;
+ shader->info.gs.vertices_out * c.control_data_bits_per_vertex;
/* 1 HWORD = 32 bytes = 256 bits */
prog_data->control_data_header_size_hwords =
@@ -780,7 +780,7 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
unsigned output_size_bytes;
if (compiler->devinfo->gen >= 7) {
output_size_bytes =
- prog_data->output_vertex_size_hwords * 32 * shader->info->gs.vertices_out;
+ prog_data->output_vertex_size_hwords * 32 * shader->info.gs.vertices_out;
output_size_bytes += 32 * prog_data->control_data_header_size_hwords;
} else {
output_size_bytes = prog_data->output_vertex_size_hwords * 32;
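Note: on the gen7+ path above, the GS URB entry has to hold every emitted vertex plus the control-data header, whose size was computed a few hunks earlier as one EndPrimitive/stream bit per vertex rounded up to 256-bit HWords. A worked example with illustrative sizes:

    #include <stdio.h>

    #define DIV_ROUND_UP(a, b) (((a) + (b) - 1) / (b))

    int main(void)
    {
       unsigned vertices_out              = 100;
       unsigned bits_per_vertex           = 1;   /* EndPrimitive() bit */
       unsigned output_vertex_size_hwords = 2;   /* 64 bytes of varyings */

       unsigned control_bits   = vertices_out * bits_per_vertex;      /* 100 */
       unsigned control_hwords = DIV_ROUND_UP(control_bits, 256);     /* 1 */

       unsigned output_size_bytes =
          output_vertex_size_hwords * 32 * vertices_out +             /* 6400 */
          32 * control_hwords;                                        /* + 32 */

       printf("GS URB output size: %u bytes\n", output_size_bytes);
       return 0;
    }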
@@ -814,11 +814,11 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
else
prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 128) / 128;
- assert(shader->info->gs.output_primitive < ARRAY_SIZE(gl_prim_to_hw_prim));
+ assert(shader->info.gs.output_primitive < ARRAY_SIZE(gl_prim_to_hw_prim));
prog_data->output_topology =
- gl_prim_to_hw_prim[shader->info->gs.output_primitive];
+ gl_prim_to_hw_prim[shader->info.gs.output_primitive];
- prog_data->vertices_in = shader->info->gs.vertices_in;
+ prog_data->vertices_in = shader->info.gs.vertices_in;
/* GS inputs are read from the VUE 256 bits (2 vec4's) at a time, so we
* need to program a URB read length of ceiling(num_slots / 2).
@@ -847,9 +847,9 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
false, MESA_SHADER_GEOMETRY);
if (unlikely(INTEL_DEBUG & DEBUG_GS)) {
const char *label =
- shader->info->label ? shader->info->label : "unnamed";
+ shader->info.label ? shader->info.label : "unnamed";
char *name = ralloc_asprintf(mem_ctx, "%s geometry shader %s",
- label, shader->info->name);
+ label, shader->info.name);
g.enable_debug(name);
}
g.generate_code(v.cfg, 8);
diff --git a/src/intel/compiler/brw_vec4_nir.cpp b/src/intel/compiler/brw_vec4_nir.cpp
index a82d52088a8..8424e17e25c 100644
--- a/src/intel/compiler/brw_vec4_nir.cpp
+++ b/src/intel/compiler/brw_vec4_nir.cpp
@@ -570,7 +570,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
brw_mark_surface_used(&prog_data->base,
prog_data->base.binding_table.ssbo_start +
- nir->info->num_ssbos - 1);
+ nir->info.num_ssbos - 1);
}
/* Offset */
@@ -736,7 +736,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
*/
brw_mark_surface_used(&prog_data->base,
prog_data->base.binding_table.ssbo_start +
- nir->info->num_ssbos - 1);
+ nir->info.num_ssbos - 1);
}
src_reg offset_reg;
@@ -948,7 +948,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
*/
brw_mark_surface_used(&prog_data->base,
prog_data->base.binding_table.ubo_start +
- nir->info->num_ubos - 1);
+ nir->info.num_ubos - 1);
}
src_reg offset_reg;
@@ -1046,7 +1046,7 @@ vec4_visitor::nir_emit_ssbo_atomic(int op, nir_intrinsic_instr *instr)
*/
brw_mark_surface_used(&prog_data->base,
prog_data->base.binding_table.ssbo_start +
- nir->info->num_ssbos - 1);
+ nir->info.num_ssbos - 1);
}
src_reg offset = get_nir_src(instr->src[1], 1);
diff --git a/src/intel/compiler/brw_vec4_tcs.cpp b/src/intel/compiler/brw_vec4_tcs.cpp
index d4a647d029f..c362a0a5f14 100644
--- a/src/intel/compiler/brw_vec4_tcs.cpp
+++ b/src/intel/compiler/brw_vec4_tcs.cpp
@@ -95,9 +95,9 @@ vec4_tcs_visitor::emit_prolog()
* HS instance dispatched will only have its bottom half doing real
* work, and so we need to disable the upper half:
*/
- if (nir->info->tess.tcs_vertices_out % 2) {
+ if (nir->info.tess.tcs_vertices_out % 2) {
emit(CMP(dst_null_d(), invocation_id,
- brw_imm_ud(nir->info->tess.tcs_vertices_out),
+ brw_imm_ud(nir->info.tess.tcs_vertices_out),
BRW_CONDITIONAL_L));
/* Matching ENDIF is in emit_thread_end() */
@@ -112,7 +112,7 @@ vec4_tcs_visitor::emit_thread_end()
vec4_instruction *inst;
current_annotation = "thread end";
- if (nir->info->tess.tcs_vertices_out % 2) {
+ if (nir->info.tess.tcs_vertices_out % 2) {
emit(BRW_OPCODE_ENDIF);
}
@@ -402,15 +402,15 @@ brw_compile_tcs(const struct brw_compiler *compiler,
const bool is_scalar = compiler->scalar_stage[MESA_SHADER_TESS_CTRL];
nir_shader *nir = nir_shader_clone(mem_ctx, src_shader);
- nir->info->outputs_written = key->outputs_written;
- nir->info->patch_outputs_written = key->patch_outputs_written;
+ nir->info.outputs_written = key->outputs_written;
+ nir->info.patch_outputs_written = key->patch_outputs_written;
struct brw_vue_map input_vue_map;
- brw_compute_vue_map(devinfo, &input_vue_map, nir->info->inputs_read,
- nir->info->separate_shader);
+ brw_compute_vue_map(devinfo, &input_vue_map, nir->info.inputs_read,
+ nir->info.separate_shader);
brw_compute_tess_vue_map(&vue_prog_data->vue_map,
- nir->info->outputs_written,
- nir->info->patch_outputs_written);
+ nir->info.outputs_written,
+ nir->info.patch_outputs_written);
nir = brw_nir_apply_sampler_key(nir, compiler, &key->tex, is_scalar);
brw_nir_lower_vue_inputs(nir, is_scalar, &input_vue_map);
@@ -422,9 +422,9 @@ brw_compile_tcs(const struct brw_compiler *compiler,
nir = brw_postprocess_nir(nir, compiler, is_scalar);
if (is_scalar)
- prog_data->instances = DIV_ROUND_UP(nir->info->tess.tcs_vertices_out, 8);
+ prog_data->instances = DIV_ROUND_UP(nir->info.tess.tcs_vertices_out, 8);
else
- prog_data->instances = DIV_ROUND_UP(nir->info->tess.tcs_vertices_out, 2);
+ prog_data->instances = DIV_ROUND_UP(nir->info.tess.tcs_vertices_out, 2);
/* Compute URB entry size. The maximum allowed URB entry size is 32k.
* That divides up as follows:
@@ -443,7 +443,7 @@ brw_compile_tcs(const struct brw_compiler *compiler,
unsigned output_size_bytes = 0;
/* Note that the patch header is counted in num_per_patch_slots. */
output_size_bytes += num_per_patch_slots * 16;
- output_size_bytes += nir->info->tess.tcs_vertices_out *
+ output_size_bytes += nir->info.tess.tcs_vertices_out *
num_per_vertex_slots * 16;
assert(output_size_bytes >= 1);
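Note: the TCS sizing above counts VUE slots, each a 16-byte vec4: per-patch slots (including the patch header) are stored once, per-vertex slots once for every output vertex. A worked example with illustrative slot counts:

    #include <stdio.h>

    int main(void)
    {
       unsigned num_per_patch_slots  = 3;   /* includes the patch header */
       unsigned num_per_vertex_slots = 4;
       unsigned tcs_vertices_out     = 3;   /* layout(vertices = 3) */

       unsigned output_size_bytes =
          num_per_patch_slots * 16 +                        /* 48  */
          tcs_vertices_out * num_per_vertex_slots * 16;     /* 192 */

       printf("TCS patch URB size: %u bytes\n", output_size_bytes);  /* 240 */
       return 0;
    }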
@@ -485,9 +485,9 @@ brw_compile_tcs(const struct brw_compiler *compiler,
if (unlikely(INTEL_DEBUG & DEBUG_TCS)) {
g.enable_debug(ralloc_asprintf(mem_ctx,
"%s tessellation control shader %s",
- nir->info->label ? nir->info->label
+ nir->info.label ? nir->info.label
: "unnamed",
- nir->info->name));
+ nir->info.name));
}
g.generate_code(v.cfg, 8);
diff --git a/src/intel/compiler/brw_wm_iz.cpp b/src/intel/compiler/brw_wm_iz.cpp
index 11d4f76b368..fead16586b6 100644
--- a/src/intel/compiler/brw_wm_iz.cpp
+++ b/src/intel/compiler/brw_wm_iz.cpp
@@ -142,7 +142,7 @@ void fs_visitor::setup_fs_payload_gen4()
}
prog_data->uses_src_depth =
- (nir->info->inputs_read & (1 << VARYING_SLOT_POS)) != 0;
+ (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
if (wm_iz_table[lookup].sd_present || prog_data->uses_src_depth ||
kill_stats_promoted_workaround) {
payload.source_depth_reg = reg;
diff --git a/src/intel/compiler/gen6_gs_visitor.cpp b/src/intel/compiler/gen6_gs_visitor.cpp
index 075bc4ad487..f76cdf02556 100644
--- a/src/intel/compiler/gen6_gs_visitor.cpp
+++ b/src/intel/compiler/gen6_gs_visitor.cpp
@@ -64,7 +64,7 @@ gen6_gs_visitor::emit_prolog()
this->vertex_output = src_reg(this,
glsl_type::uint_type,
(prog_data->vue_map.num_slots + 1) *
- nir->info->gs.vertices_out);
+ nir->info.gs.vertices_out);
this->vertex_output_offset = src_reg(this, glsl_type::uint_type);
emit(MOV(dst_reg(this->vertex_output_offset), brw_imm_ud(0u)));
@@ -178,7 +178,7 @@ gen6_gs_visitor::gs_emit_vertex(int stream_id)
dst_reg dst(this->vertex_output);
dst.reladdr = ralloc(mem_ctx, src_reg);
memcpy(dst.reladdr, &this->vertex_output_offset, sizeof(src_reg));
- if (nir->info->gs.output_primitive == GL_POINTS) {
+ if (nir->info.gs.output_primitive == GL_POINTS) {
/* If we are outputting points, then every vertex has PrimStart and
* PrimEnd set.
*/
@@ -207,7 +207,7 @@ gen6_gs_visitor::gs_end_primitive()
/* Calling EndPrimitive() is optional for point output. In this case we set
* the PrimEnd flag when we process EmitVertex().
*/
- if (nir->info->gs.output_primitive == GL_POINTS)
+ if (nir->info.gs.output_primitive == GL_POINTS)
return;
/* Otherwise we know that the last vertex we have processed was the last
@@ -219,7 +219,7 @@ gen6_gs_visitor::gs_end_primitive()
* comparison below (hence the num_output_vertices + 1 in the comparison
* below).
*/
- unsigned num_output_vertices = nir->info->gs.vertices_out;
+ unsigned num_output_vertices = nir->info.gs.vertices_out;
emit(CMP(dst_null_ud(), this->vertex_count,
brw_imm_ud(num_output_vertices + 1), BRW_CONDITIONAL_L));
vec4_instruction *inst = emit(CMP(dst_null_ud(),
@@ -323,7 +323,7 @@ gen6_gs_visitor::emit_thread_end()
* first_vertex is not zero. This is only relevant for outputs other than
* points because in the point case we set PrimEnd on all vertices.
*/
- if (nir->info->gs.output_primitive != GL_POINTS) {
+ if (nir->info.gs.output_primitive != GL_POINTS) {
emit(CMP(dst_null_ud(), this->first_vertex, brw_imm_ud(0u), BRW_CONDITIONAL_Z));
emit(IF(BRW_PREDICATE_NORMAL));
gs_end_primitive();
@@ -625,7 +625,7 @@ gen6_gs_visitor::xfb_write()
emit(BRW_OPCODE_ENDIF);
/* Write transform feedback data for all processed vertices. */
- for (int i = 0; i < (int)nir->info->gs.vertices_out; i++) {
+ for (int i = 0; i < (int)nir->info.gs.vertices_out; i++) {
emit(MOV(dst_reg(sol_temp), brw_imm_d(i)));
emit(CMP(dst_null_d(), sol_temp, this->vertex_count,
BRW_CONDITIONAL_L));
diff --git a/src/intel/vulkan/anv_pipeline.c b/src/intel/vulkan/anv_pipeline.c
index e96e2fcedba..5b69d1b9e08 100644
--- a/src/intel/vulkan/anv_pipeline.c
+++ b/src/intel/vulkan/anv_pipeline.c
@@ -173,7 +173,7 @@ anv_shader_compile_to_nir(struct anv_pipeline *pipeline,
NIR_PASS_V(nir, nir_lower_system_values);
/* Vulkan uses the separate-shader linking model */
- nir->info->separate_shader = true;
+ nir->info.separate_shader = true;
nir = brw_preprocess_nir(compiler, nir);
@@ -393,8 +393,8 @@ anv_pipeline_compile(struct anv_pipeline *pipeline,
prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
}
- if (nir->info->num_images > 0) {
- prog_data->nr_params += nir->info->num_images * BRW_IMAGE_PARAM_SIZE;
+ if (nir->info.num_images > 0) {
+ prog_data->nr_params += nir->info.num_images * BRW_IMAGE_PARAM_SIZE;
pipeline->needs_data_cache = true;
}
@@ -402,7 +402,7 @@ anv_pipeline_compile(struct anv_pipeline *pipeline,
((struct brw_cs_prog_data *)prog_data)->thread_local_id_index =
prog_data->nr_params++; /* The CS Thread ID uniform */
- if (nir->info->num_ssbos > 0)
+ if (nir->info.num_ssbos > 0)
pipeline->needs_data_cache = true;
if (prog_data->nr_params > 0) {
@@ -525,13 +525,13 @@ anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
ralloc_steal(mem_ctx, nir);
- prog_data.inputs_read = nir->info->inputs_read;
- prog_data.double_inputs_read = nir->info->double_inputs_read;
+ prog_data.inputs_read = nir->info.inputs_read;
+ prog_data.double_inputs_read = nir->info.double_inputs_read;
brw_compute_vue_map(&pipeline->device->info,
&prog_data.base.vue_map,
- nir->info->outputs_written,
- nir->info->separate_shader);
+ nir->info.outputs_written,
+ nir->info.separate_shader);
unsigned code_size;
const unsigned *shader_code =
@@ -663,10 +663,10 @@ anv_pipeline_compile_tcs_tes(struct anv_pipeline *pipeline,
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
nir_lower_tes_patch_vertices(tes_nir,
- tcs_nir->info->tess.tcs_vertices_out);
+ tcs_nir->info.tess.tcs_vertices_out);
/* Copy TCS info into the TES info */
- merge_tess_info(tes_nir->info, tcs_nir->info);
+ merge_tess_info(&tes_nir->info, &tcs_nir->info);
anv_fill_binding_table(&tcs_prog_data.base.base, 0);
anv_fill_binding_table(&tes_prog_data.base.base, 0);
@@ -680,13 +680,13 @@ anv_pipeline_compile_tcs_tes(struct anv_pipeline *pipeline,
* this comes from the SPIR-V, which is part of the hash used for the
* pipeline cache. So it should be safe.
*/
- tcs_key.tes_primitive_mode = tes_nir->info->tess.primitive_mode;
- tcs_key.outputs_written = tcs_nir->info->outputs_written;
- tcs_key.patch_outputs_written = tcs_nir->info->patch_outputs_written;
+ tcs_key.tes_primitive_mode = tes_nir->info.tess.primitive_mode;
+ tcs_key.outputs_written = tcs_nir->info.outputs_written;
+ tcs_key.patch_outputs_written = tcs_nir->info.patch_outputs_written;
tcs_key.quads_workaround =
devinfo->gen < 9 &&
- tes_nir->info->tess.primitive_mode == 7 /* GL_QUADS */ &&
- tes_nir->info->tess.spacing == TESS_SPACING_EQUAL;
+ tes_nir->info.tess.primitive_mode == 7 /* GL_QUADS */ &&
+ tes_nir->info.tess.spacing == TESS_SPACING_EQUAL;
tes_key.inputs_read = tcs_key.outputs_written;
tes_key.patch_inputs_read = tcs_key.patch_outputs_written;
@@ -791,8 +791,8 @@ anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
brw_compute_vue_map(&pipeline->device->info,
&prog_data.base.vue_map,
- nir->info->outputs_written,
- nir->info->separate_shader);
+ nir->info.outputs_written,
+ nir->info.separate_shader);
unsigned code_size;
const unsigned *shader_code =
diff --git a/src/mesa/drivers/dri/i965/brw_link.cpp b/src/mesa/drivers/dri/i965/brw_link.cpp
index da38ec2516a..57aaf6b9dc0 100644
--- a/src/mesa/drivers/dri/i965/brw_link.cpp
+++ b/src/mesa/drivers/dri/i965/brw_link.cpp
@@ -211,7 +211,7 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
prog->nir = brw_create_nir(brw, shProg, prog, (gl_shader_stage) stage,
compiler->scalar_stage[stage]);
- infos[stage] = prog->nir->info;
+ infos[stage] = &prog->nir->info;
/* Make a pass over the IR to add state references for any built-in
* uniforms that are used. This has to be done now (during linking).
diff --git a/src/mesa/drivers/dri/i965/brw_program.c b/src/mesa/drivers/dri/i965/brw_program.c
index 4641cfe8d4e..d26dce07f97 100644
--- a/src/mesa/drivers/dri/i965/brw_program.c
+++ b/src/mesa/drivers/dri/i965/brw_program.c
@@ -109,14 +109,12 @@ brw_create_nir(struct brw_context *brw,
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
- /* nir_shader may have been cloned so make sure shader_info is in sync */
- if (nir->info != &prog->info) {
- const char *name = prog->info.name;
- const char *label = prog->info.label;
- prog->info = *nir->info;
- prog->info.name = name;
- prog->info.label = label;
- }
+ /* Copy the info we just generated back into the gl_program */
+ const char *prog_name = prog->info.name;
+ const char *prog_label = prog->info.label;
+ prog->info = nir->info;
+ prog->info.name = prog_name;
+ prog->info.label = prog_label;
if (shader_prog) {
NIR_PASS_V(nir, nir_lower_samplers, shader_prog);
@@ -726,7 +724,7 @@ brw_assign_common_binding_table_offsets(const struct gen_device_info *devinfo,
stage_prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
}
- if (prog->nir->info->uses_texture_gather) {
+ if (prog->nir->info.uses_texture_gather) {
if (devinfo->gen >= 8) {
stage_prog_data->binding_table.gather_texture_start =
stage_prog_data->binding_table.texture_start;
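Note: the brw_program.c hunk above is the core of this series: shader_info now lives inside nir_shader by value instead of being a pointer into the gl_program, so the freshly gathered info is copied back into prog->info while the program keeps its own name and label strings. A sketch of that copy-back pattern with stand-in types (the real shader_info has many more fields and ralloc-owned strings):

    #include <stdio.h>

    struct info    { const char *name; const char *label; unsigned inputs_read; };
    struct program { struct info info; };
    struct shader  { struct info info; };  /* info embedded by value */

    static void sync_info_back(struct program *prog, const struct shader *nir)
    {
       /* Keep the program's identity strings, take the rest from NIR. */
       const char *name  = prog->info.name;
       const char *label = prog->info.label;
       prog->info = nir->info;
       prog->info.name  = name;
       prog->info.label = label;
    }

    int main(void)
    {
       struct program prog = { { "ARB1", "my label", 0 } };
       struct shader  nir  = { { NULL, NULL, 0xf } };
       sync_info_back(&prog, &nir);
       printf("%s: inputs_read = 0x%x\n", prog.info.name, prog.info.inputs_read);
       return 0;
    }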
diff --git a/src/mesa/drivers/dri/i965/brw_tcs.c b/src/mesa/drivers/dri/i965/brw_tcs.c
index 3cc6cdbf3c0..72c5872bcfd 100644
--- a/src/mesa/drivers/dri/i965/brw_tcs.c
+++ b/src/mesa/drivers/dri/i965/brw_tcs.c
@@ -50,11 +50,11 @@ create_passthrough_tcs(void *mem_ctx, const struct brw_compiler *compiler,
nir_ssa_def *invoc_id =
nir_load_system_value(&b, nir_intrinsic_load_invocation_id, 0);
- nir->info->inputs_read = key->outputs_written &
+ nir->info.inputs_read = key->outputs_written &
~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
- nir->info->outputs_written = key->outputs_written;
- nir->info->tess.tcs_vertices_out = key->input_vertices;
- nir->info->name = ralloc_strdup(nir, "passthrough");
+ nir->info.outputs_written = key->outputs_written;
+ nir->info.tess.tcs_vertices_out = key->input_vertices;
+ nir->info.name = ralloc_strdup(nir, "passthrough");
nir->num_uniforms = 8 * sizeof(uint32_t);
var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_0");
@@ -81,7 +81,7 @@ create_passthrough_tcs(void *mem_ctx, const struct brw_compiler *compiler,
}
/* Copy inputs to outputs. */
- uint64_t varyings = nir->info->inputs_read;
+ uint64_t varyings = nir->info.inputs_read;
while (varyings != 0) {
const int varying = ffsll(varyings) - 1;
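Note: the loop above walks the inputs_read mask one set bit at a time with ffsll(); the diff context cuts it off before the body, but the usual idiom clears each bit after handling it, which is what the standalone sketch below assumes:

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>   /* ffsll() is a GNU extension */

    int main(void)
    {
       uint64_t varyings = (1ull << 0) | (1ull << 4) | (1ull << 33);
       while (varyings != 0) {
          const int varying = ffsll(varyings) - 1;  /* lowest set bit */
          printf("copy varying %d from input to output\n", varying);
          varyings &= ~(1ull << varying);           /* assumed clearing step */
       }
       return 0;
    }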
@@ -394,8 +394,8 @@ brw_tcs_precompile(struct gl_context *ctx,
key.tes_primitive_mode = GL_TRIANGLES;
}
- key.outputs_written = prog->nir->info->outputs_written;
- key.patch_outputs_written = prog->nir->info->patch_outputs_written;
+ key.outputs_written = prog->nir->info.outputs_written;
+ key.patch_outputs_written = prog->nir->info.patch_outputs_written;
success = brw_codegen_tcs_prog(brw, btcp, btep, &key);
diff --git a/src/mesa/drivers/dri/i965/brw_tes.c b/src/mesa/drivers/dri/i965/brw_tes.c
index 449f946d854..372ef516a84 100644
--- a/src/mesa/drivers/dri/i965/brw_tes.c
+++ b/src/mesa/drivers/dri/i965/brw_tes.c
@@ -234,15 +234,15 @@ brw_tes_precompile(struct gl_context *ctx,
memset(&key, 0, sizeof(key));
key.program_string_id = btep->id;
- key.inputs_read = prog->nir->info->inputs_read;
- key.patch_inputs_read = prog->nir->info->patch_inputs_read;
+ key.inputs_read = prog->nir->info.inputs_read;
+ key.patch_inputs_read = prog->nir->info.patch_inputs_read;
if (shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL]) {
struct gl_program *tcp =
shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL]->Program;
- key.inputs_read |= tcp->nir->info->outputs_written &
+ key.inputs_read |= tcp->nir->info.outputs_written &
~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
- key.patch_inputs_read |= tcp->nir->info->patch_outputs_written;
+ key.patch_inputs_read |= tcp->nir->info.patch_outputs_written;
}
brw_setup_tex_for_precompile(brw, &key.tex, prog);
diff --git a/src/mesa/drivers/dri/i965/brw_vs.c b/src/mesa/drivers/dri/i965/brw_vs.c
index 74b07cb3ccc..b1ea01a9add 100644
--- a/src/mesa/drivers/dri/i965/brw_vs.c
+++ b/src/mesa/drivers/dri/i965/brw_vs.c
@@ -219,7 +219,7 @@ brw_codegen_vs_prog(struct brw_context *brw,
brw_compute_vue_map(devinfo,
&prog_data.base.vue_map, outputs_written,
- vp->program.nir->info->separate_shader);
+ vp->program.nir->info.separate_shader);
if (0) {
_mesa_fprint_program_opt(stderr, &vp->program, PROG_PRINT_DEBUG, true);
diff --git a/src/mesa/drivers/dri/i965/brw_wm.c b/src/mesa/drivers/dri/i965/brw_wm.c
index 59d503e746b..6fac3c4a849 100644
--- a/src/mesa/drivers/dri/i965/brw_wm.c
+++ b/src/mesa/drivers/dri/i965/brw_wm.c
@@ -58,7 +58,7 @@ assign_fs_binding_table_offsets(const struct gen_device_info *devinfo,
brw_assign_common_binding_table_offsets(devinfo, prog, &prog_data->base,
next_binding_table_offset);
- if (prog->nir->info->outputs_read && !key->coherent_fb_fetch) {
+ if (prog->nir->info.outputs_read && !key->coherent_fb_fetch) {
prog_data->binding_table.render_target_read_start =
next_binding_table_offset;
next_binding_table_offset += key->nr_color_regions;
@@ -335,7 +335,7 @@ brw_populate_sampler_prog_key_data(struct gl_context *ctx,
}
/* gather4 for RG32* is broken in multiple ways on Gen7. */
- if (brw->gen == 7 && prog->nir->info->uses_texture_gather) {
+ if (brw->gen == 7 && prog->nir->info.uses_texture_gather) {
switch (img->InternalFormat) {
case GL_RG32I:
case GL_RG32UI: {
@@ -373,7 +373,7 @@ brw_populate_sampler_prog_key_data(struct gl_context *ctx,
/* Gen6's gather4 is broken for UINT/SINT; we treat them as
* UNORM/FLOAT instead and fix it in the shader.
*/
- if (brw->gen == 6 && prog->nir->info->uses_texture_gather) {
+ if (brw->gen == 6 && prog->nir->info.uses_texture_gather) {
key->gen6_gather_wa[s] = gen6_gather_workaround(img->InternalFormat);
}
diff --git a/src/mesa/drivers/dri/i965/brw_wm_surface_state.c b/src/mesa/drivers/dri/i965/brw_wm_surface_state.c
index 49383c7463b..c95fb3739b3 100644
--- a/src/mesa/drivers/dri/i965/brw_wm_surface_state.c
+++ b/src/mesa/drivers/dri/i965/brw_wm_surface_state.c
@@ -1303,15 +1303,15 @@ brw_update_texture_surfaces(struct brw_context *brw)
* allows the surface format to be overridden for only the
* gather4 messages. */
if (brw->gen < 8) {
- if (vs && vs->nir->info->uses_texture_gather)
+ if (vs && vs->nir->info.uses_texture_gather)
update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
- if (tcs && tcs->nir->info->uses_texture_gather)
+ if (tcs && tcs->nir->info.uses_texture_gather)
update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
- if (tes && tes->nir->info->uses_texture_gather)
+ if (tes && tes->nir->info.uses_texture_gather)
update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
- if (gs && gs->nir->info->uses_texture_gather)
+ if (gs && gs->nir->info.uses_texture_gather)
update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
- if (fs && fs->nir->info->uses_texture_gather)
+ if (fs && fs->nir->info.uses_texture_gather)
update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
}
@@ -1356,7 +1356,7 @@ brw_update_cs_texture_surfaces(struct brw_context *brw)
* gather4 messages.
*/
if (brw->gen < 8) {
- if (cs && cs->nir->info->uses_texture_gather)
+ if (cs && cs->nir->info.uses_texture_gather)
update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
}
diff --git a/src/mesa/program/prog_to_nir.c b/src/mesa/program/prog_to_nir.c
index a1c5ba6ac20..851b3f2e5ea 100644
--- a/src/mesa/program/prog_to_nir.c
+++ b/src/mesa/program/prog_to_nir.c
@@ -1018,10 +1018,8 @@ prog_to_nir(const struct gl_program *prog,
nir_builder_init_simple_shader(&c->build, NULL, stage, options);
- /* Use the shader_info from gl_program rather than the one nir_builder
- * created for us. nir_sweep should clean up the other one for us.
- */
- c->build.shader->info = (shader_info *) &prog->info;
+ /* Copy the shader_info from the gl_program */
+ c->build.shader->info = prog->info;
s = c->build.shader;
@@ -1048,16 +1046,16 @@ prog_to_nir(const struct gl_program *prog,
ptn_add_output_stores(c);
- s->info->name = ralloc_asprintf(s, "ARB%d", prog->Id);
- s->info->num_textures = util_last_bit(prog->SamplersUsed);
- s->info->num_ubos = 0;
- s->info->num_abos = 0;
- s->info->num_ssbos = 0;
- s->info->num_images = 0;
- s->info->uses_texture_gather = false;
- s->info->clip_distance_array_size = 0;
- s->info->cull_distance_array_size = 0;
- s->info->separate_shader = false;
+ s->info.name = ralloc_asprintf(s, "ARB%d", prog->Id);
+ s->info.num_textures = util_last_bit(prog->SamplersUsed);
+ s->info.num_ubos = 0;
+ s->info.num_abos = 0;
+ s->info.num_ssbos = 0;
+ s->info.num_images = 0;
+ s->info.uses_texture_gather = false;
+ s->info.clip_distance_array_size = 0;
+ s->info.cull_distance_array_size = 0;
+ s->info.separate_shader = false;
fail:
if (c->error) {