Diffstat (limited to 'src/intel/compiler')
-rw-r--r--   src/intel/compiler/brw_fs.cpp              86
-rw-r--r--   src/intel/compiler/brw_fs_nir.cpp          18
-rw-r--r--   src/intel/compiler/brw_fs_visitor.cpp      10
-rw-r--r--   src/intel/compiler/brw_nir.c                8
-rw-r--r--   src/intel/compiler/brw_nir_intrinsics.c     4
-rw-r--r--   src/intel/compiler/brw_shader.cpp          28
-rw-r--r--   src/intel/compiler/brw_vec4.cpp            20
-rw-r--r--   src/intel/compiler/brw_vec4_generator.cpp   4
-rw-r--r--   src/intel/compiler/brw_vec4_gs_visitor.cpp 36
-rw-r--r--   src/intel/compiler/brw_vec4_nir.cpp         8
-rw-r--r--   src/intel/compiler/brw_vec4_tcs.cpp        28
-rw-r--r--   src/intel/compiler/brw_wm_iz.cpp            2
-rw-r--r--   src/intel/compiler/gen6_gs_visitor.cpp     12
13 files changed, 132 insertions, 132 deletions
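
Every hunk below is the same mechanical change: nir_shader formerly reached its shader_info through a pointer (nir->info->...), and now embeds the struct by value (nir->info....). A minimal sketch of the before/after access pattern, with simplified stand-in types rather than the real Mesa definitions:

    #include <stdint.h>

    /* Illustrative stand-ins, not the real Mesa structs. */
    struct shader_info {
       const char *name;
       uint64_t inputs_read;
    };

    struct nir_shader_old { struct shader_info *info; }; /* before: pointer   */
    struct nir_shader_new { struct shader_info info; };  /* after:  embedded  */

    static uint64_t
    inputs_old(const struct nir_shader_old *nir) { return nir->info->inputs_read; }

    static uint64_t
    inputs_new(const struct nir_shader_new *nir) { return nir->info.inputs_read; }

The only non-mechanical fallout is at call sites that forwarded the pointer itself; those now pass the address of the embedded struct, as in remap_vs_attrs(block, &nir->info) in the brw_nir.c hunk further down.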
diff --git a/src/intel/compiler/brw_fs.cpp b/src/intel/compiler/brw_fs.cpp
index 4dcdc1b46de..329c15b8b0b 100644
--- a/src/intel/compiler/brw_fs.cpp
+++ b/src/intel/compiler/brw_fs.cpp
@@ -1433,7 +1433,7 @@ fs_visitor::calculate_urb_setup()
int urb_next = 0;
/* Figure out where each of the incoming setup attributes lands. */
if (devinfo->gen >= 6) {
- if (_mesa_bitcount_64(nir->info->inputs_read &
+ if (_mesa_bitcount_64(nir->info.inputs_read &
BRW_FS_VARYING_INPUT_MASK) <= 16) {
/* The SF/SBE pipeline stage can do arbitrary rearrangement of the
* first 16 varying inputs, so we can put them wherever we want.
@@ -1445,14 +1445,14 @@ fs_visitor::calculate_urb_setup()
* a different vertex (or geometry) shader.
*/
for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
- if (nir->info->inputs_read & BRW_FS_VARYING_INPUT_MASK &
+ if (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
BITFIELD64_BIT(i)) {
prog_data->urb_setup[i] = urb_next++;
}
}
} else {
bool include_vue_header =
- nir->info->inputs_read & (VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT);
+ nir->info.inputs_read & (VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT);
/* We have enough input varyings that the SF/SBE pipeline stage can't
* arbitrarily rearrange them to suit our whim; we have to put them
@@ -1462,7 +1462,7 @@ fs_visitor::calculate_urb_setup()
struct brw_vue_map prev_stage_vue_map;
brw_compute_vue_map(devinfo, &prev_stage_vue_map,
key->input_slots_valid,
- nir->info->separate_shader);
+ nir->info.separate_shader);
int first_slot =
include_vue_header ? 0 : 2 * BRW_SF_URB_ENTRY_READ_OFFSET;
@@ -1471,7 +1471,7 @@ fs_visitor::calculate_urb_setup()
slot++) {
int varying = prev_stage_vue_map.slot_to_varying[slot];
if (varying != BRW_VARYING_SLOT_PAD &&
- (nir->info->inputs_read & BRW_FS_VARYING_INPUT_MASK &
+ (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
BITFIELD64_BIT(varying))) {
prog_data->urb_setup[varying] = slot - first_slot;
}
@@ -1504,7 +1504,7 @@ fs_visitor::calculate_urb_setup()
*
* See compile_sf_prog() for more info.
*/
- if (nir->info->inputs_read & BITFIELD64_BIT(VARYING_SLOT_PNTC))
+ if (nir->info.inputs_read & BITFIELD64_BIT(VARYING_SLOT_PNTC))
prog_data->urb_setup[VARYING_SLOT_PNTC] = urb_next++;
}
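
For context on what calculate_urb_setup does in this hunk: with 16 or fewer varyings the SF/SBE stage can rearrange inputs arbitrarily, so the compiler packs the read varyings into consecutive URB slots; with more, it must mirror the previous stage's VUE map. A minimal sketch of the packing branch, with a stand-in bound (VARYING_SLOT_MAX here is illustrative, not the Mesa value):

    #include <stdint.h>

    #define VARYING_SLOT_MAX 64   /* stand-in bound */

    /* Assign consecutive URB slots to each varying whose bit is set in
     * inputs_read; unread varyings get -1. Mirrors the <=16-input branch. */
    static void
    pack_varyings(uint64_t inputs_read, int urb_setup[VARYING_SLOT_MAX])
    {
       int urb_next = 0;
       for (unsigned i = 0; i < VARYING_SLOT_MAX; i++)
          urb_setup[i] = (inputs_read & ((uint64_t)1 << i)) ? urb_next++ : -1;
    }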
@@ -1631,7 +1631,7 @@ fs_visitor::assign_gs_urb_setup()
struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
first_non_payload_grf +=
- 8 * vue_prog_data->urb_read_length * nir->info->gs.vertices_in;
+ 8 * vue_prog_data->urb_read_length * nir->info.gs.vertices_in;
foreach_block_and_inst(block, fs_inst, inst, cfg) {
/* Rewrite all ATTR file references to GRFs. */
@@ -5456,7 +5456,7 @@ fs_visitor::setup_fs_payload_gen6()
/* R27: interpolated depth if uses source depth */
prog_data->uses_src_depth =
- (nir->info->inputs_read & (1 << VARYING_SLOT_POS)) != 0;
+ (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
if (prog_data->uses_src_depth) {
payload.source_depth_reg = payload.num_regs;
payload.num_regs++;
@@ -5468,7 +5468,7 @@ fs_visitor::setup_fs_payload_gen6()
/* R29: interpolated W set if GEN6_WM_USES_SOURCE_W. */
prog_data->uses_src_w =
- (nir->info->inputs_read & (1 << VARYING_SLOT_POS)) != 0;
+ (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
if (prog_data->uses_src_w) {
payload.source_w_reg = payload.num_regs;
payload.num_regs++;
@@ -5480,7 +5480,7 @@ fs_visitor::setup_fs_payload_gen6()
/* R31: MSAA position offsets. */
if (prog_data->persample_dispatch &&
- (nir->info->system_values_read & SYSTEM_BIT_SAMPLE_POS)) {
+ (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS)) {
/* From the Ivy Bridge PRM documentation for 3DSTATE_PS:
*
* "MSDISPMODE_PERSAMPLE is required in order to select
@@ -5497,7 +5497,7 @@ fs_visitor::setup_fs_payload_gen6()
/* R32: MSAA input coverage mask */
prog_data->uses_sample_mask =
- (nir->info->system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN) != 0;
+ (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN) != 0;
if (prog_data->uses_sample_mask) {
assert(devinfo->gen >= 7);
payload.sample_mask_in_reg = payload.num_regs;
@@ -5511,7 +5511,7 @@ fs_visitor::setup_fs_payload_gen6()
/* R34-: bary for 32-pixel. */
/* R58-59: interp W for 32-pixel. */
- if (nir->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
+ if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
source_depth_to_render_target = true;
}
}
@@ -5548,15 +5548,15 @@ fs_visitor::setup_gs_payload()
* Note that the GS reads <URB Read Length> HWords for every vertex - so we
* have to multiply by VerticesIn to obtain the total storage requirement.
*/
- if (8 * vue_prog_data->urb_read_length * nir->info->gs.vertices_in >
+ if (8 * vue_prog_data->urb_read_length * nir->info.gs.vertices_in >
max_push_components || gs_prog_data->invocations > 1) {
gs_prog_data->base.include_vue_handles = true;
/* R3..RN: ICP Handles for each incoming vertex (when using pull model) */
- payload.num_regs += nir->info->gs.vertices_in;
+ payload.num_regs += nir->info.gs.vertices_in;
vue_prog_data->urb_read_length =
- ROUND_DOWN_TO(max_push_components / nir->info->gs.vertices_in, 8) / 8;
+ ROUND_DOWN_TO(max_push_components / nir->info.gs.vertices_in, 8) / 8;
}
}
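
The decision this hunk touches: per-vertex URB data is pushed into the payload only while 8 * urb_read_length * vertices_in fits in the push budget; past that (or with multiple invocations) the shader keeps VUE handles and pulls. A hedged sketch of the arithmetic, in the same units as the code (urb_read_length in HWords, 8 floats each):

    #include <stdbool.h>

    /* Returns the (possibly reduced) per-vertex URB read length in HWords. */
    static unsigned
    push_read_length(unsigned urb_read_length, unsigned vertices_in,
                     unsigned max_push_components, bool *use_pull_model)
    {
       *use_pull_model = 8 * urb_read_length * vertices_in > max_push_components;
       if (*use_pull_model)  /* ROUND_DOWN_TO(x, 8) / 8 == x / 8 for unsigned x */
          return (max_push_components / vertices_in) / 8;
       return urb_read_length;
    }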
@@ -5657,7 +5657,7 @@ fs_visitor::optimize()
if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \
char filename[64]; \
snprintf(filename, 64, "%s%d-%s-%02d-%02d-" #pass, \
- stage_abbrev, dispatch_width, nir->info->name, iteration, pass_num); \
+ stage_abbrev, dispatch_width, nir->info.name, iteration, pass_num); \
\
backend_shader::dump_instructions(filename); \
} \
@@ -5671,7 +5671,7 @@ fs_visitor::optimize()
if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
char filename[64];
snprintf(filename, 64, "%s%d-%s-00-00-start",
- stage_abbrev, dispatch_width, nir->info->name);
+ stage_abbrev, dispatch_width, nir->info.name);
backend_shader::dump_instructions(filename);
}
@@ -5968,15 +5968,15 @@ fs_visitor::run_tcs_single_patch()
}
/* Fix the dispatch mask */
- if (nir->info->tess.tcs_vertices_out % 8) {
+ if (nir->info.tess.tcs_vertices_out % 8) {
bld.CMP(bld.null_reg_ud(), invocation_id,
- brw_imm_ud(nir->info->tess.tcs_vertices_out), BRW_CONDITIONAL_L);
+ brw_imm_ud(nir->info.tess.tcs_vertices_out), BRW_CONDITIONAL_L);
bld.IF(BRW_PREDICATE_NORMAL);
}
emit_nir_code();
- if (nir->info->tess.tcs_vertices_out % 8) {
+ if (nir->info.tess.tcs_vertices_out % 8) {
bld.emit(BRW_OPCODE_ENDIF);
}
@@ -6119,8 +6119,8 @@ fs_visitor::run_fs(bool allow_spilling, bool do_rep_send)
emit_shader_time_begin();
calculate_urb_setup();
- if (nir->info->inputs_read > 0 ||
- (nir->info->outputs_read > 0 && !wm_key->coherent_fb_fetch)) {
+ if (nir->info.inputs_read > 0 ||
+ (nir->info.outputs_read > 0 && !wm_key->coherent_fb_fetch)) {
if (devinfo->gen < 6)
emit_interpolation_setup_gen4();
else
@@ -6284,8 +6284,8 @@ brw_compute_flat_inputs(struct brw_wm_prog_data *prog_data,
static uint8_t
computed_depth_mode(const nir_shader *shader)
{
- if (shader->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
- switch (shader->info->fs.depth_layout) {
+ if (shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
+ switch (shader->info.fs.depth_layout) {
case FRAG_DEPTH_LAYOUT_NONE:
case FRAG_DEPTH_LAYOUT_ANY:
return BRW_PSCDEPTH_ON;
@@ -6465,25 +6465,25 @@ brw_compile_fs(const struct brw_compiler *compiler, void *log_data,
/* key->alpha_test_func means simulating alpha testing via discards,
* so the shader definitely kills pixels.
*/
- prog_data->uses_kill = shader->info->fs.uses_discard ||
+ prog_data->uses_kill = shader->info.fs.uses_discard ||
key->alpha_test_func;
prog_data->uses_omask = key->multisample_fbo &&
- shader->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
+ shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
prog_data->computed_depth_mode = computed_depth_mode(shader);
prog_data->computed_stencil =
- shader->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL);
+ shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL);
prog_data->persample_dispatch =
key->multisample_fbo &&
(key->persample_interp ||
- (shader->info->system_values_read & (SYSTEM_BIT_SAMPLE_ID |
+ (shader->info.system_values_read & (SYSTEM_BIT_SAMPLE_ID |
SYSTEM_BIT_SAMPLE_POS)) ||
- shader->info->fs.uses_sample_qualifier ||
- shader->info->outputs_read);
+ shader->info.fs.uses_sample_qualifier ||
+ shader->info.outputs_read);
- prog_data->early_fragment_tests = shader->info->fs.early_fragment_tests;
- prog_data->post_depth_coverage = shader->info->fs.post_depth_coverage;
- prog_data->inner_coverage = shader->info->fs.inner_coverage;
+ prog_data->early_fragment_tests = shader->info.fs.early_fragment_tests;
+ prog_data->post_depth_coverage = shader->info.fs.post_depth_coverage;
+ prog_data->inner_coverage = shader->info.fs.inner_coverage;
prog_data->barycentric_interp_modes =
brw_compute_barycentric_interp_modes(compiler->devinfo, shader);
@@ -6566,9 +6566,9 @@ brw_compile_fs(const struct brw_compiler *compiler, void *log_data,
if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
g.enable_debug(ralloc_asprintf(mem_ctx, "%s fragment shader %s",
- shader->info->label ?
- shader->info->label : "unnamed",
- shader->info->name));
+ shader->info.label ?
+ shader->info.label : "unnamed",
+ shader->info.name));
}
if (simd8_cfg) {
@@ -6700,12 +6700,12 @@ brw_compile_cs(const struct brw_compiler *compiler, void *log_data,
brw_nir_lower_intrinsics(shader, &prog_data->base);
shader = brw_postprocess_nir(shader, compiler, true);
- prog_data->local_size[0] = shader->info->cs.local_size[0];
- prog_data->local_size[1] = shader->info->cs.local_size[1];
- prog_data->local_size[2] = shader->info->cs.local_size[2];
+ prog_data->local_size[0] = shader->info.cs.local_size[0];
+ prog_data->local_size[1] = shader->info.cs.local_size[1];
+ prog_data->local_size[2] = shader->info.cs.local_size[2];
unsigned local_workgroup_size =
- shader->info->cs.local_size[0] * shader->info->cs.local_size[1] *
- shader->info->cs.local_size[2];
+ shader->info.cs.local_size[0] * shader->info.cs.local_size[1] *
+ shader->info.cs.local_size[2];
unsigned max_cs_threads = compiler->devinfo->max_cs_threads;
unsigned simd_required = DIV_ROUND_UP(local_workgroup_size, max_cs_threads);
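
On the compute side, the embedded local_size feeds the SIMD-width choice: each hardware thread covers simd_width invocations, so the whole workgroup fits in max_cs_threads threads only when simd_width >= group_size / max_cs_threads. A sketch of that bound (rounding up to a supported width of 8/16/32 is an assumption about the selection step, not code from this diff):

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned
    min_simd_width(const unsigned local_size[3], unsigned max_cs_threads)
    {
       unsigned group_size = local_size[0] * local_size[1] * local_size[2];
       unsigned simd_required = DIV_ROUND_UP(group_size, max_cs_threads);
       return simd_required <= 8 ? 8 : simd_required <= 16 ? 16 : 32;
    }
    /* e.g. a 16x16x1 group (256 invocations) with max_cs_threads = 64
     * gives simd_required = 4, so SIMD8 already suffices. */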
@@ -6795,9 +6795,9 @@ brw_compile_cs(const struct brw_compiler *compiler, void *log_data,
MESA_SHADER_COMPUTE);
if (INTEL_DEBUG & DEBUG_CS) {
char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
- shader->info->label ? shader->info->label :
+ shader->info.label ? shader->info.label :
"unnamed",
- shader->info->name);
+ shader->info.name);
g.enable_debug(name);
}
diff --git a/src/intel/compiler/brw_fs_nir.cpp b/src/intel/compiler/brw_fs_nir.cpp
index 23cd4b73f07..2ea94ab6939 100644
--- a/src/intel/compiler/brw_fs_nir.cpp
+++ b/src/intel/compiler/brw_fs_nir.cpp
@@ -1853,7 +1853,7 @@ fs_visitor::emit_gs_vertex(const nir_src &vertex_count_nir_src,
* be recorded by transform feedback, we can simply discard all geometry
* bound to these streams when transform feedback is disabled.
*/
- if (stream_id > 0 && !nir->info->has_transform_feedback_varyings)
+ if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
return;
/* If we're outputting 32 control data bits or less, then we can wait
@@ -2008,12 +2008,12 @@ fs_visitor::emit_gs_input_load(const fs_reg &dst,
/* Use first_icp_handle as the base offset. There is one register
* of URB handles per vertex, so inform the register allocator that
- * we might read up to nir->info->gs.vertices_in registers.
+ * we might read up to nir->info.gs.vertices_in registers.
*/
bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
retype(brw_vec8_grf(first_icp_handle, 0), icp_handle.type),
fs_reg(icp_offset_bytes),
- brw_imm_ud(nir->info->gs.vertices_in * REG_SIZE));
+ brw_imm_ud(nir->info.gs.vertices_in * REG_SIZE));
}
} else {
assert(gs_prog_data->invocations > 1);
@@ -2039,12 +2039,12 @@ fs_visitor::emit_gs_input_load(const fs_reg &dst,
/* Use first_icp_handle as the base offset. There is one DWord
* of URB handles per vertex, so inform the register allocator that
- * we might read up to ceil(nir->info->gs.vertices_in / 8) registers.
+ * we might read up to ceil(nir->info.gs.vertices_in / 8) registers.
*/
bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
retype(brw_vec8_grf(first_icp_handle, 0), icp_handle.type),
fs_reg(icp_offset_bytes),
- brw_imm_ud(DIV_ROUND_UP(nir->info->gs.vertices_in, 8) *
+ brw_imm_ud(DIV_ROUND_UP(nir->info.gs.vertices_in, 8) *
REG_SIZE));
}
}
@@ -3849,7 +3849,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
*/
brw_mark_surface_used(prog_data,
stage_prog_data->binding_table.ubo_start +
- nir->info->num_ubos - 1);
+ nir->info.num_ubos - 1);
}
nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
@@ -3919,7 +3919,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
*/
brw_mark_surface_used(prog_data,
stage_prog_data->binding_table.ssbo_start +
- nir->info->num_ssbos - 1);
+ nir->info.num_ssbos - 1);
}
fs_reg offset_reg;
@@ -3959,7 +3959,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
brw_mark_surface_used(prog_data,
stage_prog_data->binding_table.ssbo_start +
- nir->info->num_ssbos - 1);
+ nir->info.num_ssbos - 1);
}
/* Value */
@@ -4171,7 +4171,7 @@ fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld,
*/
brw_mark_surface_used(prog_data,
stage_prog_data->binding_table.ssbo_start +
- nir->info->num_ssbos - 1);
+ nir->info.num_ssbos - 1);
}
fs_reg offset = get_nir_src(instr->src[1]);
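
A pattern worth naming, since it repeats in four hunks above and again in brw_vec4_nir.cpp below: when a UBO/SSBO index is not a compile-time constant, the compiler cannot tell which surface will be hit, so it marks the last entry of the array (start + num - 1) as used, keeping the whole binding-table range live. A minimal sketch with simplified bookkeeping (the struct is a stand-in for the real prog_data binding-table state):

    struct binding_table_sketch { unsigned size; };

    /* Record that surf_index may be accessed; growing the high-water mark
     * to start + count - 1 keeps every entry of an indirectly indexed
     * array resident. */
    static void
    mark_surface_used(struct binding_table_sketch *bt, unsigned surf_index)
    {
       if (surf_index + 1 > bt->size)
          bt->size = surf_index + 1;
    }

    static void
    mark_indirect_array(struct binding_table_sketch *bt,
                        unsigned start, unsigned count)
    {
       mark_surface_used(bt, start + count - 1); /* e.g. ssbo_start + num_ssbos - 1 */
    }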
diff --git a/src/intel/compiler/brw_fs_visitor.cpp b/src/intel/compiler/brw_fs_visitor.cpp
index cea38d86237..cd411481d84 100644
--- a/src/intel/compiler/brw_fs_visitor.cpp
+++ b/src/intel/compiler/brw_fs_visitor.cpp
@@ -36,7 +36,7 @@ fs_reg *
fs_visitor::emit_vs_system_value(int location)
{
fs_reg *reg = new(this->mem_ctx)
- fs_reg(ATTR, 4 * _mesa_bitcount_64(nir->info->inputs_read),
+ fs_reg(ATTR, 4 * _mesa_bitcount_64(nir->info.inputs_read),
BRW_REGISTER_TYPE_D);
struct brw_vs_prog_data *vs_prog_data = brw_vs_prog_data(prog_data);
@@ -60,7 +60,7 @@ fs_visitor::emit_vs_system_value(int location)
vs_prog_data->uses_instanceid = true;
break;
case SYSTEM_VALUE_DRAW_ID:
- if (nir->info->system_values_read &
+ if (nir->info.system_values_read &
(BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX) |
BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
@@ -414,13 +414,13 @@ fs_visitor::emit_single_fb_write(const fs_builder &bld,
fs_reg src_depth, src_stencil;
if (source_depth_to_render_target) {
- if (nir->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
+ if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
src_depth = frag_depth;
else
src_depth = fs_reg(brw_vec8_grf(payload.source_depth_reg, 0));
}
- if (nir->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL))
+ if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL))
src_stencil = frag_stencil;
const fs_reg sources[] = {
@@ -459,7 +459,7 @@ fs_visitor::emit_fb_writes()
limit_dispatch_width(8, "Depth writes unsupported in SIMD16+ mode.\n");
}
- if (nir->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
+ if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
/* From the 'Render Target Write message' section of the docs:
* "Output Stencil is not supported with SIMD16 Render Target Write
* Messages."
diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c
index 3c0a7ced572..1bd6d02aaef 100644
--- a/src/intel/compiler/brw_nir.c
+++ b/src/intel/compiler/brw_nir.c
@@ -199,8 +199,8 @@ remap_patch_urb_offsets(nir_block *block, nir_builder *b,
const struct brw_vue_map *vue_map,
GLenum tes_primitive_mode)
{
- const bool is_passthrough_tcs = b->shader->info->name &&
- strcmp(b->shader->info->name, "passthrough") == 0;
+ const bool is_passthrough_tcs = b->shader->info.name &&
+ strcmp(b->shader->info.name, "passthrough") == 0;
nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
@@ -283,7 +283,7 @@ brw_nir_lower_vs_inputs(nir_shader *nir,
nir_foreach_function(function, nir) {
if (function->impl) {
nir_foreach_block(block, function->impl) {
- remap_vs_attrs(block, nir->info);
+ remap_vs_attrs(block, &nir->info);
}
}
}
@@ -337,7 +337,7 @@ brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
nir_builder_init(&b, function->impl);
nir_foreach_block(block, function->impl) {
remap_patch_urb_offsets(block, &b, vue_map,
- nir->info->tess.primitive_mode);
+ nir->info.tess.primitive_mode);
}
}
}
diff --git a/src/intel/compiler/brw_nir_intrinsics.c b/src/intel/compiler/brw_nir_intrinsics.c
index 901a1fb0ab9..d63570fa2a7 100644
--- a/src/intel/compiler/brw_nir_intrinsics.c
+++ b/src/intel/compiler/brw_nir_intrinsics.c
@@ -41,7 +41,7 @@ read_thread_local_id(struct lower_intrinsics_state *state)
{
nir_builder *b = &state->builder;
nir_shader *nir = state->nir;
- const unsigned *sizes = nir->info->cs.local_size;
+ const unsigned *sizes = nir->info.cs.local_size;
const unsigned group_size = sizes[0] * sizes[1] * sizes[2];
/* Some programs have local_size dimensions so small that the thread local
@@ -111,7 +111,7 @@ lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state,
* (gl_WorkGroupSize.x * gl_WorkGroupSize.y)) %
* gl_WorkGroupSize.z;
*/
- unsigned *size = nir->info->cs.local_size;
+ unsigned *size = nir->info.cs.local_size;
nir_ssa_def *local_index = nir_load_local_invocation_index(b);
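
Written out in full, the decomposition the quoted comment describes (a worked sketch in plain C, not the NIR the pass actually emits):

    #include <stdint.h>

    /* Decompose a flat local invocation index into (x, y, z) given the
     * workgroup size; x varies fastest, matching the formulas above. */
    static void
    local_id_from_index(uint32_t index, const uint32_t size[3], uint32_t id[3])
    {
       id[0] = index % size[0];
       id[1] = (index / size[0]) % size[1];
       id[2] = (index / (size[0] * size[1])) % size[2];
    }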
diff --git a/src/intel/compiler/brw_shader.cpp b/src/intel/compiler/brw_shader.cpp
index 304b4ecf4fa..269b8a099a4 100644
--- a/src/intel/compiler/brw_shader.cpp
+++ b/src/intel/compiler/brw_shader.cpp
@@ -1168,8 +1168,8 @@ brw_compile_tes(const struct brw_compiler *compiler,
const bool is_scalar = compiler->scalar_stage[MESA_SHADER_TESS_EVAL];
nir_shader *nir = nir_shader_clone(mem_ctx, src_shader);
- nir->info->inputs_read = key->inputs_read;
- nir->info->patch_inputs_read = key->patch_inputs_read;
+ nir->info.inputs_read = key->inputs_read;
+ nir->info.patch_inputs_read = key->patch_inputs_read;
nir = brw_nir_apply_sampler_key(nir, compiler, &key->tex, is_scalar);
brw_nir_lower_tes_inputs(nir, input_vue_map);
@@ -1177,8 +1177,8 @@ brw_compile_tes(const struct brw_compiler *compiler,
nir = brw_postprocess_nir(nir, compiler, is_scalar);
brw_compute_vue_map(devinfo, &prog_data->base.vue_map,
- nir->info->outputs_written,
- nir->info->separate_shader);
+ nir->info.outputs_written,
+ nir->info.separate_shader);
unsigned output_size_bytes = prog_data->base.vue_map.num_slots * 4 * 4;
@@ -1190,10 +1190,10 @@ brw_compile_tes(const struct brw_compiler *compiler,
}
prog_data->base.clip_distance_mask =
- ((1 << nir->info->clip_distance_array_size) - 1);
+ ((1 << nir->info.clip_distance_array_size) - 1);
prog_data->base.cull_distance_mask =
- ((1 << nir->info->cull_distance_array_size) - 1) <<
- nir->info->clip_distance_array_size;
+ ((1 << nir->info.cull_distance_array_size) - 1) <<
+ nir->info.clip_distance_array_size;
/* URB entry sizes are stored as a multiple of 64 bytes. */
prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 64) / 64;
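
The two mask assignments above pack both distance arrays into one bitfield: clip distances occupy the low bits and cull distances sit immediately above them. A small sketch with a worked example:

    #include <stdint.h>

    static void
    compute_clip_cull_masks(unsigned clip_size, unsigned cull_size,
                            uint32_t *clip_mask, uint32_t *cull_mask)
    {
       *clip_mask = (1u << clip_size) - 1;                /* low bits          */
       *cull_mask = ((1u << cull_size) - 1) << clip_size; /* stacked above     */
    }
    /* clip_size = 2, cull_size = 3  ->  clip_mask = 0b00011, cull_mask = 0b11100 */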
@@ -1206,9 +1206,9 @@ brw_compile_tes(const struct brw_compiler *compiler,
TESS_SPACING_FRACTIONAL_EVEN - 1);
prog_data->partitioning =
- (enum brw_tess_partitioning) (nir->info->tess.spacing - 1);
+ (enum brw_tess_partitioning) (nir->info.tess.spacing - 1);
- switch (nir->info->tess.primitive_mode) {
+ switch (nir->info.tess.primitive_mode) {
case GL_QUADS:
prog_data->domain = BRW_TESS_DOMAIN_QUAD;
break;
@@ -1222,14 +1222,14 @@ brw_compile_tes(const struct brw_compiler *compiler,
unreachable("invalid domain shader primitive mode");
}
- if (nir->info->tess.point_mode) {
+ if (nir->info.tess.point_mode) {
prog_data->output_topology = BRW_TESS_OUTPUT_TOPOLOGY_POINT;
- } else if (nir->info->tess.primitive_mode == GL_ISOLINES) {
+ } else if (nir->info.tess.primitive_mode == GL_ISOLINES) {
prog_data->output_topology = BRW_TESS_OUTPUT_TOPOLOGY_LINE;
} else {
/* Hardware winding order is backwards from OpenGL */
prog_data->output_topology =
- nir->info->tess.ccw ? BRW_TESS_OUTPUT_TOPOLOGY_TRI_CW
+ nir->info.tess.ccw ? BRW_TESS_OUTPUT_TOPOLOGY_TRI_CW
: BRW_TESS_OUTPUT_TOPOLOGY_TRI_CCW;
}
@@ -1259,9 +1259,9 @@ brw_compile_tes(const struct brw_compiler *compiler,
if (unlikely(INTEL_DEBUG & DEBUG_TES)) {
g.enable_debug(ralloc_asprintf(mem_ctx,
"%s tessellation evaluation shader %s",
- nir->info->label ? nir->info->label
+ nir->info.label ? nir->info.label
: "unnamed",
- nir->info->name));
+ nir->info.name));
}
g.generate_code(v.cfg, 8);
diff --git a/src/intel/compiler/brw_vec4.cpp b/src/intel/compiler/brw_vec4.cpp
index 70487d3c151..9f280840091 100644
--- a/src/intel/compiler/brw_vec4.cpp
+++ b/src/intel/compiler/brw_vec4.cpp
@@ -2620,7 +2620,7 @@ vec4_visitor::run()
if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \
char filename[64]; \
snprintf(filename, 64, "%s-%s-%02d-%02d-" #pass, \
- stage_abbrev, nir->info->name, iteration, pass_num); \
+ stage_abbrev, nir->info.name, iteration, pass_num); \
\
backend_shader::dump_instructions(filename); \
} \
@@ -2633,7 +2633,7 @@ vec4_visitor::run()
if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
char filename[64];
snprintf(filename, 64, "%s-%s-00-00-start",
- stage_abbrev, nir->info->name);
+ stage_abbrev, nir->info.name);
backend_shader::dump_instructions(filename);
}
@@ -2779,17 +2779,17 @@ brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
const unsigned *assembly = NULL;
prog_data->base.clip_distance_mask =
- ((1 << shader->info->clip_distance_array_size) - 1);
+ ((1 << shader->info.clip_distance_array_size) - 1);
prog_data->base.cull_distance_mask =
- ((1 << shader->info->cull_distance_array_size) - 1) <<
- shader->info->clip_distance_array_size;
+ ((1 << shader->info.cull_distance_array_size) - 1) <<
+ shader->info.clip_distance_array_size;
unsigned nr_attribute_slots = _mesa_bitcount_64(prog_data->inputs_read);
/* gl_VertexID and gl_InstanceID are system values, but arrive via an
* incoming vertex attribute. So, add an extra slot.
*/
- if (shader->info->system_values_read &
+ if (shader->info.system_values_read &
(BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX) |
BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
@@ -2798,13 +2798,13 @@ brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
}
/* gl_DrawID has its very own vec4 */
- if (shader->info->system_values_read &
+ if (shader->info.system_values_read &
BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID)) {
nr_attribute_slots++;
}
unsigned nr_attributes = nr_attribute_slots -
- DIV_ROUND_UP(_mesa_bitcount_64(shader->info->double_inputs_read), 2);
+ DIV_ROUND_UP(_mesa_bitcount_64(shader->info.double_inputs_read), 2);
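
The slot accounting in this hunk, collected in one place: system values delivered as vertex attributes (gl_VertexID/gl_InstanceID, and gl_DrawID in its own vec4) add slots, and 64-bit inputs occupy two slots each, which the nr_attributes line compensates for. A hedged sketch (__builtin_popcountll stands in for _mesa_bitcount_64):

    #include <stdbool.h>
    #include <stdint.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned
    count_vs_slots(uint64_t inputs_read, uint64_t double_inputs_read,
                   bool reads_vertex_or_instance_id, bool reads_draw_id,
                   unsigned *nr_attributes)
    {
       unsigned slots = (unsigned)__builtin_popcountll(inputs_read);
       if (reads_vertex_or_instance_id)
          slots++;                       /* extra incoming attribute slot */
       if (reads_draw_id)
          slots++;                       /* gl_DrawID's own vec4          */
       /* Each double-precision attribute spans two slots but counts once. */
       *nr_attributes = slots -
          DIV_ROUND_UP((unsigned)__builtin_popcountll(double_inputs_read), 2);
       return slots;                     /* nr_attribute_slots */
    }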
/* The 3DSTATE_VS documentation lists the lower bound on "Vertex URB Entry
* Read Length" as 1 in vec4 mode, and 0 in SIMD8 mode. Empirically, in
@@ -2858,9 +2858,9 @@ brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
if (INTEL_DEBUG & DEBUG_VS) {
const char *debug_name =
ralloc_asprintf(mem_ctx, "%s vertex shader %s",
- shader->info->label ? shader->info->label :
+ shader->info.label ? shader->info.label :
"unnamed",
- shader->info->name);
+ shader->info.name);
g.enable_debug(debug_name);
}
diff --git a/src/intel/compiler/brw_vec4_generator.cpp b/src/intel/compiler/brw_vec4_generator.cpp
index 753b00c4ed1..8505f693499 100644
--- a/src/intel/compiler/brw_vec4_generator.cpp
+++ b/src/intel/compiler/brw_vec4_generator.cpp
@@ -2192,8 +2192,8 @@ generate_code(struct brw_codegen *p,
if (unlikely(debug_flag)) {
fprintf(stderr, "Native code for %s %s shader %s:\n",
- nir->info->label ? nir->info->label : "unnamed",
- _mesa_shader_stage_to_string(nir->stage), nir->info->name);
+ nir->info.label ? nir->info.label : "unnamed",
+ _mesa_shader_stage_to_string(nir->stage), nir->info.name);
fprintf(stderr, "%s vec4 shader: %d instructions. %d loops. %u cycles. %d:%d "
"spills:fills. Compacted %d to %d bytes (%.0f%%)\n",
diff --git a/src/intel/compiler/brw_vec4_gs_visitor.cpp b/src/intel/compiler/brw_vec4_gs_visitor.cpp
index 4a8b5be30e1..9793ef50125 100644
--- a/src/intel/compiler/brw_vec4_gs_visitor.cpp
+++ b/src/intel/compiler/brw_vec4_gs_visitor.cpp
@@ -85,7 +85,7 @@ vec4_gs_visitor::setup_varying_inputs(int payload_reg, int *attribute_map,
* so the total number of input slots that will be delivered to the GS (and
* thus the stride of the input arrays) is urb_read_length * 2.
*/
- const unsigned num_input_vertices = nir->info->gs.vertices_in;
+ const unsigned num_input_vertices = nir->info.gs.vertices_in;
assert(num_input_vertices <= MAX_GS_INPUT_VERTICES);
unsigned input_array_stride = prog_data->urb_read_length * 2;
@@ -455,7 +455,7 @@ vec4_gs_visitor::gs_emit_vertex(int stream_id)
* be recorded by transform feedback, we can simply discard all geometry
* bound to these streams when transform feedback is disabled.
*/
- if (stream_id > 0 && !nir->info->has_transform_feedback_varyings)
+ if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
return;
/* If we're outputting 32 control data bits or less, then we can wait
@@ -628,10 +628,10 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
* For SSO pipelines, we use a fixed VUE map layout based on variable
* locations, so we can rely on rendezvous-by-location making this work.
*/
- GLbitfield64 inputs_read = shader->info->inputs_read;
+ GLbitfield64 inputs_read = shader->info.inputs_read;
brw_compute_vue_map(compiler->devinfo,
&c.input_vue_map, inputs_read,
- shader->info->separate_shader);
+ shader->info.separate_shader);
shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, is_scalar);
brw_nir_lower_vue_inputs(shader, is_scalar, &c.input_vue_map);
@@ -639,21 +639,21 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
shader = brw_postprocess_nir(shader, compiler, is_scalar);
prog_data->base.clip_distance_mask =
- ((1 << shader->info->clip_distance_array_size) - 1);
+ ((1 << shader->info.clip_distance_array_size) - 1);
prog_data->base.cull_distance_mask =
- ((1 << shader->info->cull_distance_array_size) - 1) <<
- shader->info->clip_distance_array_size;
+ ((1 << shader->info.cull_distance_array_size) - 1) <<
+ shader->info.clip_distance_array_size;
prog_data->include_primitive_id =
- (shader->info->system_values_read & (1 << SYSTEM_VALUE_PRIMITIVE_ID)) != 0;
+ (shader->info.system_values_read & (1 << SYSTEM_VALUE_PRIMITIVE_ID)) != 0;
- prog_data->invocations = shader->info->gs.invocations;
+ prog_data->invocations = shader->info.gs.invocations;
if (compiler->devinfo->gen >= 8)
prog_data->static_vertex_count = nir_gs_count_vertices(shader);
if (compiler->devinfo->gen >= 7) {
- if (shader->info->gs.output_primitive == GL_POINTS) {
+ if (shader->info.gs.output_primitive == GL_POINTS) {
/* When the output type is points, the geometry shader may output data
* to multiple streams, and EndPrimitive() has no effect. So we
* configure the hardware to interpret the control data as stream ID.
@@ -678,14 +678,14 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
* EndPrimitive().
*/
c.control_data_bits_per_vertex =
- shader->info->gs.uses_end_primitive ? 1 : 0;
+ shader->info.gs.uses_end_primitive ? 1 : 0;
}
} else {
/* There are no control data bits in gen6. */
c.control_data_bits_per_vertex = 0;
}
c.control_data_header_size_bits =
- shader->info->gs.vertices_out * c.control_data_bits_per_vertex;
+ shader->info.gs.vertices_out * c.control_data_bits_per_vertex;
/* 1 HWORD = 32 bytes = 256 bits */
prog_data->control_data_header_size_hwords =
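
In numbers: each emitted vertex contributes bits_per_vertex control bits (2 for a stream ID when outputting points, 1 for an EndPrimitive cut bit, 0 on gen6), and the header is then sized in 256-bit HWords. A sketch of the rounding, assuming the usual round-up to whole HWords:

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* 1 HWord = 32 bytes = 256 bits, per the comment above. */
    static unsigned
    control_data_header_hwords(unsigned vertices_out, unsigned bits_per_vertex)
    {
       return DIV_ROUND_UP(vertices_out * bits_per_vertex, 256);
    }
    /* e.g. 128 output vertices with 2 stream-ID bits each -> exactly 1 HWord */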
@@ -780,7 +780,7 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
unsigned output_size_bytes;
if (compiler->devinfo->gen >= 7) {
output_size_bytes =
- prog_data->output_vertex_size_hwords * 32 * shader->info->gs.vertices_out;
+ prog_data->output_vertex_size_hwords * 32 * shader->info.gs.vertices_out;
output_size_bytes += 32 * prog_data->control_data_header_size_hwords;
} else {
output_size_bytes = prog_data->output_vertex_size_hwords * 32;
@@ -814,11 +814,11 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
else
prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 128) / 128;
- assert(shader->info->gs.output_primitive < ARRAY_SIZE(gl_prim_to_hw_prim));
+ assert(shader->info.gs.output_primitive < ARRAY_SIZE(gl_prim_to_hw_prim));
prog_data->output_topology =
- gl_prim_to_hw_prim[shader->info->gs.output_primitive];
+ gl_prim_to_hw_prim[shader->info.gs.output_primitive];
- prog_data->vertices_in = shader->info->gs.vertices_in;
+ prog_data->vertices_in = shader->info.gs.vertices_in;
/* GS inputs are read from the VUE 256 bits (2 vec4's) at a time, so we
* need to program a URB read length of ceiling(num_slots / 2).
@@ -847,9 +847,9 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
false, MESA_SHADER_GEOMETRY);
if (unlikely(INTEL_DEBUG & DEBUG_GS)) {
const char *label =
- shader->info->label ? shader->info->label : "unnamed";
+ shader->info.label ? shader->info.label : "unnamed";
char *name = ralloc_asprintf(mem_ctx, "%s geometry shader %s",
- label, shader->info->name);
+ label, shader->info.name);
g.enable_debug(name);
}
g.generate_code(v.cfg, 8);
diff --git a/src/intel/compiler/brw_vec4_nir.cpp b/src/intel/compiler/brw_vec4_nir.cpp
index a82d52088a8..8424e17e25c 100644
--- a/src/intel/compiler/brw_vec4_nir.cpp
+++ b/src/intel/compiler/brw_vec4_nir.cpp
@@ -570,7 +570,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
brw_mark_surface_used(&prog_data->base,
prog_data->base.binding_table.ssbo_start +
- nir->info->num_ssbos - 1);
+ nir->info.num_ssbos - 1);
}
/* Offset */
@@ -736,7 +736,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
*/
brw_mark_surface_used(&prog_data->base,
prog_data->base.binding_table.ssbo_start +
- nir->info->num_ssbos - 1);
+ nir->info.num_ssbos - 1);
}
src_reg offset_reg;
@@ -948,7 +948,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
*/
brw_mark_surface_used(&prog_data->base,
prog_data->base.binding_table.ubo_start +
- nir->info->num_ubos - 1);
+ nir->info.num_ubos - 1);
}
src_reg offset_reg;
@@ -1046,7 +1046,7 @@ vec4_visitor::nir_emit_ssbo_atomic(int op, nir_intrinsic_instr *instr)
*/
brw_mark_surface_used(&prog_data->base,
prog_data->base.binding_table.ssbo_start +
- nir->info->num_ssbos - 1);
+ nir->info.num_ssbos - 1);
}
src_reg offset = get_nir_src(instr->src[1], 1);
diff --git a/src/intel/compiler/brw_vec4_tcs.cpp b/src/intel/compiler/brw_vec4_tcs.cpp
index d4a647d029f..c362a0a5f14 100644
--- a/src/intel/compiler/brw_vec4_tcs.cpp
+++ b/src/intel/compiler/brw_vec4_tcs.cpp
@@ -95,9 +95,9 @@ vec4_tcs_visitor::emit_prolog()
* HS instance dispatched will only have its bottom half doing real
* work, and so we need to disable the upper half:
*/
- if (nir->info->tess.tcs_vertices_out % 2) {
+ if (nir->info.tess.tcs_vertices_out % 2) {
emit(CMP(dst_null_d(), invocation_id,
- brw_imm_ud(nir->info->tess.tcs_vertices_out),
+ brw_imm_ud(nir->info.tess.tcs_vertices_out),
BRW_CONDITIONAL_L));
/* Matching ENDIF is in emit_thread_end() */
@@ -112,7 +112,7 @@ vec4_tcs_visitor::emit_thread_end()
vec4_instruction *inst;
current_annotation = "thread end";
- if (nir->info->tess.tcs_vertices_out % 2) {
+ if (nir->info.tess.tcs_vertices_out % 2) {
emit(BRW_OPCODE_ENDIF);
}
@@ -402,15 +402,15 @@ brw_compile_tcs(const struct brw_compiler *compiler,
const bool is_scalar = compiler->scalar_stage[MESA_SHADER_TESS_CTRL];
nir_shader *nir = nir_shader_clone(mem_ctx, src_shader);
- nir->info->outputs_written = key->outputs_written;
- nir->info->patch_outputs_written = key->patch_outputs_written;
+ nir->info.outputs_written = key->outputs_written;
+ nir->info.patch_outputs_written = key->patch_outputs_written;
struct brw_vue_map input_vue_map;
- brw_compute_vue_map(devinfo, &input_vue_map, nir->info->inputs_read,
- nir->info->separate_shader);
+ brw_compute_vue_map(devinfo, &input_vue_map, nir->info.inputs_read,
+ nir->info.separate_shader);
brw_compute_tess_vue_map(&vue_prog_data->vue_map,
- nir->info->outputs_written,
- nir->info->patch_outputs_written);
+ nir->info.outputs_written,
+ nir->info.patch_outputs_written);
nir = brw_nir_apply_sampler_key(nir, compiler, &key->tex, is_scalar);
brw_nir_lower_vue_inputs(nir, is_scalar, &input_vue_map);
@@ -422,9 +422,9 @@ brw_compile_tcs(const struct brw_compiler *compiler,
nir = brw_postprocess_nir(nir, compiler, is_scalar);
if (is_scalar)
- prog_data->instances = DIV_ROUND_UP(nir->info->tess.tcs_vertices_out, 8);
+ prog_data->instances = DIV_ROUND_UP(nir->info.tess.tcs_vertices_out, 8);
else
- prog_data->instances = DIV_ROUND_UP(nir->info->tess.tcs_vertices_out, 2);
+ prog_data->instances = DIV_ROUND_UP(nir->info.tess.tcs_vertices_out, 2);
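
The divisor mirrors dispatch width: a scalar (SIMD8) HS thread runs 8 TCS invocations, a vec4 thread packs 2, so the instance count is the output vertex count divided by that width, rounded up. Sketch:

    #include <stdbool.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned
    tcs_instances(unsigned tcs_vertices_out, bool is_scalar)
    {
       /* 8 invocations per scalar thread, 2 per vec4 thread. */
       return DIV_ROUND_UP(tcs_vertices_out, is_scalar ? 8u : 2u);
    }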
/* Compute URB entry size. The maximum allowed URB entry size is 32k.
* That divides up as follows:
@@ -443,7 +443,7 @@ brw_compile_tcs(const struct brw_compiler *compiler,
unsigned output_size_bytes = 0;
/* Note that the patch header is counted in num_per_patch_slots. */
output_size_bytes += num_per_patch_slots * 16;
- output_size_bytes += nir->info->tess.tcs_vertices_out *
+ output_size_bytes += nir->info.tess.tcs_vertices_out *
num_per_vertex_slots * 16;
assert(output_size_bytes >= 1);
@@ -485,9 +485,9 @@ brw_compile_tcs(const struct brw_compiler *compiler,
if (unlikely(INTEL_DEBUG & DEBUG_TCS)) {
g.enable_debug(ralloc_asprintf(mem_ctx,
"%s tessellation control shader %s",
- nir->info->label ? nir->info->label
+ nir->info.label ? nir->info.label
: "unnamed",
- nir->info->name));
+ nir->info.name));
}
g.generate_code(v.cfg, 8);
diff --git a/src/intel/compiler/brw_wm_iz.cpp b/src/intel/compiler/brw_wm_iz.cpp
index 11d4f76b368..fead16586b6 100644
--- a/src/intel/compiler/brw_wm_iz.cpp
+++ b/src/intel/compiler/brw_wm_iz.cpp
@@ -142,7 +142,7 @@ void fs_visitor::setup_fs_payload_gen4()
}
prog_data->uses_src_depth =
- (nir->info->inputs_read & (1 << VARYING_SLOT_POS)) != 0;
+ (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
if (wm_iz_table[lookup].sd_present || prog_data->uses_src_depth ||
kill_stats_promoted_workaround) {
payload.source_depth_reg = reg;
diff --git a/src/intel/compiler/gen6_gs_visitor.cpp b/src/intel/compiler/gen6_gs_visitor.cpp
index 075bc4ad487..f76cdf02556 100644
--- a/src/intel/compiler/gen6_gs_visitor.cpp
+++ b/src/intel/compiler/gen6_gs_visitor.cpp
@@ -64,7 +64,7 @@ gen6_gs_visitor::emit_prolog()
this->vertex_output = src_reg(this,
glsl_type::uint_type,
(prog_data->vue_map.num_slots + 1) *
- nir->info->gs.vertices_out);
+ nir->info.gs.vertices_out);
this->vertex_output_offset = src_reg(this, glsl_type::uint_type);
emit(MOV(dst_reg(this->vertex_output_offset), brw_imm_ud(0u)));
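
Sizing note for this buffer: the gen6 path holds every emitted vertex until thread end, one uint per VUE slot plus one extra per vertex, which the later hunks appear to use for the PrimStart/PrimEnd flag word (that reading of the +1 is an inference from the surrounding code, not stated in this hunk). Sketch:

    /* Scratch entries needed to buffer a full GS invocation's output. */
    static unsigned
    vertex_output_entries(unsigned vue_map_num_slots, unsigned vertices_out)
    {
       return (vue_map_num_slots + 1) * vertices_out;
    }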
@@ -178,7 +178,7 @@ gen6_gs_visitor::gs_emit_vertex(int stream_id)
dst_reg dst(this->vertex_output);
dst.reladdr = ralloc(mem_ctx, src_reg);
memcpy(dst.reladdr, &this->vertex_output_offset, sizeof(src_reg));
- if (nir->info->gs.output_primitive == GL_POINTS) {
+ if (nir->info.gs.output_primitive == GL_POINTS) {
/* If we are outputting points, then every vertex has PrimStart and
* PrimEnd set.
*/
@@ -207,7 +207,7 @@ gen6_gs_visitor::gs_end_primitive()
/* Calling EndPrimitive() is optional for point output. In this case we set
* the PrimEnd flag when we process EmitVertex().
*/
- if (nir->info->gs.output_primitive == GL_POINTS)
+ if (nir->info.gs.output_primitive == GL_POINTS)
return;
/* Otherwise we know that the last vertex we have processed was the last
@@ -219,7 +219,7 @@ gen6_gs_visitor::gs_end_primitive()
* comparison below (hence the num_output_vertices + 1 in the comparison
* below).
*/
- unsigned num_output_vertices = nir->info->gs.vertices_out;
+ unsigned num_output_vertices = nir->info.gs.vertices_out;
emit(CMP(dst_null_ud(), this->vertex_count,
brw_imm_ud(num_output_vertices + 1), BRW_CONDITIONAL_L));
vec4_instruction *inst = emit(CMP(dst_null_ud(),
@@ -323,7 +323,7 @@ gen6_gs_visitor::emit_thread_end()
* first_vertex is not zero. This is only relevant for outputs other than
* points because in the point case we set PrimEnd on all vertices.
*/
- if (nir->info->gs.output_primitive != GL_POINTS) {
+ if (nir->info.gs.output_primitive != GL_POINTS) {
emit(CMP(dst_null_ud(), this->first_vertex, brw_imm_ud(0u), BRW_CONDITIONAL_Z));
emit(IF(BRW_PREDICATE_NORMAL));
gs_end_primitive();
@@ -625,7 +625,7 @@ gen6_gs_visitor::xfb_write()
emit(BRW_OPCODE_ENDIF);
/* Write transform feedback data for all processed vertices. */
- for (int i = 0; i < (int)nir->info->gs.vertices_out; i++) {
+ for (int i = 0; i < (int)nir->info.gs.vertices_out; i++) {
emit(MOV(dst_reg(sol_temp), brw_imm_d(i)));
emit(CMP(dst_null_d(), sol_temp, this->vertex_count,
BRW_CONDITIONAL_L));