author    Jason Ekstrand <[email protected]>  2015-11-25 14:14:05 -0800
committer Jason Ekstrand <[email protected]>  2015-12-10 12:25:16 -0800
commit    78b81be627734ea7fa50ea246c07b0d4a3a1638a (patch)
tree      10b0b098de5b3a111d076e9d8c5fca440fad45ad /src/glsl/nir
parent    f3970fad9e5b04e04de366a65fed5a30da618f9d (diff)
nir: Get rid of *_indirect variants of input/output load/store intrinsics
There is some special-casing needed in a competent back-end. However,
back-ends can do that special-casing easily enough based on whether or
not the offset is a constant. In the meantime, having the *_indirect
variants adds special cases in a number of places where they don't need
to be and, in general, only complicates things. To complicate matters,
NIR had no way to convert an indirect load/store to a direct one in the
case that the indirect was a constant, so we would still not really get
what the back-ends wanted. The best solution seems to be to get rid of
the *_indirect variants entirely.

This commit is a bunch of different changes squashed together:

 - nir: Get rid of *_indirect variants of input/output load/store intrinsics
 - nir/glsl: Stop handling UBO/SSBO load/stores differently depending on indirect
 - nir/lower_io: Get rid of load/store_foo_indirect
 - i965/fs: Get rid of load/store_foo_indirect
 - i965/vec4: Get rid of load/store_foo_indirect
 - tgsi_to_nir: Get rid of load/store_foo_indirect
 - ir3/nir: Use the new unified io intrinsics
 - vc4: Do all uniform loads with byte offsets
 - vc4/nir: Use the new unified io intrinsics
 - vc4: Fix load_user_clip_plane crash
 - vc4: add missing src for store outputs
 - vc4: Fix state uniforms
 - nir/lower_clip: Update to the new load/store intrinsics
 - nir/lower_two_sided_color: Update to the new load intrinsic

NIR and i965 changes are
Reviewed-by: Kenneth Graunke <[email protected]>

NIR indirect declarations and vc4 changes are
Reviewed-by: Eric Anholt <[email protected]>

ir3 changes are
Reviewed-by: Rob Clark <[email protected]>

NIR changes are
Acked-by: Rob Clark <[email protected]>
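A minimal sketch of the back-end pattern the message describes, assuming
this tree's NIR API and SSA form: with the *_indirect variants gone, a
back-end recovers the direct case by testing whether the offset source
is a constant. emit_direct_load() and emit_indirect_load() are
hypothetical placeholders for the back-end's own code paths.

/* Hedged sketch, not code from this commit. */
static void
backend_emit_load_input(nir_intrinsic_instr *instr)
{
   unsigned base = instr->const_index[0];
   nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);

   if (const_offset) {
      /* Direct access: fold the constant offset into the base. */
      emit_direct_load(base + const_offset->u[0]);
   } else {
      /* Indirect access: pass the offset SSA value through as-is. */
      emit_indirect_load(base, instr->src[0].ssa);
   }
}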
Diffstat (limited to 'src/glsl/nir')
-rw-r--r--  src/glsl/nir/glsl_to_nir.cpp              74
-rw-r--r--  src/glsl/nir/nir.h                         2
-rw-r--r--  src/glsl/nir/nir_intrinsics.h             88
-rw-r--r--  src/glsl/nir/nir_lower_clip.c              3
-rw-r--r--  src/glsl/nir/nir_lower_io.c              113
-rw-r--r--  src/glsl/nir/nir_lower_phis_to_scalar.c    4
-rw-r--r--  src/glsl/nir/nir_lower_two_sided_color.c   2
-rw-r--r--  src/glsl/nir/nir_print.c                   6
8 files changed, 107 insertions, 185 deletions
diff --git a/src/glsl/nir/glsl_to_nir.cpp b/src/glsl/nir/glsl_to_nir.cpp
index fc0f4049941..db8b0cae814 100644
--- a/src/glsl/nir/glsl_to_nir.cpp
+++ b/src/glsl/nir/glsl_to_nir.cpp
@@ -885,24 +885,12 @@ nir_visitor::visit(ir_call *ir)
ir_constant *write_mask = ((ir_instruction *)param)->as_constant();
assert(write_mask);
- /* Check if we need the indirect version */
- ir_constant *const_offset = offset->as_constant();
- if (!const_offset) {
- op = nir_intrinsic_store_ssbo_indirect;
- ralloc_free(instr);
- instr = nir_intrinsic_instr_create(shader, op);
- instr->src[2] = nir_src_for_ssa(evaluate_rvalue(offset));
- instr->const_index[0] = 0;
- } else {
- instr->const_index[0] = const_offset->value.u[0];
- }
-
- instr->const_index[1] = write_mask->value.u[0];
-
instr->src[0] = nir_src_for_ssa(evaluate_rvalue(val));
+ instr->src[1] = nir_src_for_ssa(evaluate_rvalue(block));
+ instr->src[2] = nir_src_for_ssa(evaluate_rvalue(offset));
+ instr->const_index[0] = write_mask->value.u[0];
instr->num_components = val->type->vector_elements;
- instr->src[1] = nir_src_for_ssa(evaluate_rvalue(block));
nir_builder_instr_insert(&b, &instr->instr);
break;
}
@@ -913,20 +901,8 @@ nir_visitor::visit(ir_call *ir)
param = param->get_next();
ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();
- /* Check if we need the indirect version */
- ir_constant *const_offset = offset->as_constant();
- if (!const_offset) {
- op = nir_intrinsic_load_ssbo_indirect;
- ralloc_free(instr);
- instr = nir_intrinsic_instr_create(shader, op);
- instr->src[1] = nir_src_for_ssa(evaluate_rvalue(offset));
- instr->const_index[0] = 0;
- dest = &instr->dest;
- } else {
- instr->const_index[0] = const_offset->value.u[0];
- }
-
instr->src[0] = nir_src_for_ssa(evaluate_rvalue(block));
+ instr->src[1] = nir_src_for_ssa(evaluate_rvalue(offset));
const glsl_type *type = ir->return_deref->var->type;
instr->num_components = type->vector_elements;
@@ -1010,18 +986,8 @@ nir_visitor::visit(ir_call *ir)
exec_node *param = ir->actual_parameters.get_head();
ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();
- /* Check if we need the indirect version */
- ir_constant *const_offset = offset->as_constant();
- if (!const_offset) {
- op = nir_intrinsic_load_shared_indirect;
- ralloc_free(instr);
- instr = nir_intrinsic_instr_create(shader, op);
- instr->src[0] = nir_src_for_ssa(evaluate_rvalue(offset));
- instr->const_index[0] = 0;
- dest = &instr->dest;
- } else {
- instr->const_index[0] = const_offset->value.u[0];
- }
+ instr->const_index[0] = 0;
+ instr->src[0] = nir_src_for_ssa(evaluate_rvalue(offset));
const glsl_type *type = ir->return_deref->var->type;
instr->num_components = type->vector_elements;
@@ -1044,17 +1010,8 @@ nir_visitor::visit(ir_call *ir)
ir_constant *write_mask = ((ir_instruction *)param)->as_constant();
assert(write_mask);
- /* Check if we need the indirect version */
- ir_constant *const_offset = offset->as_constant();
- if (!const_offset) {
- op = nir_intrinsic_store_shared_indirect;
- ralloc_free(instr);
- instr = nir_intrinsic_instr_create(shader, op);
- instr->src[1] = nir_src_for_ssa(evaluate_rvalue(offset));
- instr->const_index[0] = 0;
- } else {
- instr->const_index[0] = const_offset->value.u[0];
- }
+ instr->const_index[0] = 0;
+ instr->src[1] = nir_src_for_ssa(evaluate_rvalue(offset));
instr->const_index[1] = write_mask->value.u[0];
@@ -1303,20 +1260,11 @@ nir_visitor::visit(ir_expression *ir)
/* Some special cases */
switch (ir->operation) {
case ir_binop_ubo_load: {
- ir_constant *const_index = ir->operands[1]->as_constant();
-
- nir_intrinsic_op op;
- if (const_index) {
- op = nir_intrinsic_load_ubo;
- } else {
- op = nir_intrinsic_load_ubo_indirect;
- }
- nir_intrinsic_instr *load = nir_intrinsic_instr_create(this->shader, op);
+ nir_intrinsic_instr *load =
+ nir_intrinsic_instr_create(this->shader, nir_intrinsic_load_ubo);
load->num_components = ir->type->vector_elements;
- load->const_index[0] = const_index ? const_index->value.u[0] : 0; /* base offset */
load->src[0] = nir_src_for_ssa(evaluate_rvalue(ir->operands[0]));
- if (!const_index)
- load->src[1] = nir_src_for_ssa(evaluate_rvalue(ir->operands[1]));
+ load->src[1] = nir_src_for_ssa(evaluate_rvalue(ir->operands[1]));
add_instr(&load->instr, ir->type->vector_elements);
/*
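The unified form above generalizes to any UBO load. A hedged sketch of
emitting one from a nir_builder context "b", where "block" (buffer
index) and "offset" (byte offset) are assumed nir_ssa_def pointers and
"num_components" is assumed to match the value being loaded:

/* Sketch only: unified load_ubo takes two sources and no const_index. */
nir_intrinsic_instr *load =
   nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
load->num_components = num_components;
load->src[0] = nir_src_for_ssa(block);    /* buffer index source */
load->src[1] = nir_src_for_ssa(offset);   /* byte offset source  */
nir_ssa_dest_init(&load->instr, &load->dest, num_components, NULL);
nir_builder_instr_insert(b, &load->instr);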
diff --git a/src/glsl/nir/nir.h b/src/glsl/nir/nir.h
index e161b70fa18..2e72e66699c 100644
--- a/src/glsl/nir/nir.h
+++ b/src/glsl/nir/nir.h
@@ -1969,7 +1969,7 @@ void nir_assign_var_locations(struct exec_list *var_list,
void nir_lower_io(nir_shader *shader,
nir_variable_mode mode,
int (*type_size)(const struct glsl_type *));
-nir_src *nir_get_io_indirect_src(nir_intrinsic_instr *instr);
+nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);
void nir_lower_vars_to_ssa(nir_shader *shader);
diff --git a/src/glsl/nir/nir_intrinsics.h b/src/glsl/nir/nir_intrinsics.h
index 6b6cb32096b..9811fb391de 100644
--- a/src/glsl/nir/nir_intrinsics.h
+++ b/src/glsl/nir/nir_intrinsics.h
@@ -255,56 +255,60 @@ SYSTEM_VALUE(num_work_groups, 3, 0)
SYSTEM_VALUE(helper_invocation, 1, 0)
/*
- * The format of the indices depends on the type of the load. For uniforms,
- * the first index is the base address and the second index is an offset that
- * should be added to the base address. (This way you can determine in the
- * back-end which variable is being accessed even in an array.) For inputs,
- * the one and only index corresponds to the attribute slot. UBO loads also
- * have a single index which is the base address to load from.
+ * Load operations pull data from some piece of GPU memory. All load
+ * operations operate in terms of offsets into some piece of theoretical
+ * memory. Loads from externally visible memory (UBO and SSBO) simply take a
+ * byte offset as a source. Loads from opaque memory (uniforms, inputs, etc.)
+ * take a base+offset pair where the base (const_index[0]) gives the location
+ * of the start of the variable being loaded and the offset source is an
+ * offset into that variable.
*
- * UBO loads have a (possibly constant) source which is the UBO buffer index.
- * For each type of load, the _indirect variant has one additional source
- * (the second in the case of UBO's) that is the is an indirect to be added to
- * the constant address or base offset to compute the final offset.
+ * Some load operations such as UBO/SSBO load and per_vertex loads take an
+ * additional source to specify which UBO/SSBO/vertex to load from.
*
- * For vector backends, the address is in terms of one vec4, and so each array
- * element is +4 scalar components from the previous array element. For scalar
- * backends, the address is in terms of a single 4-byte float/int and arrays
- * elements begin immediately after the previous array element.
+ * The exact address type depends on the lowering pass that generates the
+ * load/store intrinsics. Typically, this is vec4 units for things such as
+ * varying slots and float units for fragment shader inputs. UBO and SSBO
+ * offsets are always in bytes.
*/
-#define LOAD(name, extra_srcs, indices, flags) \
- INTRINSIC(load_##name, extra_srcs, ARR(1), true, 0, 0, indices, flags) \
- INTRINSIC(load_##name##_indirect, extra_srcs + 1, ARR(1, 1), \
- true, 0, 0, indices, flags)
+#define LOAD(name, srcs, indices, flags) \
+ INTRINSIC(load_##name, srcs, ARR(1, 1, 1, 1), true, 0, 0, indices, flags)
-LOAD(uniform, 0, 2, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
-LOAD(ubo, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
-LOAD(input, 0, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
-LOAD(per_vertex_input, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
-LOAD(ssbo, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE)
-LOAD(output, 0, 1, NIR_INTRINSIC_CAN_ELIMINATE)
-LOAD(per_vertex_output, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE)
-LOAD(shared, 0, 1, NIR_INTRINSIC_CAN_ELIMINATE)
+/* src[] = { offset }. const_index[] = { base } */
+LOAD(uniform, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+/* src[] = { buffer_index, offset }. No const_index */
+LOAD(ubo, 2, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+/* src[] = { offset }. const_index[] = { base } */
+LOAD(input, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+/* src[] = { vertex, offset }. const_index[] = { base } */
+LOAD(per_vertex_input, 2, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+/* src[] = { buffer_index, offset }. No const_index */
+LOAD(ssbo, 2, 0, NIR_INTRINSIC_CAN_ELIMINATE)
+/* src[] = { offset }. const_index[] = { base } */
+LOAD(output, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE)
+/* src[] = { vertex, offset }. const_index[] = { base } */
+LOAD(per_vertex_output, 2, 1, NIR_INTRINSIC_CAN_ELIMINATE)
+/* src[] = { offset }. const_index[] = { base } */
+LOAD(shared, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE)
/*
- * Stores work the same way as loads, except now the first register input is
- * the value or array to store and the optional second input is the indirect
- * offset. SSBO stores are similar, but they accept an extra source for the
- * block index and an extra index with the writemask to use.
+ * Stores work the same way as loads, except now the first source is the value
+ * to store and the second (and possibly third) source specify where to store
+ * the value. SSBO and shared memory stores also have a write mask as
+ * const_index[0].
*/
-#define STORE(name, extra_srcs, extra_srcs_size, extra_indices, flags) \
- INTRINSIC(store_##name, 1 + extra_srcs, \
- ARR(0, extra_srcs_size, extra_srcs_size, extra_srcs_size), \
- false, 0, 0, 1 + extra_indices, flags) \
- INTRINSIC(store_##name##_indirect, 2 + extra_srcs, \
- ARR(0, 1, extra_srcs_size, extra_srcs_size), \
- false, 0, 0, 1 + extra_indices, flags)
+#define STORE(name, srcs, indices, flags) \
+ INTRINSIC(store_##name, srcs, ARR(0, 1, 1, 1), false, 0, 0, indices, flags)
-STORE(output, 0, 0, 0, 0)
-STORE(per_vertex_output, 1, 1, 0, 0)
-STORE(ssbo, 1, 1, 1, 0)
-STORE(shared, 0, 0, 1, 0)
+/* src[] = { value, offset }. const_index[] = { base } */
+STORE(output, 2, 1, 0)
+/* src[] = { value, vertex, offset }. const_index[] = { base } */
+STORE(per_vertex_output, 3, 1, 0)
+/* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
+STORE(ssbo, 3, 1, 0)
+/* src[] = { value, offset }. const_index[] = { base, write_mask } */
+STORE(shared, 2, 1, 0)
-LAST_INTRINSIC(store_shared_indirect)
+LAST_INTRINSIC(store_shared)
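A hedged sketch of the new store_ssbo layout documented above: value,
block index, and byte offset as sources, with the write mask in
const_index[0]. "val", "block", and "offset" are assumed nir_ssa_def
pointers and "b" an assumed nir_builder; the full write mask here is an
illustrative choice, not code from the patch.

/* Sketch only: unified store_ssbo construction. */
nir_intrinsic_instr *store =
   nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_ssbo);
store->num_components = val->num_components;
store->src[0] = nir_src_for_ssa(val);     /* value to store */
store->src[1] = nir_src_for_ssa(block);   /* block index    */
store->src[2] = nir_src_for_ssa(offset);  /* byte offset    */
store->const_index[0] = (1 << val->num_components) - 1;  /* write all */
nir_builder_instr_insert(b, &store->instr);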
diff --git a/src/glsl/nir/nir_lower_clip.c b/src/glsl/nir/nir_lower_clip.c
index c58c7785b3f..e2a2bb689a8 100644
--- a/src/glsl/nir/nir_lower_clip.c
+++ b/src/glsl/nir/nir_lower_clip.c
@@ -74,6 +74,7 @@ store_clipdist_output(nir_builder *b, nir_variable *out, nir_ssa_def **val)
store->const_index[0] = out->data.driver_location;
store->src[0].ssa = nir_vec4(b, val[0], val[1], val[2], val[3]);
store->src[0].is_ssa = true;
+ store->src[1] = nir_src_for_ssa(nir_imm_int(b, 0));
nir_builder_instr_insert(b, &store->instr);
}
@@ -85,6 +86,7 @@ load_clipdist_input(nir_builder *b, nir_variable *in, nir_ssa_def **val)
load = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_input);
load->num_components = 4;
load->const_index[0] = in->data.driver_location;
+ load->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
nir_ssa_dest_init(&load->instr, &load->dest, 4, NULL);
nir_builder_instr_insert(b, &load->instr);
@@ -112,6 +114,7 @@ find_output_in_block(nir_block *block, void *void_state)
intr->const_index[0] == state->drvloc) {
assert(state->def == NULL);
assert(intr->src[0].is_ssa);
+ assert(nir_src_as_const_value(intr->src[1]));
state->def = intr->src[0].ssa;
#if !defined(DEBUG)
diff --git a/src/glsl/nir/nir_lower_io.c b/src/glsl/nir/nir_lower_io.c
index f64ac696fa2..3d646eb14b4 100644
--- a/src/glsl/nir/nir_lower_io.c
+++ b/src/glsl/nir/nir_lower_io.c
@@ -86,10 +86,9 @@ is_per_vertex_output(struct lower_io_state *state, nir_variable *var)
stage == MESA_SHADER_TESS_CTRL;
}
-static unsigned
+static nir_ssa_def *
get_io_offset(nir_builder *b, nir_deref_var *deref,
nir_ssa_def **vertex_index,
- nir_ssa_def **out_indirect,
int (*type_size)(const struct glsl_type *))
{
nir_deref *tail = &deref->deref;
@@ -109,8 +108,8 @@ get_io_offset(nir_builder *b, nir_deref_var *deref,
*vertex_index = vtx;
}
- nir_ssa_def *indirect = NULL;
- unsigned base_offset = 0;
+ /* Just emit code and let constant-folding go to town */
+ nir_ssa_def *offset = nir_imm_int(b, 0);
while (tail->child != NULL) {
const struct glsl_type *parent_type = tail->type;
@@ -120,55 +119,46 @@ get_io_offset(nir_builder *b, nir_deref_var *deref,
nir_deref_array *deref_array = nir_deref_as_array(tail);
unsigned size = type_size(tail->type);
- base_offset += size * deref_array->base_offset;
+ offset = nir_iadd(b, offset,
+ nir_imm_int(b, size * deref_array->base_offset));
if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
nir_ssa_def *mul =
nir_imul(b, nir_imm_int(b, size),
nir_ssa_for_src(b, deref_array->indirect, 1));
- indirect = indirect ? nir_iadd(b, indirect, mul) : mul;
+ offset = nir_iadd(b, offset, mul);
}
} else if (tail->deref_type == nir_deref_type_struct) {
nir_deref_struct *deref_struct = nir_deref_as_struct(tail);
+ unsigned field_offset = 0;
for (unsigned i = 0; i < deref_struct->index; i++) {
- base_offset += type_size(glsl_get_struct_field(parent_type, i));
+ field_offset += type_size(glsl_get_struct_field(parent_type, i));
}
+ offset = nir_iadd(b, offset, nir_imm_int(b, field_offset));
}
}
- *out_indirect = indirect;
- return base_offset;
+ return offset;
}
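/* Illustrative note, not part of the patch: for a deref chain like
 * "arr[i].field" with type_size(arr[0]) == S and F units of fields
 * before "field", the loop above emits roughly
 *
 *    offset = iadd(iadd(imm(0), imm(S * base)), imul(imm(S), i))
 *    offset = iadd(offset, imm(F))
 *
 * When i is a constant, NIR constant folding collapses the whole chain
 * to a single immediate, recovering the old direct case with no special
 * handling in this pass.
 */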
static nir_intrinsic_op
load_op(struct lower_io_state *state,
- nir_variable_mode mode, bool per_vertex, bool has_indirect)
+ nir_variable_mode mode, bool per_vertex)
{
nir_intrinsic_op op;
switch (mode) {
case nir_var_shader_in:
- if (per_vertex) {
- op = has_indirect ? nir_intrinsic_load_per_vertex_input_indirect :
- nir_intrinsic_load_per_vertex_input;
- } else {
- op = has_indirect ? nir_intrinsic_load_input_indirect :
- nir_intrinsic_load_input;
- }
+ op = per_vertex ? nir_intrinsic_load_per_vertex_input :
+ nir_intrinsic_load_input;
break;
case nir_var_shader_out:
- if (per_vertex) {
- op = has_indirect ? nir_intrinsic_load_per_vertex_output_indirect :
- nir_intrinsic_load_per_vertex_output;
- } else {
- op = has_indirect ? nir_intrinsic_load_output_indirect :
- nir_intrinsic_load_output;
- }
+ op = per_vertex ? nir_intrinsic_load_per_vertex_output :
+ nir_intrinsic_load_output;
break;
case nir_var_uniform:
- op = has_indirect ? nir_intrinsic_load_uniform_indirect :
- nir_intrinsic_load_uniform;
+ op = nir_intrinsic_load_uniform;
break;
default:
unreachable("Unknown variable mode");
@@ -211,32 +201,25 @@ nir_lower_io_block(nir_block *block, void *void_state)
is_per_vertex_input(state, intrin->variables[0]->var) ||
is_per_vertex_output(state, intrin->variables[0]->var);
- nir_ssa_def *indirect;
+ nir_ssa_def *offset;
nir_ssa_def *vertex_index;
- unsigned offset = get_io_offset(b, intrin->variables[0],
- per_vertex ? &vertex_index : NULL,
- &indirect, state->type_size);
+ offset = get_io_offset(b, intrin->variables[0],
+ per_vertex ? &vertex_index : NULL,
+ state->type_size);
nir_intrinsic_instr *load =
nir_intrinsic_instr_create(state->mem_ctx,
- load_op(state, mode, per_vertex,
- indirect));
+ load_op(state, mode, per_vertex));
load->num_components = intrin->num_components;
- unsigned location = intrin->variables[0]->var->data.driver_location;
- if (mode == nir_var_uniform) {
- load->const_index[0] = location;
- load->const_index[1] = offset;
- } else {
- load->const_index[0] = location + offset;
- }
+ load->const_index[0] =
+ intrin->variables[0]->var->data.driver_location;
if (per_vertex)
load->src[0] = nir_src_for_ssa(vertex_index);
- if (indirect)
- load->src[per_vertex ? 1 : 0] = nir_src_for_ssa(indirect);
+ load->src[per_vertex ? 1 : 0] = nir_src_for_ssa(offset);
if (intrin->dest.is_ssa) {
nir_ssa_dest_init(&load->instr, &load->dest,
@@ -255,38 +238,33 @@ nir_lower_io_block(nir_block *block, void *void_state)
case nir_intrinsic_store_var: {
assert(mode == nir_var_shader_out);
- nir_ssa_def *indirect;
+ nir_ssa_def *offset;
nir_ssa_def *vertex_index;
bool per_vertex =
is_per_vertex_output(state, intrin->variables[0]->var);
- unsigned offset = get_io_offset(b, intrin->variables[0],
- per_vertex ? &vertex_index : NULL,
- &indirect, state->type_size);
- offset += intrin->variables[0]->var->data.driver_location;
+ offset = get_io_offset(b, intrin->variables[0],
+ per_vertex ? &vertex_index : NULL,
+ state->type_size);
- nir_intrinsic_op store_op;
- if (per_vertex) {
- store_op = indirect ? nir_intrinsic_store_per_vertex_output_indirect
- : nir_intrinsic_store_per_vertex_output;
- } else {
- store_op = indirect ? nir_intrinsic_store_output_indirect
- : nir_intrinsic_store_output;
- }
+ nir_intrinsic_op store_op =
+ per_vertex ? nir_intrinsic_store_per_vertex_output :
+ nir_intrinsic_store_output;
nir_intrinsic_instr *store = nir_intrinsic_instr_create(state->mem_ctx,
store_op);
store->num_components = intrin->num_components;
- store->const_index[0] = offset;
nir_src_copy(&store->src[0], &intrin->src[0], store);
+ store->const_index[0] =
+ intrin->variables[0]->var->data.driver_location;
+
if (per_vertex)
store->src[1] = nir_src_for_ssa(vertex_index);
- if (indirect)
- store->src[per_vertex ? 2 : 1] = nir_src_for_ssa(indirect);
+ store->src[per_vertex ? 2 : 1] = nir_src_for_ssa(offset);
nir_instr_insert_before(&intrin->instr, &store->instr);
nir_instr_remove(&intrin->instr);
@@ -330,21 +308,21 @@ nir_lower_io(nir_shader *shader, nir_variable_mode mode,
}
/**
- * Return the indirect source for a load/store indirect intrinsic.
+ * Return the offset source for a load/store intrinsic.
*/
nir_src *
-nir_get_io_indirect_src(nir_intrinsic_instr *instr)
+nir_get_io_offset_src(nir_intrinsic_instr *instr)
{
switch (instr->intrinsic) {
- case nir_intrinsic_load_input_indirect:
- case nir_intrinsic_load_output_indirect:
- case nir_intrinsic_load_uniform_indirect:
+ case nir_intrinsic_load_input:
+ case nir_intrinsic_load_output:
+ case nir_intrinsic_load_uniform:
return &instr->src[0];
- case nir_intrinsic_load_per_vertex_input_indirect:
- case nir_intrinsic_load_per_vertex_output_indirect:
- case nir_intrinsic_store_output_indirect:
+ case nir_intrinsic_load_per_vertex_input:
+ case nir_intrinsic_load_per_vertex_output:
+ case nir_intrinsic_store_output:
return &instr->src[1];
- case nir_intrinsic_store_per_vertex_output_indirect:
+ case nir_intrinsic_store_per_vertex_output:
return &instr->src[2];
default:
return NULL;
@@ -360,11 +338,8 @@ nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
switch (instr->intrinsic) {
case nir_intrinsic_load_per_vertex_input:
case nir_intrinsic_load_per_vertex_output:
- case nir_intrinsic_load_per_vertex_input_indirect:
- case nir_intrinsic_load_per_vertex_output_indirect:
return &instr->src[0];
case nir_intrinsic_store_per_vertex_output:
- case nir_intrinsic_store_per_vertex_output_indirect:
return &instr->src[1];
default:
return NULL;
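With the rename, a hedged caller-side sketch ("intrin" is an assumed
nir_intrinsic_instr pointer; both helpers below exist in this tree):

nir_src *offset_src = nir_get_io_offset_src(intrin);
if (offset_src) {
   nir_const_value *c = nir_src_as_const_value(*offset_src);
   if (c) {
      /* Fully direct: total offset is const_index[0] + c->u[0]. */
   }
}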
diff --git a/src/glsl/nir/nir_lower_phis_to_scalar.c b/src/glsl/nir/nir_lower_phis_to_scalar.c
index aa124d9e6cc..2f5927f6406 100644
--- a/src/glsl/nir/nir_lower_phis_to_scalar.c
+++ b/src/glsl/nir/nir_lower_phis_to_scalar.c
@@ -91,13 +91,9 @@ is_phi_src_scalarizable(nir_phi_src *src,
case nir_intrinsic_interp_var_at_sample:
case nir_intrinsic_interp_var_at_offset:
case nir_intrinsic_load_uniform:
- case nir_intrinsic_load_uniform_indirect:
case nir_intrinsic_load_ubo:
- case nir_intrinsic_load_ubo_indirect:
case nir_intrinsic_load_ssbo:
- case nir_intrinsic_load_ssbo_indirect:
case nir_intrinsic_load_input:
- case nir_intrinsic_load_input_indirect:
return true;
default:
break;
diff --git a/src/glsl/nir/nir_lower_two_sided_color.c b/src/glsl/nir/nir_lower_two_sided_color.c
index 6995b9d6bc1..7df12e070f1 100644
--- a/src/glsl/nir/nir_lower_two_sided_color.c
+++ b/src/glsl/nir/nir_lower_two_sided_color.c
@@ -73,6 +73,7 @@ load_input(nir_builder *b, nir_variable *in)
load = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_input);
load->num_components = 4;
load->const_index[0] = in->data.driver_location;
+ load->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
nir_ssa_dest_init(&load->instr, &load->dest, 4, NULL);
nir_builder_instr_insert(b, &load->instr);
@@ -151,6 +152,7 @@ nir_lower_two_sided_color_block(nir_block *block, void *void_state)
unsigned drvloc =
state->colors[idx].front->data.driver_location;
if (intr->const_index[0] == drvloc) {
+ assert(nir_src_as_const_value(intr->src[0]));
break;
}
}
diff --git a/src/glsl/nir/nir_print.c b/src/glsl/nir/nir_print.c
index c98a0476ef9..1a4cc695d5a 100644
--- a/src/glsl/nir/nir_print.c
+++ b/src/glsl/nir/nir_print.c
@@ -439,21 +439,15 @@ print_intrinsic_instr(nir_intrinsic_instr *instr, print_state *state)
switch (instr->intrinsic) {
case nir_intrinsic_load_uniform:
- case nir_intrinsic_load_uniform_indirect:
var_list = &state->shader->uniforms;
break;
case nir_intrinsic_load_input:
- case nir_intrinsic_load_input_indirect:
case nir_intrinsic_load_per_vertex_input:
- case nir_intrinsic_load_per_vertex_input_indirect:
var_list = &state->shader->inputs;
break;
case nir_intrinsic_load_output:
- case nir_intrinsic_load_output_indirect:
case nir_intrinsic_store_output:
- case nir_intrinsic_store_output_indirect:
case nir_intrinsic_store_per_vertex_output:
- case nir_intrinsic_store_per_vertex_output_indirect:
var_list = &state->shader->outputs;
break;
default: