author     Jason Ekstrand <[email protected]>    2015-12-27 23:23:05 -0800
committer  Jason Ekstrand <[email protected]>    2015-12-27 23:23:05 -0800
commit     ea77b384e8c575922eca1c05398e19fcbfda9b09 (patch)
tree       4f8659bd8b48af785896daa224f6698a5ee269ec /src/glsl/nir
parent     f948767471ba83427cbcdc244a511fbb954ca9e0 (diff)
parent     109c348284843054f708f4403260739b7db18275 (diff)
Merge remote-tracking branch 'mesa-public/master' into vulkan
This pulls in tessellation and the store_var changes that go with it.
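The store_var change referenced here gives nir_store_var() an explicit writemask parameter, which the store_var intrinsic carries in const_index[0] (see the nir_builder.h and nir_intrinsics.h hunks below). A minimal caller-side sketch, assuming a nir_builder already positioned inside a shader and an ivec4-typed variable; the helper name and constants are illustrative and not part of this commit:

    #include "nir_builder.h"

    /* Write a constant into only the .x and .z channels of an ivec4 variable.
     * The writemask leaves .y and .w untouched, without the load/vec/store
     * round trip the old glsl_to_nir path below had to emit.
     */
    static void
    store_xz_only(nir_builder *b, nir_variable *ivec4_var)
    {
       nir_ssa_def *chans[4] = {
          nir_imm_int(b, 1), nir_imm_int(b, 0),
          nir_imm_int(b, 2), nir_imm_int(b, 0),
       };
       nir_ssa_def *value = nir_vec(b, chans, 4);

       nir_store_var(b, ivec4_var, value, 0x5 /* .xz */);
    }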
Diffstat (limited to 'src/glsl/nir')
-rw-r--r--  src/glsl/nir/glsl_to_nir.cpp             | 58
-rw-r--r--  src/glsl/nir/glsl_types.cpp              | 18
-rw-r--r--  src/glsl/nir/glsl_types.h                | 13
-rw-r--r--  src/glsl/nir/nir.h                       | 13
-rw-r--r--  src/glsl/nir/nir_builder.h               |  4
-rw-r--r--  src/glsl/nir/nir_intrinsics.h            | 12
-rw-r--r--  src/glsl/nir/nir_lower_alu_to_scalar.c   |  4
-rw-r--r--  src/glsl/nir/nir_lower_gs_intrinsics.c   |  3
-rw-r--r--  src/glsl/nir/nir_lower_io.c              |  3
-rw-r--r--  src/glsl/nir/nir_lower_locals_to_regs.c  |  2
-rw-r--r--  src/glsl/nir/nir_lower_returns.c         |  2
-rw-r--r--  src/glsl/nir/nir_lower_var_copies.c      |  1
-rw-r--r--  src/glsl/nir/nir_lower_vars_to_ssa.c     | 46
-rw-r--r--  src/glsl/nir/nir_opcodes.py              |  7
-rw-r--r--  src/glsl/nir/nir_print.c                 | 53
-rw-r--r--  src/glsl/nir/nir_types.cpp               | 23
-rw-r--r--  src/glsl/nir/nir_types.h                 |  6
-rw-r--r--  src/glsl/nir/nir_validate.c              |  1
-rw-r--r--  src/glsl/nir/spirv_to_nir.c              | 31
19 files changed, 184 insertions, 116 deletions
diff --git a/src/glsl/nir/glsl_to_nir.cpp b/src/glsl/nir/glsl_to_nir.cpp
index 9a25f2fc905..fe8aa278380 100644
--- a/src/glsl/nir/glsl_to_nir.cpp
+++ b/src/glsl/nir/glsl_to_nir.cpp
@@ -367,7 +367,6 @@ nir_visitor::visit(ir_variable *ir)
var->data.explicit_index = ir->data.explicit_index;
var->data.explicit_binding = ir->data.explicit_binding;
var->data.has_initializer = ir->data.has_initializer;
- var->data.is_unmatched_generic_inout = ir->data.is_unmatched_generic_inout;
var->data.location_frac = ir->data.location_frac;
var->data.from_named_ifc_block_array = ir->data.from_named_ifc_block_array;
var->data.from_named_ifc_block_nonarray = ir->data.from_named_ifc_block_nonarray;
@@ -1072,6 +1071,7 @@ nir_visitor::visit(ir_call *ir)
nir_intrinsic_instr *store_instr =
nir_intrinsic_instr_create(shader, nir_intrinsic_store_var);
store_instr->num_components = ir->return_deref->type->vector_elements;
+ store_instr->const_index[0] = (1 << store_instr->num_components) - 1;
store_instr->variables[0] =
evaluate_deref(&store_instr->instr, ir->return_deref);
@@ -1133,43 +1133,23 @@ nir_visitor::visit(ir_assignment *ir)
nir_ssa_def *src = evaluate_rvalue(ir->rhs);
if (ir->write_mask != (1 << num_components) - 1 && ir->write_mask != 0) {
- /*
- * We have no good way to update only part of a variable, so just load
- * the LHS and do a vec operation to combine the old with the new, and
- * then store it
- * back into the LHS. Copy propagation should get rid of the mess.
+ /* GLSL IR will give us the input to the write-masked assignment in a
+ * single packed vector. So, for example, if the writemask is xzw, then
+ * we have to swizzle x -> x, y -> z, and z -> w and get the y component
+ * from the load.
*/
-
- nir_intrinsic_instr *load =
- nir_intrinsic_instr_create(this->shader, nir_intrinsic_load_var);
- load->num_components = ir->lhs->type->vector_elements;
- nir_ssa_dest_init(&load->instr, &load->dest, num_components, NULL);
- load->variables[0] = lhs_deref;
- ralloc_steal(load, load->variables[0]);
- nir_builder_instr_insert(&b, &load->instr);
-
- nir_ssa_def *srcs[4];
-
+ unsigned swiz[4];
unsigned component = 0;
- for (unsigned i = 0; i < ir->lhs->type->vector_elements; i++) {
- if (ir->write_mask & (1 << i)) {
- /* GLSL IR will give us the input to the write-masked assignment
- * in a single packed vector. So, for example, if the
- * writemask is xzw, then we have to swizzle x -> x, y -> z,
- * and z -> w and get the y component from the load.
- */
- srcs[i] = nir_channel(&b, src, component++);
- } else {
- srcs[i] = nir_channel(&b, &load->dest.ssa, i);
- }
+ for (unsigned i = 0; i < 4; i++) {
+ swiz[i] = ir->write_mask & (1 << i) ? component++ : 0;
}
-
- src = nir_vec(&b, srcs, ir->lhs->type->vector_elements);
+ src = nir_swizzle(&b, src, swiz, num_components, !supports_ints);
}
nir_intrinsic_instr *store =
nir_intrinsic_instr_create(this->shader, nir_intrinsic_store_var);
store->num_components = ir->lhs->type->vector_elements;
+ store->const_index[0] = ir->write_mask;
nir_deref *store_deref = nir_copy_deref(store, &lhs_deref->deref);
store->variables[0] = nir_deref_as_var(store_deref);
store->src[0] = nir_src_for_ssa(src);
@@ -1421,24 +1401,6 @@ nir_visitor::visit(ir_expression *ir)
/* no-op */
result = nir_imov(&b, srcs[0]);
break;
- case ir_unop_any:
- switch (ir->operands[0]->type->vector_elements) {
- case 2:
- result = supports_ints ? nir_bany2(&b, srcs[0])
- : nir_fany2(&b, srcs[0]);
- break;
- case 3:
- result = supports_ints ? nir_bany3(&b, srcs[0])
- : nir_fany3(&b, srcs[0]);
- break;
- case 4:
- result = supports_ints ? nir_bany4(&b, srcs[0])
- : nir_fany4(&b, srcs[0]);
- break;
- default:
- unreachable("not reached");
- }
- break;
case ir_unop_trunc: result = nir_ftrunc(&b, srcs[0]); break;
case ir_unop_ceil: result = nir_fceil(&b, srcs[0]); break;
case ir_unop_floor: result = nir_ffloor(&b, srcs[0]); break;
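The swizzle loop added in the ir_assignment hunk above can be illustrated on its own. A standalone sketch in plain C (not Mesa API; the function name and values are illustrative): GLSL IR hands the RHS over as a packed vector, so each written destination channel pulls the next packed source channel in order, while unwritten channels get a don't-care index of 0 since the store_var writemask excludes them anyway.

    #include <stdio.h>

    /* Mirrors the loop in nir_visitor::visit(ir_assignment *): spread the
     * packed RHS channels onto the channels selected by write_mask.
     */
    static void
    build_writemask_swizzle(unsigned write_mask, unsigned swiz[4])
    {
       unsigned component = 0;
       for (unsigned i = 0; i < 4; i++)
          swiz[i] = (write_mask & (1u << i)) ? component++ : 0;
    }

    int
    main(void)
    {
       unsigned swiz[4];
       build_writemask_swizzle(0xd /* xzw */, swiz);
       /* Prints 0 0 1 2: dst.x <- src.x, dst.z <- src.y, dst.w <- src.z;
        * dst.y keeps its old value because the writemask skips it.
        */
       printf("%u %u %u %u\n", swiz[0], swiz[1], swiz[2], swiz[3]);
       return 0;
    }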
diff --git a/src/glsl/nir/glsl_types.cpp b/src/glsl/nir/glsl_types.cpp
index bc8677ba6fc..d86609718ea 100644
--- a/src/glsl/nir/glsl_types.cpp
+++ b/src/glsl/nir/glsl_types.cpp
@@ -1831,7 +1831,7 @@ glsl_type::std430_size(bool row_major) const
}
unsigned
-glsl_type::count_attribute_slots() const
+glsl_type::count_attribute_slots(bool vertex_input_slots) const
{
/* From page 31 (page 37 of the PDF) of the GLSL 1.50 spec:
*
@@ -1852,27 +1852,35 @@ glsl_type::count_attribute_slots() const
* allows varying structs, the number of varying slots taken up by a
* varying struct is simply equal to the sum of the number of slots taken
* up by each element.
+ *
+ * Doubles are counted different depending on whether they are vertex
+ * inputs or everything else. Vertex inputs from ARB_vertex_attrib_64bit
+ * take one location no matter what size they are, otherwise dvec3/4
+ * take two locations.
*/
switch (this->base_type) {
case GLSL_TYPE_UINT:
case GLSL_TYPE_INT:
case GLSL_TYPE_FLOAT:
case GLSL_TYPE_BOOL:
- case GLSL_TYPE_DOUBLE:
return this->matrix_columns;
-
+ case GLSL_TYPE_DOUBLE:
+ if (this->vector_elements > 2 && !vertex_input_slots)
+ return this->matrix_columns * 2;
+ else
+ return this->matrix_columns;
case GLSL_TYPE_STRUCT:
case GLSL_TYPE_INTERFACE: {
unsigned size = 0;
for (unsigned i = 0; i < this->length; i++)
- size += this->fields.structure[i].type->count_attribute_slots();
+ size += this->fields.structure[i].type->count_attribute_slots(vertex_input_slots);
return size;
}
case GLSL_TYPE_ARRAY:
- return this->length * this->fields.array->count_attribute_slots();
+ return this->length * this->fields.array->count_attribute_slots(vertex_input_slots);
case GLSL_TYPE_FUNCTION:
case GLSL_TYPE_SAMPLER:
diff --git a/src/glsl/nir/glsl_types.h b/src/glsl/nir/glsl_types.h
index 1aafa5cd547..ff8dcc7a5f6 100644
--- a/src/glsl/nir/glsl_types.h
+++ b/src/glsl/nir/glsl_types.h
@@ -334,8 +334,11 @@ struct glsl_type {
* varying slots the type will use up in the absence of varying packing
* (and thus, it can be used to measure the number of varying slots used by
* the varyings that are generated by lower_packed_varyings).
+ *
+ * For vertex shader attributes - doubles only take one slot.
+ * For inter-shader varyings - dvec3/dvec4 take two slots.
*/
- unsigned count_attribute_slots() const;
+ unsigned count_attribute_slots(bool vertex_input_slots) const;
/**
* Alignment in bytes of the start of this type in a std140 uniform
@@ -481,6 +484,14 @@ struct glsl_type {
}
/**
+ * Query whether a double takes two slots.
+ */
+ bool is_dual_slot_double() const
+ {
+ return base_type == GLSL_TYPE_DOUBLE && vector_elements > 2;
+ }
+
+ /**
* Query whether or not a type is a non-array boolean type
*/
bool is_boolean() const
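The double-slot rule added to count_attribute_slots() and is_dual_slot_double() above, restated as a standalone sketch (plain C with illustrative names, not Mesa API): per ARB_vertex_attrib_64bit a double-typed vertex input takes one location per matrix column regardless of width, while as an inter-stage varying a dvec3/dvec4 column takes two.

    #include <stdbool.h>

    /* e.g. dvec4 vertex input -> 1 slot
     *      dvec4 varying      -> 2 slots
     *      dmat3 varying      -> 3 columns * 2 = 6 slots
     */
    static unsigned
    double_attribute_slots(unsigned matrix_columns, unsigned vector_elements,
                           bool vertex_input_slots)
    {
       if (vector_elements > 2 && !vertex_input_slots)
          return matrix_columns * 2;
       return matrix_columns;
    }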
diff --git a/src/glsl/nir/nir.h b/src/glsl/nir/nir.h
index 904e444487a..c05df101f29 100644
--- a/src/glsl/nir/nir.h
+++ b/src/glsl/nir/nir.h
@@ -216,15 +216,6 @@ typedef struct {
unsigned has_initializer:1;
/**
- * Is this variable a generic output or input that has not yet been matched
- * up to a variable in another stage of the pipeline?
- *
- * This is used by the linker as scratch storage while assigning locations
- * to generic inputs and outputs.
- */
- unsigned is_unmatched_generic_inout:1;
-
- /**
* If non-zero, then this variable may be packed along with other variables
* into a single varying slot, so this offset should be applied when
* accessing components. For example, an offset of 1 means that the x
@@ -388,7 +379,9 @@ nir_variable_get_io_mask(nir_variable *var, gl_shader_stage stage)
var_type = glsl_get_array_element(var_type);
}
- unsigned slots = glsl_count_attribute_slots(var_type);
+ bool is_vertex_input = (var->data.mode == nir_var_shader_in &&
+ stage == MESA_SHADER_VERTEX);
+ unsigned slots = glsl_count_attribute_slots(var_type, is_vertex_input);
return ((1ull << slots) - 1) << var->data.location;
}
diff --git a/src/glsl/nir/nir_builder.h b/src/glsl/nir/nir_builder.h
index 423bddd7a9a..038d5d0e240 100644
--- a/src/glsl/nir/nir_builder.h
+++ b/src/glsl/nir/nir_builder.h
@@ -341,13 +341,15 @@ nir_load_var(nir_builder *build, nir_variable *var)
}
static inline void
-nir_store_var(nir_builder *build, nir_variable *var, nir_ssa_def *value)
+nir_store_var(nir_builder *build, nir_variable *var, nir_ssa_def *value,
+ unsigned writemask)
{
const unsigned num_components = glsl_get_vector_elements(var->type);
nir_intrinsic_instr *store =
nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_var);
store->num_components = num_components;
+ store->const_index[0] = writemask;
store->variables[0] = nir_deref_var_create(store, var);
store->src[0] = nir_src_for_ssa(value);
nir_builder_instr_insert(build, &store->instr);
diff --git a/src/glsl/nir/nir_intrinsics.h b/src/glsl/nir/nir_intrinsics.h
index 5086e297e8e..e30750804dc 100644
--- a/src/glsl/nir/nir_intrinsics.h
+++ b/src/glsl/nir/nir_intrinsics.h
@@ -43,7 +43,7 @@
INTRINSIC(load_var, 0, ARR(), true, 0, 1, 0, NIR_INTRINSIC_CAN_ELIMINATE)
-INTRINSIC(store_var, 1, ARR(0), false, 0, 1, 0, 0)
+INTRINSIC(store_var, 1, ARR(0), false, 0, 1, 1, 0)
INTRINSIC(copy_var, 0, ARR(), false, 0, 2, 0, 0)
/*
@@ -323,13 +323,13 @@ LOAD(push_constant, 1, 2, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDE
#define STORE(name, srcs, indices, flags) \
INTRINSIC(store_##name, srcs, ARR(0, 1, 1, 1), false, 0, 0, indices, flags)
-/* src[] = { value, offset }. const_index[] = { base } */
-STORE(output, 2, 1, 0)
-/* src[] = { value, vertex, offset }. const_index[] = { base } */
-STORE(per_vertex_output, 3, 1, 0)
+/* src[] = { value, offset }. const_index[] = { base, write_mask } */
+STORE(output, 2, 2, 0)
+/* src[] = { value, vertex, offset }. const_index[] = { base, write_mask } */
+STORE(per_vertex_output, 3, 2, 0)
/* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
STORE(ssbo, 3, 1, 0)
/* src[] = { value, offset }. const_index[] = { base, write_mask } */
-STORE(shared, 2, 1, 0)
+STORE(shared, 2, 2, 0)
LAST_INTRINSIC(store_shared)
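A note on the numeric changes above, going by the INTRINSIC() field layout documented at the top of nir_intrinsics.h (the second-to-last argument is the number of const indices): store_var gains its first const index, and store_output/store_per_vertex_output/store_shared grow from one index (base) to two (base, write_mask), so the changed line reads roughly as

    /*        name       srcs  src_comps  dest?  dest_comps  vars  indices  flags */
    INTRINSIC(store_var, 1,    ARR(0),    false, 0,          1,    1,       0)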
diff --git a/src/glsl/nir/nir_lower_alu_to_scalar.c b/src/glsl/nir/nir_lower_alu_to_scalar.c
index 9313fc0f97e..d267ca383ab 100644
--- a/src/glsl/nir/nir_lower_alu_to_scalar.c
+++ b/src/glsl/nir/nir_lower_alu_to_scalar.c
@@ -137,10 +137,6 @@ lower_alu_instr_scalar(nir_alu_instr *instr, nir_builder *b)
LOWER_REDUCTION(nir_op_bany_inequal, nir_op_ine, nir_op_ior);
LOWER_REDUCTION(nir_op_fall_equal, nir_op_seq, nir_op_fand);
LOWER_REDUCTION(nir_op_fany_nequal, nir_op_sne, nir_op_for);
- LOWER_REDUCTION(nir_op_ball, nir_op_imov, nir_op_iand);
- LOWER_REDUCTION(nir_op_bany, nir_op_imov, nir_op_ior);
- LOWER_REDUCTION(nir_op_fall, nir_op_fmov, nir_op_fand);
- LOWER_REDUCTION(nir_op_fany, nir_op_fmov, nir_op_for);
default:
break;
diff --git a/src/glsl/nir/nir_lower_gs_intrinsics.c b/src/glsl/nir/nir_lower_gs_intrinsics.c
index e0d067885d8..13254599088 100644
--- a/src/glsl/nir/nir_lower_gs_intrinsics.c
+++ b/src/glsl/nir/nir_lower_gs_intrinsics.c
@@ -99,7 +99,8 @@ rewrite_emit_vertex(nir_intrinsic_instr *intrin, struct state *state)
/* Increment the vertex count by 1 */
nir_store_var(b, state->vertex_count_var,
- nir_iadd(b, count, nir_imm_int(b, 1)));
+ nir_iadd(b, count, nir_imm_int(b, 1)),
+ 0x1); /* .x */
nir_instr_remove(&intrin->instr);
diff --git a/src/glsl/nir/nir_lower_io.c b/src/glsl/nir/nir_lower_io.c
index ec6d09d5b6d..36165a8b765 100644
--- a/src/glsl/nir/nir_lower_io.c
+++ b/src/glsl/nir/nir_lower_io.c
@@ -261,6 +261,9 @@ nir_lower_io_block(nir_block *block, void *void_state)
store->const_index[0] =
intrin->variables[0]->var->data.driver_location;
+ /* Copy the writemask */
+ store->const_index[1] = intrin->const_index[0];
+
if (per_vertex)
store->src[1] = nir_src_for_ssa(vertex_index);
diff --git a/src/glsl/nir/nir_lower_locals_to_regs.c b/src/glsl/nir/nir_lower_locals_to_regs.c
index 17b53ca36f3..3e21ac0cdd5 100644
--- a/src/glsl/nir/nir_lower_locals_to_regs.c
+++ b/src/glsl/nir/nir_lower_locals_to_regs.c
@@ -243,7 +243,7 @@ lower_locals_to_regs_block(nir_block *block, void *void_state)
nir_alu_instr *mov = nir_alu_instr_create(state->shader, nir_op_imov);
nir_src_copy(&mov->src[0].src, &intrin->src[0], mov);
- mov->dest.write_mask = (1 << intrin->num_components) - 1;
+ mov->dest.write_mask = intrin->const_index[0];
mov->dest.dest.is_ssa = false;
mov->dest.dest.reg.reg = reg_src.reg.reg;
mov->dest.dest.reg.base_offset = reg_src.reg.base_offset;
diff --git a/src/glsl/nir/nir_lower_returns.c b/src/glsl/nir/nir_lower_returns.c
index ce0512c770a..f1e8b143840 100644
--- a/src/glsl/nir/nir_lower_returns.c
+++ b/src/glsl/nir/nir_lower_returns.c
@@ -156,7 +156,7 @@ lower_returns_in_block(nir_block *block, struct lower_returns_state *state)
state->return_flag->constant_initializer =
rzalloc(state->return_flag, nir_constant);
}
- nir_store_var(b, state->return_flag, nir_imm_int(b, NIR_TRUE));
+ nir_store_var(b, state->return_flag, nir_imm_int(b, NIR_TRUE), 1);
if (state->loop) {
/* We're in a loop. Make the return a break. */
diff --git a/src/glsl/nir/nir_lower_var_copies.c b/src/glsl/nir/nir_lower_var_copies.c
index 98c107aa50e..a9017de5449 100644
--- a/src/glsl/nir/nir_lower_var_copies.c
+++ b/src/glsl/nir/nir_lower_var_copies.c
@@ -128,6 +128,7 @@ emit_copy_load_store(nir_intrinsic_instr *copy_instr,
nir_intrinsic_instr *store =
nir_intrinsic_instr_create(mem_ctx, nir_intrinsic_store_var);
store->num_components = num_components;
+ store->const_index[0] = (1 << num_components) - 1;
store->variables[0] = nir_deref_as_var(nir_copy_deref(store, &dest_head->deref));
store->src[0].is_ssa = true;
diff --git a/src/glsl/nir/nir_lower_vars_to_ssa.c b/src/glsl/nir/nir_lower_vars_to_ssa.c
index e670dbdc7e7..3ec0e1d9960 100644
--- a/src/glsl/nir/nir_lower_vars_to_ssa.c
+++ b/src/glsl/nir/nir_lower_vars_to_ssa.c
@@ -26,6 +26,7 @@
*/
#include "nir.h"
+#include "nir_builder.h"
#include "nir_vla.h"
@@ -590,6 +591,9 @@ add_phi_sources(nir_block *block, nir_block *pred,
static bool
rename_variables_block(nir_block *block, struct lower_variables_state *state)
{
+ nir_builder b;
+ nir_builder_init(&b, state->impl);
+
nir_foreach_instr_safe(block, instr) {
if (instr->type == nir_instr_type_phi) {
nir_phi_instr *phi = nir_instr_as_phi(instr);
@@ -675,20 +679,40 @@ rename_variables_block(nir_block *block, struct lower_variables_state *state)
assert(intrin->src[0].is_ssa);
- nir_alu_instr *mov = nir_alu_instr_create(state->shader,
- nir_op_imov);
- mov->src[0].src.is_ssa = true;
- mov->src[0].src.ssa = intrin->src[0].ssa;
- for (unsigned i = intrin->num_components; i < 4; i++)
- mov->src[0].swizzle[i] = 0;
+ nir_ssa_def *new_def;
+ b.cursor = nir_before_instr(&intrin->instr);
- mov->dest.write_mask = (1 << intrin->num_components) - 1;
- nir_ssa_dest_init(&mov->instr, &mov->dest.dest,
- intrin->num_components, NULL);
+ if (intrin->const_index[0] == (1 << intrin->num_components) - 1) {
+ /* Whole variable store - just copy the source. Note that
+ * intrin->num_components and intrin->src[0].ssa->num_components
+ * may differ.
+ */
+ unsigned swiz[4];
+ for (unsigned i = 0; i < 4; i++)
+ swiz[i] = i < intrin->num_components ? i : 0;
+
+ new_def = nir_swizzle(&b, intrin->src[0].ssa, swiz,
+ intrin->num_components, false);
+ } else {
+ nir_ssa_def *old_def = get_ssa_def_for_block(node, block, state);
+ /* For writemasked store_var intrinsics, we combine the newly
+ * written values with the existing contents of unwritten
+ * channels, creating a new SSA value for the whole vector.
+ */
+ nir_ssa_def *srcs[4];
+ for (unsigned i = 0; i < intrin->num_components; i++) {
+ if (intrin->const_index[0] & (1 << i)) {
+ srcs[i] = nir_channel(&b, intrin->src[0].ssa, i);
+ } else {
+ srcs[i] = nir_channel(&b, old_def, i);
+ }
+ }
+ new_def = nir_vec(&b, srcs, intrin->num_components);
+ }
- nir_instr_insert_before(&intrin->instr, &mov->instr);
+ assert(new_def->num_components == intrin->num_components);
- def_stack_push(node, &mov->dest.dest.ssa, state);
+ def_stack_push(node, new_def, state);
/* We'll wait to remove the instruction until the next pass
* where we pop the node we just pushed back off the stack.
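A concrete trace of the partial-writemask path above (an illustrative walk-through, not additional code in the patch): for a vec4 variable whose current SSA value is old_def and a store_var with const_index[0] = 0x5 (.xz written), the loop builds

    srcs[0] = nir_channel(&b, intrin->src[0].ssa, 0);   /* new .x */
    srcs[1] = nir_channel(&b, old_def, 1);              /* old .y */
    srcs[2] = nir_channel(&b, intrin->src[0].ssa, 2);   /* new .z */
    srcs[3] = nir_channel(&b, old_def, 3);              /* old .w */

and pushes nir_vec(&b, srcs, 4) as the variable's new whole-vector SSA def, a case the old unconditional imov path never had to handle because stores were always whole-variable before this merge.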
diff --git a/src/glsl/nir/nir_opcodes.py b/src/glsl/nir/nir_opcodes.py
index 37d3dfc4588..1cd01a4fe92 100644
--- a/src/glsl/nir/nir_opcodes.py
+++ b/src/glsl/nir/nir_opcodes.py
@@ -167,13 +167,6 @@ unop_convert("i2b", tint, tbool, "src0 != 0")
unop_convert("b2i", tbool, tint, "src0 ? 1 : 0") # Boolean-to-int conversion
unop_convert("u2f", tuint, tfloat, "src0") # Unsigned-to-float conversion.
-unop_reduce("bany", 1, tbool, tbool, "{src}", "{src0} || {src1}", "{src}")
-unop_reduce("ball", 1, tbool, tbool, "{src}", "{src0} && {src1}", "{src}")
-unop_reduce("fany", 1, tfloat, tfloat, "{src} != 0.0f", "{src0} || {src1}",
- "{src} ? 1.0f : 0.0f")
-unop_reduce("fall", 1, tfloat, tfloat, "{src} != 0.0f", "{src0} && {src1}",
- "{src} ? 1.0f : 0.0f")
-
# Unary floating-point rounding operations.
diff --git a/src/glsl/nir/nir_print.c b/src/glsl/nir/nir_print.c
index 26b1cbb467d..2691cbdf213 100644
--- a/src/glsl/nir/nir_print.c
+++ b/src/glsl/nir/nir_print.c
@@ -249,6 +249,53 @@ get_var_name(nir_variable *var, print_state *state)
}
static void
+print_constant(nir_constant *c, const struct glsl_type *type, print_state *state)
+{
+ FILE *fp = state->fp;
+ unsigned total_elems = glsl_get_components(type);
+ unsigned i;
+
+ switch (glsl_get_base_type(type)) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_BOOL:
+ for (i = 0; i < total_elems; i++) {
+ if (i > 0) fprintf(fp, ", ");
+ fprintf(fp, "0x%08x", c->value.u[i]);
+ }
+ break;
+
+ case GLSL_TYPE_FLOAT:
+ for (i = 0; i < total_elems; i++) {
+ if (i > 0) fprintf(fp, ", ");
+ fprintf(fp, "%f", c->value.f[i]);
+ }
+ break;
+
+ case GLSL_TYPE_STRUCT:
+ for (i = 0; i < c->num_elements; i++) {
+ if (i > 0) fprintf(fp, ", ");
+ fprintf(fp, "{ ");
+ print_constant(c->elements[i], glsl_get_struct_field(type, i), state);
+ fprintf(fp, " }");
+ }
+ break;
+
+ case GLSL_TYPE_ARRAY:
+ for (i = 0; i < c->num_elements; i++) {
+ if (i > 0) fprintf(fp, ", ");
+ fprintf(fp, "{ ");
+ print_constant(c->elements[i], glsl_get_array_element(type), state);
+ fprintf(fp, " }");
+ }
+ break;
+
+ default:
+ unreachable("not reached");
+ }
+}
+
+static void
print_var_decl(nir_variable *var, print_state *state)
{
FILE *fp = state->fp;
@@ -311,6 +358,12 @@ print_var_decl(nir_variable *var, print_state *state)
fprintf(fp, " (%s, %u)", loc, var->data.driver_location);
}
+ if (var->constant_initializer) {
+ fprintf(fp, " = { ");
+ print_constant(var->constant_initializer, var->type, state);
+ fprintf(fp, " }");
+ }
+
fprintf(fp, "\n");
}
diff --git a/src/glsl/nir/nir_types.cpp b/src/glsl/nir/nir_types.cpp
index 54751cbcb5f..86f8508b859 100644
--- a/src/glsl/nir/nir_types.cpp
+++ b/src/glsl/nir/nir_types.cpp
@@ -125,9 +125,10 @@ glsl_get_aoa_size(const struct glsl_type *type)
}
unsigned
-glsl_count_attribute_slots(const struct glsl_type *type)
+glsl_count_attribute_slots(const struct glsl_type *type,
+ bool vertex_input_slots)
{
- return type->count_attribute_slots();
+ return type->count_attribute_slots(vertex_input_slots);
}
const char *
@@ -238,6 +239,18 @@ glsl_float_type(void)
}
const glsl_type *
+glsl_vec_type(unsigned n)
+{
+ return glsl_type::vec(n);
+}
+
+const glsl_type *
+glsl_vec4_type(void)
+{
+ return glsl_type::vec4_type;
+}
+
+const glsl_type *
glsl_int_type(void)
{
return glsl_type::int_type;
@@ -256,12 +269,6 @@ glsl_bool_type(void)
}
const glsl_type *
-glsl_vec4_type(void)
-{
- return glsl_type::vec4_type;
-}
-
-const glsl_type *
glsl_scalar_type(enum glsl_base_type base_type)
{
return glsl_type::get_instance(base_type, 1, 1);
diff --git a/src/glsl/nir/nir_types.h b/src/glsl/nir/nir_types.h
index 1bae84a356e..535d36373de 100644
--- a/src/glsl/nir/nir_types.h
+++ b/src/glsl/nir/nir_types.h
@@ -67,7 +67,8 @@ unsigned glsl_get_length(const struct glsl_type *type);
unsigned glsl_get_aoa_size(const struct glsl_type *type);
-unsigned glsl_count_attribute_slots(const struct glsl_type *type);
+unsigned glsl_count_attribute_slots(const struct glsl_type *type,
+ bool vertex_input_slots);
const char *glsl_get_struct_elem_name(const struct glsl_type *type,
unsigned index);
@@ -92,11 +93,12 @@ bool glsl_sampler_type_is_array(const struct glsl_type *type);
const struct glsl_type *glsl_void_type(void);
const struct glsl_type *glsl_float_type(void);
+const struct glsl_type *glsl_vec_type(unsigned n);
+const struct glsl_type *glsl_vec4_type(void);
const struct glsl_type *glsl_int_type(void);
const struct glsl_type *glsl_uint_type(void);
const struct glsl_type *glsl_bool_type(void);
-const struct glsl_type *glsl_vec4_type(void);
const struct glsl_type *glsl_scalar_type(enum glsl_base_type base_type);
const struct glsl_type *glsl_vector_type(enum glsl_base_type base_type,
unsigned components);
diff --git a/src/glsl/nir/nir_validate.c b/src/glsl/nir/nir_validate.c
index 06879d64ee2..da920557d20 100644
--- a/src/glsl/nir/nir_validate.c
+++ b/src/glsl/nir/nir_validate.c
@@ -417,6 +417,7 @@ validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
assert(instr->variables[0]->var->data.mode != nir_var_shader_in &&
instr->variables[0]->var->data.mode != nir_var_uniform &&
instr->variables[0]->var->data.mode != nir_var_shader_storage);
+ assert((instr->const_index[0] & ~((1 << instr->num_components) - 1)) == 0);
break;
}
case nir_intrinsic_copy_var:
diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c
index 48960b70373..91710ba45f9 100644
--- a/src/glsl/nir/spirv_to_nir.c
+++ b/src/glsl/nir/spirv_to_nir.c
@@ -1133,6 +1133,7 @@ _vtn_variable_store(struct vtn_builder *b,
store->variables[0] =
nir_deref_as_var(nir_copy_deref(store, &dest_deref->deref));
store->num_components = glsl_get_vector_elements(src->type);
+ store->const_index[0] = (1 << store->num_components) - 1;
store->src[0] = nir_src_for_ssa(src->def);
nir_builder_instr_insert(&b->nb, &store->instr);
@@ -2574,20 +2575,30 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
case SpvOpNot: op = nir_op_inot; break;
case SpvOpAny:
- switch (src[0]->num_components) {
- case 1: op = nir_op_imov; break;
- case 2: op = nir_op_bany2; break;
- case 3: op = nir_op_bany3; break;
- case 4: op = nir_op_bany4; break;
+ if (src[0]->num_components == 1) {
+ op = nir_op_imov;
+ } else {
+ switch (src[0]->num_components) {
+ case 2: op = nir_op_bany_inequal2; break;
+ case 3: op = nir_op_bany_inequal3; break;
+ case 4: op = nir_op_bany_inequal4; break;
+ }
+ num_inputs = 2;
+ src[1] = nir_imm_int(&b->nb, NIR_FALSE);
}
break;
case SpvOpAll:
- switch (src[0]->num_components) {
- case 1: op = nir_op_imov; break;
- case 2: op = nir_op_ball2; break;
- case 3: op = nir_op_ball3; break;
- case 4: op = nir_op_ball4; break;
+ if (src[0]->num_components == 1) {
+ op = nir_op_imov;
+ } else {
+ switch (src[0]->num_components) {
+ case 2: op = nir_op_ball_iequal2; break;
+ case 3: op = nir_op_ball_iequal3; break;
+ case 4: op = nir_op_ball_iequal4; break;
+ }
+ num_inputs = 2;
+ src[1] = nir_imm_int(&b->nb, NIR_TRUE);
}
break;