-rw-r--r--  src/compiler/spirv/spirv_to_nir.c  | 55
-rw-r--r--  src/compiler/spirv/vtn_cfg.c       |  8
-rw-r--r--  src/compiler/spirv/vtn_variables.c | 29
3 files changed, 78 insertions(+), 14 deletions(-)
diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index a9fd543e12f..cf07eedd972 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -683,6 +683,19 @@ vtn_handle_access_qualifier(struct vtn_builder *b, struct vtn_type *type,
}
static void
+array_stride_decoration_cb(struct vtn_builder *b,
+ struct vtn_value *val, int member,
+ const struct vtn_decoration *dec, void *void_ctx)
+{
+ struct vtn_type *type = val->type;
+
+ if (dec->decoration == SpvDecorationArrayStride) {
+ vtn_fail_if(dec->literals[0] == 0, "ArrayStride must be non-zero");
+ type->stride = dec->literals[0];
+ }
+}
+
+static void
struct_member_decoration_cb(struct vtn_builder *b,
struct vtn_value *val, int member,
const struct vtn_decoration *dec, void *void_ctx)
@@ -739,6 +752,7 @@ struct_member_decoration_cb(struct vtn_builder *b,
break;
case SpvDecorationOffset:
ctx->type->offsets[member] = dec->literals[0];
+ ctx->fields[member].offset = dec->literals[0];
break;
case SpvDecorationMatrixStride:
/* Handled as a second pass */
@@ -796,6 +810,21 @@ struct_member_decoration_cb(struct vtn_builder *b,
}
}
+/** Chases the array type all the way down to the tail and rewrites the
+ * glsl_types to be based off the tail's glsl_type.
+ */
+static void
+vtn_array_type_rewrite_glsl_type(struct vtn_type *type)
+{
+ if (type->base_type != vtn_base_type_array)
+ return;
+
+ vtn_array_type_rewrite_glsl_type(type->array_element);
+
+ type->type = glsl_array_type(type->array_element->type,
+ type->length, type->stride);
+}
+
/* Matrix strides are handled as a separate pass because we need to know
* whether the matrix is row-major or not first.
*/
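To make the tail-first recursion above concrete, here is a minimal standalone model. The struct and the derived-size bookkeeping are hypothetical stand-ins, not Mesa's vtn_type/glsl_type API; the point is only the ordering: recurse to the innermost element first, then rebuild each enclosing level from its already rewritten element.

/* Minimal standalone model of the tail-first rewrite (hypothetical types,
 * not Mesa's API): nested array types are rebuilt bottom-up so that every
 * enclosing level sees the updated element before it is rebuilt itself. */
#include <stdio.h>

struct model_type {
   struct model_type *element;  /* NULL for the scalar tail */
   unsigned length;
   unsigned stride;             /* 0 means "no explicit stride yet" */
   unsigned size;               /* derived: length * stride, or scalar size */
};

/* Analogous in shape to vtn_array_type_rewrite_glsl_type(): recurse first so
 * the element is up to date, then recompute this level from it. */
static void
rewrite_from_tail(struct model_type *t)
{
   if (!t->element)             /* scalar tail: nothing to rebuild */
      return;

   rewrite_from_tail(t->element);
   if (t->stride == 0)          /* no explicit stride: tightly packed */
      t->stride = t->element->size;
   t->size = t->length * t->stride;
}

int
main(void)
{
   struct model_type scalar = { .element = NULL, .size = 4 };
   struct model_type inner  = { .element = &scalar, .length = 3, .stride = 16 };
   struct model_type outer  = { .element = &inner,  .length = 2, .stride = 0 };

   rewrite_from_tail(&outer);
   printf("inner size = %u, outer stride = %u, outer size = %u\n",
          inner.size, outer.stride, outer.size);  /* prints 48, 48, 96 */
   return 0;
}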
@@ -811,6 +840,7 @@ struct_member_matrix_stride_cb(struct vtn_builder *b,
vtn_fail_if(member < 0,
"The MatrixStride decoration is only allowed on members "
"of OpTypeStruct");
+ vtn_fail_if(dec->literals[0] == 0, "MatrixStride must be non-zero");
struct member_decoration_ctx *ctx = void_ctx;
@@ -819,10 +849,24 @@ struct_member_matrix_stride_cb(struct vtn_builder *b,
mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
mat_type->stride = mat_type->array_element->stride;
mat_type->array_element->stride = dec->literals[0];
+
+ mat_type->type = glsl_explicit_matrix_type(mat_type->type,
+ dec->literals[0], true);
+ mat_type->array_element->type = glsl_get_column_type(mat_type->type);
} else {
vtn_assert(mat_type->array_element->stride > 0);
mat_type->stride = dec->literals[0];
+
+ mat_type->type = glsl_explicit_matrix_type(mat_type->type,
+ dec->literals[0], false);
}
+
+ /* Now that we've replaced the glsl_type with a properly strided matrix
+ * type, rewrite the member type so that it's an array of the proper kind
+ * of glsl_type.
+ */
+ vtn_array_type_rewrite_glsl_type(ctx->type->members[member]);
+ ctx->fields[member].type = ctx->type->members[member]->type;
}
static void
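The row-major branch above is easier to follow in terms of byte addressing: MatrixStride is the distance between columns of a column-major matrix but between rows of a row-major one, so for a row-major matrix the decorated stride is what separates the components of a single column vector, which is why it lands on array_element->stride. Below is a self-contained illustration of that addressing rule in plain C, independent of Mesa's types.

/* Self-contained illustration (no Mesa types) of what MatrixStride means:
 * the byte distance between consecutive columns of a column-major matrix,
 * or between consecutive rows of a row-major one. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static unsigned
matrix_elem_offset(unsigned row, unsigned col, unsigned matrix_stride,
                   unsigned scalar_bytes, bool row_major)
{
   return row_major ? row * matrix_stride + col * scalar_bytes
                    : col * matrix_stride + row * scalar_bytes;
}

int
main(void)
{
   /* A matrix of 32-bit floats with MatrixStride = 16 (std140-style padding). */
   assert(matrix_elem_offset(2, 1, 16, 4, false) == 24); /* col-major: 1*16 + 2*4 */
   assert(matrix_elem_offset(2, 1, 16, 4, true)  == 36); /* row-major: 2*16 + 1*4 */
   printf("offsets check out\n");
   return 0;
}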
@@ -841,10 +885,8 @@ type_decoration_cb(struct vtn_builder *b,
switch (dec->decoration) {
case SpvDecorationArrayStride:
- vtn_assert(type->base_type == vtn_base_type_matrix ||
- type->base_type == vtn_base_type_array ||
+ vtn_assert(type->base_type == vtn_base_type_array ||
type->base_type == vtn_base_type_pointer);
- type->stride = dec->literals[0];
break;
case SpvDecorationBlock:
vtn_assert(type->base_type == vtn_base_type_struct);
@@ -1145,9 +1187,12 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
}
val->type->base_type = vtn_base_type_array;
- val->type->type = glsl_array_type(array_element->type, val->type->length, 0);
val->type->array_element = array_element;
val->type->stride = 0;
+
+ vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
+ val->type->type = glsl_array_type(array_element->type, val->type->length,
+ val->type->stride);
break;
}
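The OpTypeArray hunk above (and the matching OpTypePointer hunk below) follow one pattern: walk the value's decorations first, pick up a non-zero ArrayStride, and only then build the glsl_type so the explicit stride is baked into it. The following sketch models that ordering with standalone, hypothetical structs; it is not Mesa's vtn/glsl API, just the shape of the decoration walk.

/* Standalone sketch of the decoration-walk ordering (hypothetical structs,
 * not Mesa's vtn/glsl API): decorations are scanned for ArrayStride first,
 * and only then is the array type built. */
#include <assert.h>
#include <stdio.h>

enum decoration { DEC_ARRAY_STRIDE, DEC_OTHER };

struct decoration_entry {
   enum decoration kind;
   unsigned literal;            /* for ArrayStride: the stride in bytes */
};

struct array_type {
   unsigned length;
   unsigned explicit_stride;    /* 0 means "no explicit layout" */
};

/* Roughly the role of array_stride_decoration_cb(): find ArrayStride and
 * reject a zero stride. */
static unsigned
find_array_stride(const struct decoration_entry *decs, unsigned n)
{
   unsigned stride = 0;
   for (unsigned i = 0; i < n; i++) {
      if (decs[i].kind == DEC_ARRAY_STRIDE) {
         assert(decs[i].literal != 0 && "ArrayStride must be non-zero");
         stride = decs[i].literal;
      }
   }
   return stride;
}

/* Roughly the role of glsl_array_type(elem, length, stride): the stride is
 * an input to type construction, not patched in afterwards. */
static struct array_type
make_array_type(unsigned length, unsigned explicit_stride)
{
   return (struct array_type){ .length = length,
                               .explicit_stride = explicit_stride };
}

int
main(void)
{
   const struct decoration_entry decs[] = {
      { DEC_OTHER, 0 },
      { DEC_ARRAY_STRIDE, 16 },
   };

   /* Walk decorations first, then build the type, as in the hunk above. */
   unsigned stride = find_array_stride(decs, 2);
   struct array_type t = make_array_type(4, stride);
   printf("array length %u, explicit stride %u bytes\n",
          t.length, t.explicit_stride);
   return 0;
}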
@@ -1209,6 +1254,8 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
val->type->storage_class = storage_class;
val->type->deref = deref_type;
+ vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
+
if (storage_class == SpvStorageClassUniform ||
storage_class == SpvStorageClassStorageBuffer) {
/* These can actually be stored to nir_variables and used as SSA
diff --git a/src/compiler/spirv/vtn_cfg.c b/src/compiler/spirv/vtn_cfg.c
index 47273b2d27a..ab0d42942f2 100644
--- a/src/compiler/spirv/vtn_cfg.c
+++ b/src/compiler/spirv/vtn_cfg.c
@@ -194,7 +194,9 @@ vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
struct vtn_type *ret_type = vtn_callee->type->return_type;
if (ret_type->base_type != vtn_base_type_void) {
nir_variable *ret_tmp =
- nir_local_variable_create(b->nb.impl, ret_type->type, "return_tmp");
+ nir_local_variable_create(b->nb.impl,
+ glsl_get_bare_type(ret_type->type),
+ "return_tmp");
ret_deref = nir_build_deref_var(&b->nb, ret_tmp);
call->params[param_idx++] = nir_src_for_ssa(&ret_deref->dest.ssa);
}
@@ -878,9 +880,11 @@ vtn_emit_cf_list(struct vtn_builder *b, struct list_head *cf_list,
vtn_base_type_void,
"Return with a value from a function returning void");
struct vtn_ssa_value *src = vtn_ssa_value(b, block->branch[1]);
+ const struct glsl_type *ret_type =
+ glsl_get_bare_type(b->func->type->return_type->type);
nir_deref_instr *ret_deref =
nir_build_deref_cast(&b->nb, nir_load_param(&b->nb, 0),
- nir_var_local, src->type);
+ nir_var_local, ret_type);
vtn_local_store(b, src, ret_deref);
}
diff --git a/src/compiler/spirv/vtn_variables.c b/src/compiler/spirv/vtn_variables.c
index fb497fc2d06..6e96cb606b4 100644
--- a/src/compiler/spirv/vtn_variables.c
+++ b/src/compiler/spirv/vtn_variables.c
@@ -1624,8 +1624,10 @@ vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
ptr->block_index = NULL;
ptr->offset = ssa;
} else {
+ assert(!vtn_pointer_is_external_block(b, ptr));
+ const struct glsl_type *deref_type = ptr_type->deref->type;
ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
- ptr_type->deref->type);
+ glsl_get_bare_type(deref_type));
}
return ptr;
@@ -1701,14 +1703,17 @@ vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
var->var = rzalloc(b->shader, nir_variable);
var->var->name = ralloc_strdup(var->var, val->name);
- /* Need to tweak the nir type here as at vtn_handle_type we don't have
- * the access to storage_class, that is the one that points us that is
- * an atomic uint.
- */
if (storage_class == SpvStorageClassAtomicCounter) {
+ /* We need to tweak the nir type here because vtn_handle_type() does not
+ * have access to the storage class, which is what tells us that this is
+ * really an atomic uint.
+ */
var->var->type = repair_atomic_type(var->type->type);
} else {
- var->var->type = var->type->type;
+ /* Private variables don't have any explicit layout but some layouts
+ * may have leaked through due to type deduplication in the SPIR-V.
+ */
+ var->var->type = glsl_get_bare_type(var->type->type);
}
var->var->data.mode = nir_mode;
var->var->data.location = -1;
@@ -1722,7 +1727,11 @@ vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
/* Create the variable normally */
var->var = rzalloc(b->shader, nir_variable);
var->var->name = ralloc_strdup(var->var, val->name);
- var->var->type = var->type->type;
+ /* Workgroup variables don't have any explicit layout but some
+ * layouts may have leaked through due to type deduplication in the
+ * SPIR-V.
+ */
+ var->var->type = glsl_get_bare_type(var->type->type);
var->var->data.mode = nir_var_shared;
}
break;
@@ -1775,7 +1784,11 @@ vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
var->var = rzalloc(b->shader, nir_variable);
var->var->name = ralloc_strdup(var->var, val->name);
- var->var->type = var->type->type;
+ /* In Vulkan, shader I/O variables don't have any explicit layout but
+ * some layouts may have leaked through due to type deduplication in
+ * the SPIR-V.
+ */
+ var->var->type = glsl_get_bare_type(var->type->type);
var->var->interface_type = interface_type->type;
var->var->data.mode = nir_mode;
var->var->data.patch = var->patch;
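The comments added throughout vtn_variables.c and the glsl_get_bare_type() calls in vtn_cfg.c all guard against the same failure mode: explicit strides and offsets that leaked into a type through SPIR-V type deduplication would make two logically identical types compare as different. The sketch below models that idea with hypothetical types; it is not Mesa's glsl_type API.

/* Minimal standalone model (hypothetical types, not Mesa's glsl_type API) of
 * why variables with no interface layout are created with a "bare" type: a
 * layout that leaked in from a decorated SPIR-V type would otherwise make two
 * logically identical types look different. */
#include <assert.h>
#include <stdbool.h>

struct model_type {
   unsigned length;
   unsigned explicit_stride;    /* 0 == no explicit layout */
};

/* Analogous in spirit to glsl_get_bare_type(): strip explicit layout. */
static struct model_type
bare_type(struct model_type t)
{
   t.explicit_stride = 0;
   return t;
}

static bool
types_equal(struct model_type a, struct model_type b)
{
   return a.length == b.length && a.explicit_stride == b.explicit_stride;
}

int
main(void)
{
   /* Same logical array, but one copy was deduplicated with an SSBO type
    * and kept its ArrayStride while the other did not. */
   struct model_type leaked = { .length = 4, .explicit_stride = 16 };
   struct model_type plain  = { .length = 4, .explicit_stride = 0 };

   assert(!types_equal(leaked, plain));                       /* spurious mismatch */
   assert(types_equal(bare_type(leaked), bare_type(plain)));  /* bare types agree */
   return 0;
}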