summaryrefslogtreecommitdiffstats
path: root/src/compiler/spirv/spirv_to_nir.c
diff options
context:
space:
mode:
authorLionel Landwerlin <[email protected]>2019-07-01 14:57:54 +0300
committerLionel Landwerlin <[email protected]>2019-07-26 14:09:55 +0000
commit86b53770e1ea6e79452ccc97bab829ad58ffc5fd (patch)
tree441f97e2885e2c3a43435e3b8fa94eebc1fc4afe /src/compiler/spirv/spirv_to_nir.c
parent8c330728f3094f2c836e022e57f003d0c82953ef (diff)
spirv: wrap push ssa/pointer values
This refactor allows for common code to apply decoration on all ssa/pointer values. In particular this will allow to propagate access qualifiers. Signed-off-by: Lionel Landwerlin <[email protected]> Suggested-by: Caio Marcelo de Oliveira Filho <[email protected]> Reviewed-by: Caio Marcelo de Oliveira Filho <[email protected]>
Diffstat (limited to 'src/compiler/spirv/spirv_to_nir.c')
-rw-r--r--src/compiler/spirv/spirv_to_nir.c82
1 file changed, 41 insertions, 41 deletions
diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index 76ccbfad514..c2528cef35a 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -2060,19 +2060,17 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
return;
} else if (opcode == SpvOpImage) {
- struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
if (src_val->value_type == vtn_value_type_sampled_image) {
- val->pointer = src_val->sampled_image->image;
+ vtn_push_value_pointer(b, w[2], src_val->sampled_image->image);
} else {
vtn_assert(src_val->value_type == vtn_value_type_pointer);
- val->pointer = src_val->pointer;
+ vtn_push_value_pointer(b, w[2], src_val->pointer);
}
return;
}
struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;
- struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
struct vtn_sampled_image sampled;
struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
@@ -2385,8 +2383,9 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
}
}
- val->ssa = vtn_create_ssa_value(b, ret_type->type);
- val->ssa->def = &instr->dest.ssa;
+ struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, ret_type->type);
+ ssa->def = &instr->dest.ssa;
+ vtn_push_ssa(b, w[2], ret_type, ssa);
nir_builder_instr_insert(&b->nb, &instr->instr);
}
@@ -2614,7 +2613,6 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
}
if (opcode != SpvOpImageWrite && opcode != SpvOpAtomicStore) {
- struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
unsigned dest_components = glsl_get_vector_elements(type->type);
@@ -2631,7 +2629,8 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
if (intrin->num_components != dest_components)
result = nir_channels(&b->nb, result, (1 << dest_components) - 1);
- val->ssa = vtn_create_ssa_value(b, type->type);
+ struct vtn_value *val =
+ vtn_push_ssa(b, w[2], type, vtn_create_ssa_value(b, type->type));
val->ssa->def = result;
} else {
nir_builder_instr_insert(&b->nb, &intrin->instr);
@@ -2942,10 +2941,10 @@ vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
glsl_get_vector_elements(type->type),
glsl_get_bit_size(type->type), NULL);
- struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
- val->ssa = rzalloc(b, struct vtn_ssa_value);
- val->ssa->def = &atomic->dest.ssa;
- val->ssa->type = type->type;
+ struct vtn_ssa_value *ssa = rzalloc(b, struct vtn_ssa_value);
+ ssa->def = &atomic->dest.ssa;
+ ssa->type = type->type;
+ vtn_push_ssa(b, w[2], type, ssa);
}
nir_builder_instr_insert(&b->nb, &atomic->instr);
@@ -3185,66 +3184,66 @@ static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count)
{
- struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
- const struct glsl_type *type =
- vtn_value(b, w[1], vtn_value_type_type)->type->type;
- val->ssa = vtn_create_ssa_value(b, type);
+ struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
+ struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);
switch (opcode) {
case SpvOpVectorExtractDynamic:
- val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
- vtn_ssa_value(b, w[4])->def);
+ ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
+ vtn_ssa_value(b, w[4])->def);
break;
case SpvOpVectorInsertDynamic:
- val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
- vtn_ssa_value(b, w[4])->def,
- vtn_ssa_value(b, w[5])->def);
+ ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
+ vtn_ssa_value(b, w[4])->def,
+ vtn_ssa_value(b, w[5])->def);
break;
case SpvOpVectorShuffle:
- val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
- vtn_ssa_value(b, w[3])->def,
- vtn_ssa_value(b, w[4])->def,
- w + 5);
+ ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type->type),
+ vtn_ssa_value(b, w[3])->def,
+ vtn_ssa_value(b, w[4])->def,
+ w + 5);
break;
case SpvOpCompositeConstruct: {
unsigned elems = count - 3;
assume(elems >= 1);
- if (glsl_type_is_vector_or_scalar(type)) {
+ if (glsl_type_is_vector_or_scalar(type->type)) {
nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS];
for (unsigned i = 0; i < elems; i++)
srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
- val->ssa->def =
- vtn_vector_construct(b, glsl_get_vector_elements(type),
+ ssa->def =
+ vtn_vector_construct(b, glsl_get_vector_elements(type->type),
elems, srcs);
} else {
- val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+ ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
for (unsigned i = 0; i < elems; i++)
- val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
+ ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
}
break;
}
case SpvOpCompositeExtract:
- val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
- w + 4, count - 4);
+ ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
+ w + 4, count - 4);
break;
case SpvOpCompositeInsert:
- val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
- vtn_ssa_value(b, w[3]),
- w + 5, count - 5);
+ ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
+ vtn_ssa_value(b, w[3]),
+ w + 5, count - 5);
break;
case SpvOpCopyLogical:
case SpvOpCopyObject:
- val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
+ ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
break;
default:
vtn_fail_with_opcode("unknown composite operation", opcode);
}
+
+ vtn_push_ssa(b, w[2], type, ssa);
}
static void
@@ -4194,8 +4193,9 @@ vtn_handle_ptr(struct vtn_builder *b, SpvOp opcode,
"%s operands must have the same storage class",
spirv_op_to_string(opcode));
- const struct glsl_type *type =
- vtn_value(b, w[1], vtn_value_type_type)->type->type;
+ struct vtn_type *vtn_type =
+ vtn_value(b, w[1], vtn_value_type_type)->type;
+ const struct glsl_type *type = vtn_type->type;
nir_address_format addr_format = vtn_mode_to_address_format(
b, vtn_storage_class_to_mode(b, type1->storage_class, NULL, NULL));
@@ -4233,9 +4233,9 @@ vtn_handle_ptr(struct vtn_builder *b, SpvOp opcode,
unreachable("Invalid ptr operation");
}
- struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
- val->ssa = vtn_create_ssa_value(b, type);
- val->ssa->def = def;
+ struct vtn_ssa_value *ssa_value = vtn_create_ssa_value(b, type);
+ ssa_value->def = def;
+ vtn_push_ssa(b, w[2], vtn_type, ssa_value);
}
static bool