author    Jason Ekstrand <[email protected]>  2018-11-12 15:58:18 -0600
committer Jason Ekstrand <[email protected]>  2018-11-15 19:59:27 -0600
commit    b77d68b78e2bf623bf9ba3f28945cf2080185a9d
tree      129ec21f92efd1dda3a795bfa5892318fff2c702 /src/compiler
parent    1f29f4db1e867357a119c0c7c34fb54dc27fb682
nir/builder: Add iadd_imm and imul_imm helpers
The pattern of adding or multiplying an integer by an immediate is fairly common, especially in deref chain handling. This adds helpers for it and uses them in a few places. The advantage of the helpers is that they automatically handle bit sizes for you.

Reviewed-by: Samuel Iglesias Gonsálvez <[email protected]>
Reviewed-by: Karol Herbst <[email protected]>
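(Not part of the commit: a minimal before/after sketch of the intended usage, assuming a nir_builder *b and an integer SSA def `index` built elsewhere.)

   /* Before: the immediate had to be constructed by hand, with its bit
    * size kept in sync with the other operand. */
   nir_ssa_def *off = nir_iadd(b, index,
                               nir_imm_intN_t(b, 16, index->bit_size));

   /* After: the helper reads the bit size off of `index` itself. */
   nir_ssa_def *off2 = nir_iadd_imm(b, index, 16);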
Diffstat (limited to 'src/compiler')
 src/compiler/nir/nir_builder.h      | 12 ++++++++++++
 src/compiler/nir/nir_lower_io.c     |  5 ++---
 src/compiler/spirv/vtn_variables.c  | 14 ++++++--------
 3 files changed, 20 insertions(+), 11 deletions(-)
diff --git a/src/compiler/nir/nir_builder.h b/src/compiler/nir/nir_builder.h
index 3be630ab3dd..cf096d41bf2 100644
--- a/src/compiler/nir/nir_builder.h
+++ b/src/compiler/nir/nir_builder.h
@@ -553,6 +553,18 @@ nir_channels(nir_builder *b, nir_ssa_def *def, nir_component_mask_t mask)
    return nir_swizzle(b, def, swizzle, num_channels, false);
 }
 
+static inline nir_ssa_def *
+nir_iadd_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+{
+   return nir_iadd(build, x, nir_imm_intN_t(build, y, x->bit_size));
+}
+
+static inline nir_ssa_def *
+nir_imul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+{
+   return nir_imul(build, x, nir_imm_intN_t(build, y, x->bit_size));
+}
+
 /**
  * Turns a nir_src into a nir_ssa_def * so it can be passed to
  * nir_build_alu()-based builder calls.
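A usage note on the helpers above, not from the commit itself: because the immediate is materialized with nir_imm_intN_t at x->bit_size, the same call site is correct for any integer width. A sketch, assuming 32- and 64-bit offset defs built elsewhere:

   /* One spelling for both widths; previously the caller had to choose
    * nir_imm_int() vs. nir_imm_int64() to match the operand. */
   off32 = nir_iadd_imm(b, off32, 8);   /* emits a 32-bit constant */
   off64 = nir_iadd_imm(b, off64, 8);   /* emits a 64-bit constant */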
diff --git a/src/compiler/nir/nir_lower_io.c b/src/compiler/nir/nir_lower_io.c
index b3595bb19d5..f3377eaec8f 100644
--- a/src/compiler/nir/nir_lower_io.c
+++ b/src/compiler/nir/nir_lower_io.c
@@ -127,8 +127,7 @@ get_io_offset(nir_builder *b, nir_deref_instr *deref,
          unsigned size = type_size((*p)->type);
 
          nir_ssa_def *mul =
-            nir_imul(b, nir_imm_int(b, size),
-                     nir_ssa_for_src(b, (*p)->arr.index, 1));
+            nir_imul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
 
          offset = nir_iadd(b, offset, mul);
       } else if ((*p)->deref_type == nir_deref_type_struct) {
@@ -139,7 +138,7 @@ get_io_offset(nir_builder *b, nir_deref_instr *deref,
          for (unsigned i = 0; i < (*p)->strct.index; i++) {
             field_offset += type_size(glsl_get_struct_field(parent->type, i));
          }
-         offset = nir_iadd(b, offset, nir_imm_int(b, field_offset));
+         offset = nir_iadd_imm(b, offset, field_offset);
       } else {
          unreachable("Unsupported deref type");
       }
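Worth noting on the nir_lower_io.c hunks: the old code passed the immediate as the first multiply operand; since integer multiplication is commutative, moving the index into the first argument of nir_imul_imm preserves behavior. The surrounding loop accumulates a byte offset while walking the deref chain, roughly (a simplified sketch with placeholder names, not the verbatim function):

   /* Array deref: offset += index * element_size (element_size is a
    * compile-time constant, so the _imm form applies).
    * Struct deref: offset += constant field offset. */
   if (is_array_deref)
      offset = nir_iadd(b, offset, nir_imul_imm(b, index, elem_size));
   else
      offset = nir_iadd_imm(b, offset, field_offset);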
diff --git a/src/compiler/spirv/vtn_variables.c b/src/compiler/spirv/vtn_variables.c
index c5cf345d02a..586dcc5b99c 100644
--- a/src/compiler/spirv/vtn_variables.c
+++ b/src/compiler/spirv/vtn_variables.c
@@ -138,7 +138,7 @@ vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
       nir_ssa_def *src0 = vtn_ssa_value(b, link.id)->def;
       if (src0->bit_size != 32)
          src0 = nir_u2u32(&b->nb, src0);
-      return nir_imul(&b->nb, src0, nir_imm_int(&b->nb, stride));
+      return nir_imul_imm(&b->nb, src0, stride);
    }
 }
@@ -332,8 +332,7 @@ vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
       case GLSL_TYPE_STRUCT: {
          vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
          unsigned member = deref_chain->link[idx].id;
-         nir_ssa_def *mem_offset = nir_imm_int(&b->nb, type->offsets[member]);
-         offset = nir_iadd(&b->nb, offset, mem_offset);
+         offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
          type = type->members[member];
          access |= type->access;
          break;
@@ -717,7 +716,7 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
       for (unsigned i = 0; i < num_ops; i++) {
          nir_ssa_def *elem_offset =
-            nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * col_stride));
+            nir_iadd_imm(&b->nb, offset, i * col_stride);
          _vtn_load_store_tail(b, op, load, index, elem_offset,
                               access_offset, access_size,
                               &(*inout)->elems[i],
@@ -747,8 +746,7 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
          nir_ssa_def *per_comp[4];
          for (unsigned i = 0; i < elems; i++) {
             nir_ssa_def *elem_offset =
-               nir_iadd(&b->nb, offset,
-                        nir_imm_int(&b->nb, i * type->stride));
+               nir_iadd_imm(&b->nb, offset, i * type->stride);
             struct vtn_ssa_value *comp, temp_val;
             if (!load) {
                temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
@@ -775,7 +773,7 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
       unsigned elems = glsl_get_length(type->type);
       for (unsigned i = 0; i < elems; i++) {
          nir_ssa_def *elem_off =
-            nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
+            nir_iadd_imm(&b->nb, offset, i * type->stride);
          _vtn_block_load_store(b, op, load, index, elem_off,
                                access_offset, access_size,
                                type->array_element,
@@ -789,7 +787,7 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
       unsigned elems = glsl_get_length(type->type);
       for (unsigned i = 0; i < elems; i++) {
          nir_ssa_def *elem_off =
-            nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, type->offsets[i]));
+            nir_iadd_imm(&b->nb, offset, type->offsets[i]);
          _vtn_block_load_store(b, op, load, index, elem_off,
                                access_offset, access_size,
                                type->members[i],
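All of the vtn_variables.c hunks reduce to the same shape: a loop over struct members or array elements adds a compile-time-constant byte offset to a running SSA offset, which is exactly what nir_iadd_imm abstracts. Schematically (placeholder names, not the verbatim code):

   for (unsigned i = 0; i < elems; i++) {
      /* stride/offsets[] are known when the shader is compiled, so the
       * per-element offset folds into a single immediate add. */
      nir_ssa_def *elem_off = nir_iadd_imm(&b->nb, offset, i * stride);
      /* ... load or store the element at elem_off ... */
   }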