author    Jason Ekstrand <[email protected]>    2019-07-11 15:19:17 -0500
committer Jason Ekstrand <[email protected]>    2019-07-16 16:05:16 +0000
commit    eb768b0a090d1a90d2d4f49e7899067cf9fc0c64 (patch)
tree      ecc537ce95f3a7a730621f30da71a01d04418082 /src/compiler/nir
parent    998d84fca56b2e89ff7342a2c51398d7e9429114 (diff)
nir/alu_to_scalar: Use "alu" as the name for the nir_alu_instr
Reviewed-by: Eric Anholt <[email protected]>
Diffstat (limited to 'src/compiler/nir')
-rw-r--r--  src/compiler/nir/nir_lower_alu_to_scalar.c | 100
1 files changed, 50 insertions, 50 deletions
diff --git a/src/compiler/nir/nir_lower_alu_to_scalar.c b/src/compiler/nir/nir_lower_alu_to_scalar.c
index f46e15e17fd..4814c2d09ec 100644
--- a/src/compiler/nir/nir_lower_alu_to_scalar.c
+++ b/src/compiler/nir/nir_lower_alu_to_scalar.c
@@ -31,32 +31,32 @@
*/
static void
-nir_alu_ssa_dest_init(nir_alu_instr *instr, unsigned num_components,
+nir_alu_ssa_dest_init(nir_alu_instr *alu, unsigned num_components,
unsigned bit_size)
{
- nir_ssa_dest_init(&instr->instr, &instr->dest.dest, num_components,
+ nir_ssa_dest_init(&alu->instr, &alu->dest.dest, num_components,
bit_size, NULL);
- instr->dest.write_mask = (1 << num_components) - 1;
+ alu->dest.write_mask = (1 << num_components) - 1;
}
static void
-lower_reduction(nir_alu_instr *instr, nir_op chan_op, nir_op merge_op,
+lower_reduction(nir_alu_instr *alu, nir_op chan_op, nir_op merge_op,
nir_builder *builder)
{
- unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
+ unsigned num_components = nir_op_infos[alu->op].input_sizes[0];
nir_ssa_def *last = NULL;
for (unsigned i = 0; i < num_components; i++) {
nir_alu_instr *chan = nir_alu_instr_create(builder->shader, chan_op);
- nir_alu_ssa_dest_init(chan, 1, instr->dest.dest.ssa.bit_size);
- nir_alu_src_copy(&chan->src[0], &instr->src[0], chan);
+ nir_alu_ssa_dest_init(chan, 1, alu->dest.dest.ssa.bit_size);
+ nir_alu_src_copy(&chan->src[0], &alu->src[0], chan);
chan->src[0].swizzle[0] = chan->src[0].swizzle[i];
if (nir_op_infos[chan_op].num_inputs > 1) {
assert(nir_op_infos[chan_op].num_inputs == 2);
- nir_alu_src_copy(&chan->src[1], &instr->src[1], chan);
+ nir_alu_src_copy(&chan->src[1], &alu->src[1], chan);
chan->src[1].swizzle[0] = chan->src[1].swizzle[i];
}
- chan->exact = instr->exact;
+ chan->exact = alu->exact;
nir_builder_instr_insert(builder, &chan->instr);
@@ -68,34 +68,34 @@ lower_reduction(nir_alu_instr *instr, nir_op chan_op, nir_op merge_op,
}
}
- assert(instr->dest.write_mask == 1);
- nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, nir_src_for_ssa(last));
- nir_instr_remove(&instr->instr);
+ assert(alu->dest.write_mask == 1);
+ nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(last));
+ nir_instr_remove(&alu->instr);
}
static bool
-lower_alu_instr_scalar(nir_alu_instr *instr, nir_builder *b, BITSET_WORD *lower_set)
+lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_set)
{
- unsigned num_src = nir_op_infos[instr->op].num_inputs;
+ unsigned num_src = nir_op_infos[alu->op].num_inputs;
unsigned i, chan;
- assert(instr->dest.dest.is_ssa);
- assert(instr->dest.write_mask != 0);
+ assert(alu->dest.dest.is_ssa);
+ assert(alu->dest.write_mask != 0);
- b->cursor = nir_before_instr(&instr->instr);
- b->exact = instr->exact;
+ b->cursor = nir_before_instr(&alu->instr);
+ b->exact = alu->exact;
- if (lower_set && !BITSET_TEST(lower_set, instr->op))
+ if (lower_set && !BITSET_TEST(lower_set, alu->op))
return false;
#define LOWER_REDUCTION(name, chan, merge) \
case name##2: \
case name##3: \
case name##4: \
- lower_reduction(instr, chan, merge, b); \
+ lower_reduction(alu, chan, merge, b); \
return true;
- switch (instr->op) {
+ switch (alu->op) {
case nir_op_vec4:
case nir_op_vec3:
case nir_op_vec2:
@@ -110,14 +110,14 @@ lower_alu_instr_scalar(nir_alu_instr *instr, nir_builder *b, BITSET_WORD *lower_
if (!b->shader->options->lower_pack_half_2x16)
return false;
- nir_ssa_def *src_vec2 = nir_ssa_for_alu_src(b, instr, 0);
+ nir_ssa_def *src_vec2 = nir_ssa_for_alu_src(b, alu, 0);
nir_ssa_def *val =
nir_pack_half_2x16_split(b, nir_channel(b, src_vec2, 0),
nir_channel(b, src_vec2, 1));
- nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, nir_src_for_ssa(val));
- nir_instr_remove(&instr->instr);
+ nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
+ nir_instr_remove(&alu->instr);
return true;
case nir_op_unpack_unorm_4x8:
@@ -133,15 +133,15 @@ lower_alu_instr_scalar(nir_alu_instr *instr, nir_builder *b, BITSET_WORD *lower_
if (!b->shader->options->lower_unpack_half_2x16)
return false;
- nir_ssa_def *packed = nir_ssa_for_alu_src(b, instr, 0);
+ nir_ssa_def *packed = nir_ssa_for_alu_src(b, alu, 0);
nir_ssa_def *comps[2];
comps[0] = nir_unpack_half_2x16_split_x(b, packed);
comps[1] = nir_unpack_half_2x16_split_y(b, packed);
nir_ssa_def *vec = nir_vec(b, comps, 2);
- nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, nir_src_for_ssa(vec));
- nir_instr_remove(&instr->instr);
+ nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(vec));
+ nir_instr_remove(&alu->instr);
return true;
}
@@ -149,14 +149,14 @@ lower_alu_instr_scalar(nir_alu_instr *instr, nir_builder *b, BITSET_WORD *lower_
assert(b->shader->options->lower_pack_snorm_2x16 ||
b->shader->options->lower_pack_unorm_2x16);
- nir_ssa_def *word = nir_extract_u16(b, nir_ssa_for_alu_src(b, instr, 0),
+ nir_ssa_def *word = nir_extract_u16(b, nir_ssa_for_alu_src(b, alu, 0),
nir_imm_int(b, 0));
nir_ssa_def *val =
nir_ior(b, nir_ishl(b, nir_channel(b, word, 1), nir_imm_int(b, 16)),
nir_channel(b, word, 0));
- nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, nir_src_for_ssa(val));
- nir_instr_remove(&instr->instr);
+ nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
+ nir_instr_remove(&alu->instr);
break;
}
@@ -164,7 +164,7 @@ lower_alu_instr_scalar(nir_alu_instr *instr, nir_builder *b, BITSET_WORD *lower_
assert(b->shader->options->lower_pack_snorm_4x8 ||
b->shader->options->lower_pack_unorm_4x8);
- nir_ssa_def *byte = nir_extract_u8(b, nir_ssa_for_alu_src(b, instr, 0),
+ nir_ssa_def *byte = nir_extract_u8(b, nir_ssa_for_alu_src(b, alu, 0),
nir_imm_int(b, 0));
nir_ssa_def *val =
nir_ior(b, nir_ior(b, nir_ishl(b, nir_channel(b, byte, 3), nir_imm_int(b, 24)),
@@ -172,14 +172,14 @@ lower_alu_instr_scalar(nir_alu_instr *instr, nir_builder *b, BITSET_WORD *lower_
nir_ior(b, nir_ishl(b, nir_channel(b, byte, 1), nir_imm_int(b, 8)),
nir_channel(b, byte, 0)));
- nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, nir_src_for_ssa(val));
- nir_instr_remove(&instr->instr);
+ nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
+ nir_instr_remove(&alu->instr);
break;
}
case nir_op_fdph: {
- nir_ssa_def *src0_vec = nir_ssa_for_alu_src(b, instr, 0);
- nir_ssa_def *src1_vec = nir_ssa_for_alu_src(b, instr, 1);
+ nir_ssa_def *src0_vec = nir_ssa_for_alu_src(b, alu, 0);
+ nir_ssa_def *src1_vec = nir_ssa_for_alu_src(b, alu, 1);
nir_ssa_def *sum[4];
for (unsigned i = 0; i < 3; i++) {
@@ -191,8 +191,8 @@ lower_alu_instr_scalar(nir_alu_instr *instr, nir_builder *b, BITSET_WORD *lower_
nir_ssa_def *val = nir_fadd(b, nir_fadd(b, sum[0], sum[1]),
nir_fadd(b, sum[2], sum[3]));
- nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, nir_src_for_ssa(val));
- nir_instr_remove(&instr->instr);
+ nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
+ nir_instr_remove(&alu->instr);
return true;
}
@@ -216,43 +216,43 @@ lower_alu_instr_scalar(nir_alu_instr *instr, nir_builder *b, BITSET_WORD *lower_
break;
}
- if (instr->dest.dest.ssa.num_components == 1)
+ if (alu->dest.dest.ssa.num_components == 1)
return false;
- unsigned num_components = instr->dest.dest.ssa.num_components;
+ unsigned num_components = alu->dest.dest.ssa.num_components;
nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS] = { NULL };
for (chan = 0; chan < NIR_MAX_VEC_COMPONENTS; chan++) {
- if (!(instr->dest.write_mask & (1 << chan)))
+ if (!(alu->dest.write_mask & (1 << chan)))
continue;
- nir_alu_instr *lower = nir_alu_instr_create(b->shader, instr->op);
+ nir_alu_instr *lower = nir_alu_instr_create(b->shader, alu->op);
for (i = 0; i < num_src; i++) {
/* We only handle same-size-as-dest (input_sizes[] == 0) or scalar
* args (input_sizes[] == 1).
*/
- assert(nir_op_infos[instr->op].input_sizes[i] < 2);
- unsigned src_chan = (nir_op_infos[instr->op].input_sizes[i] == 1 ?
+ assert(nir_op_infos[alu->op].input_sizes[i] < 2);
+ unsigned src_chan = (nir_op_infos[alu->op].input_sizes[i] == 1 ?
0 : chan);
- nir_alu_src_copy(&lower->src[i], &instr->src[i], lower);
+ nir_alu_src_copy(&lower->src[i], &alu->src[i], lower);
for (int j = 0; j < NIR_MAX_VEC_COMPONENTS; j++)
- lower->src[i].swizzle[j] = instr->src[i].swizzle[src_chan];
+ lower->src[i].swizzle[j] = alu->src[i].swizzle[src_chan];
}
- nir_alu_ssa_dest_init(lower, 1, instr->dest.dest.ssa.bit_size);
- lower->dest.saturate = instr->dest.saturate;
+ nir_alu_ssa_dest_init(lower, 1, alu->dest.dest.ssa.bit_size);
+ lower->dest.saturate = alu->dest.saturate;
comps[chan] = &lower->dest.dest.ssa;
- lower->exact = instr->exact;
+ lower->exact = alu->exact;
nir_builder_instr_insert(b, &lower->instr);
}
nir_ssa_def *vec = nir_vec(b, comps, num_components);
- nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, nir_src_for_ssa(vec));
+ nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(vec));
- nir_instr_remove(&instr->instr);
+ nir_instr_remove(&alu->instr);
return true;
}