-rw-r--r--  src/panfrost/midgard/compiler.h                 |  29
-rw-r--r--  src/panfrost/midgard/midgard_compile.c          | 128
-rw-r--r--  src/panfrost/midgard/midgard_derivatives.c      |   8
-rw-r--r--  src/panfrost/midgard/midgard_emit.c             |   4
-rw-r--r--  src/panfrost/midgard/midgard_liveness.c         |   4
-rw-r--r--  src/panfrost/midgard/midgard_opt_copy_prop.c    |   8
-rw-r--r--  src/panfrost/midgard/midgard_opt_dce.c          |  14
-rw-r--r--  src/panfrost/midgard/midgard_opt_float.c        |   2
-rw-r--r--  src/panfrost/midgard/midgard_opt_invert.c       |  44
-rw-r--r--  src/panfrost/midgard/midgard_opt_perspective.c  |  24
-rw-r--r--  src/panfrost/midgard/midgard_print.c            |  12
-rw-r--r--  src/panfrost/midgard/midgard_ra.c               |  90
-rw-r--r--  src/panfrost/midgard/midgard_ra_pipeline.c      |   8
-rw-r--r--  src/panfrost/midgard/midgard_schedule.c         |  52
-rw-r--r--  src/panfrost/midgard/mir.c                      |  24
-rw-r--r--  src/panfrost/midgard/mir_promote_uniforms.c     |   8
16 files changed, 210 insertions(+), 249 deletions(-)
diff --git a/src/panfrost/midgard/compiler.h b/src/panfrost/midgard/compiler.h
index 8bea56548ae..099d108142b 100644
--- a/src/panfrost/midgard/compiler.h
+++ b/src/panfrost/midgard/compiler.h
@@ -67,16 +67,6 @@ typedef struct midgard_branch {
};
} midgard_branch;
-/* Instruction arguments represented as block-local SSA indices, rather than
- * registers. ~0 means unused. */
-
-typedef struct {
- unsigned src[3];
- unsigned dest;
-
- bool inline_constant;
-} ssa_args;
-
/* Generic in-memory data type representing a single logical instruction, rather
 * than a single instruction group. This is the preferred form for code gen.
 * Multiple midgard_instructions will later be combined during scheduling,
@@ -93,8 +83,10 @@ typedef struct midgard_instruction {
unsigned type; /* ALU, load/store, texture */
- /* If the register allocator has not run yet... */
- ssa_args ssa_args;
+ /* Instruction arguments represented as block-local SSA
+ * indices, rather than registers. ~0 means unused. */
+ unsigned src[3];
+ unsigned dest;
/* Special fields for an ALU instruction */
midgard_reg_info registers;
@@ -112,6 +104,7 @@ typedef struct midgard_instruction {
uint32_t constants[4];
uint16_t inline_constant;
bool has_blend_constant;
+ bool has_inline_constant;
bool compact_branch;
bool writeout;
@@ -396,7 +389,7 @@ mir_next_op(struct midgard_instruction *ins)
v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL))
#define mir_foreach_src(ins, v) \
- for (unsigned v = 0; v < ARRAY_SIZE(ins->ssa_args.src); ++v)
+ for (unsigned v = 0; v < ARRAY_SIZE(ins->src); ++v)
static inline midgard_instruction *
mir_last_in_block(struct midgard_block *block)
@@ -533,10 +526,8 @@ v_mov(unsigned src, midgard_vector_alu_src mod, unsigned dest)
midgard_instruction ins = {
.type = TAG_ALU_4,
.mask = 0xF,
- .ssa_args = {
- .src = { SSA_UNUSED, src, SSA_UNUSED },
- .dest = dest,
- },
+ .src = { SSA_UNUSED, src, SSA_UNUSED },
+ .dest = dest,
.alu = {
.op = midgard_alu_op_imov,
.reg_mode = midgard_reg_mode_32,
@@ -553,8 +544,8 @@ v_mov(unsigned src, midgard_vector_alu_src mod, unsigned dest)
static inline bool
mir_has_arg(midgard_instruction *ins, unsigned arg)
{
- for (unsigned i = 0; i < ARRAY_SIZE(ins->ssa_args.src); ++i) {
- if (ins->ssa_args.src[i] == arg)
+ for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
+ if (ins->src[i] == arg)
return true;
}
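
Taken together, the compiler.h hunks replace the nested ssa_args struct with flat src[3], dest, and has_inline_constant fields directly on midgard_instruction. The standalone sketch below shows what accesses look like after the flattening, mirroring the mir_has_arg() hunk above; the mini_instruction type and all names in it are assumptions made purely for illustration, not the real compiler headers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SSA_UNUSED (~0u)

/* Illustrative stand-in for midgard_instruction: only the fields this
 * patch flattens out of the old ssa_args struct. */
typedef struct {
        unsigned src[3];          /* block-local SSA indices, ~0 == unused */
        unsigned dest;
        bool     has_inline_constant;
        uint16_t inline_constant;
} mini_instruction;

/* Mirrors mir_has_arg() after the refactor: scan the flat src[] array
 * instead of ins->ssa_args.src[]. */
static bool
mini_has_arg(const mini_instruction *ins, unsigned arg)
{
        for (unsigned i = 0; i < 3; ++i) {
                if (ins->src[i] == arg)
                        return true;
        }

        return false;
}

int
main(void)
{
        /* Shaped like the v_mov() initializer above, minus the ALU fields. */
        mini_instruction mov = {
                .src = { SSA_UNUSED, 7, SSA_UNUSED },
                .dest = 12,
        };

        printf("reads node 7? %d\n", mini_has_arg(&mov, 7)); /* prints 1 */
        printf("reads node 3? %d\n", mini_has_arg(&mov, 3)); /* prints 0 */
        return 0;
}
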
diff --git a/src/panfrost/midgard/midgard_compile.c b/src/panfrost/midgard/midgard_compile.c
index 816adb7fd38..74511b278d1 100644
--- a/src/panfrost/midgard/midgard_compile.c
+++ b/src/panfrost/midgard/midgard_compile.c
@@ -104,10 +104,8 @@ midgard_block_add_successor(midgard_block *block, midgard_block *successor)
midgard_instruction i = { \
.type = TAG_LOAD_STORE_4, \
.mask = 0xF, \
- .ssa_args = { \
- .dest = ~0, \
- .src = { ~0, ~0, ~0 }, \
- }, \
+ .dest = ~0, \
+ .src = { ~0, ~0, ~0 }, \
.load_store = { \
.op = midgard_op_##name, \
.swizzle = SWIZZLE_XYZW, \
@@ -116,9 +114,9 @@ midgard_block_add_successor(midgard_block *block, midgard_block *successor)
}; \
\
if (store) \
- i.ssa_args.src[0] = ssa; \
+ i.src[0] = ssa; \
else \
- i.ssa_args.dest = ssa; \
+ i.dest = ssa; \
\
return i; \
}
@@ -212,10 +210,8 @@ v_alu_br_compact_cond(midgard_jmp_writeout_op op, unsigned tag, signed offset, u
.prepacked_branch = true,
.compact_branch = true,
.br_compact = compact,
- .ssa_args = {
- .dest = ~0,
- .src = { ~0, ~0, ~0 },
- }
+ .dest = ~0,
+ .src = { ~0, ~0, ~0 },
};
if (op == midgard_jmp_writeout_op_writeout)
@@ -235,10 +231,8 @@ v_branch(bool conditional, bool invert)
.conditional = conditional,
.invert_conditional = invert
},
- .ssa_args = {
- .dest = ~0,
- .src = { ~0, ~0, ~0 },
- }
+ .dest = ~0,
+ .src = { ~0, ~0, ~0 },
};
return ins;
@@ -620,11 +614,8 @@ emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned co
.precede_break = true,
.unit = for_branch ? UNIT_SMUL : UNIT_SADD,
.mask = 1 << COMPONENT_W,
-
- .ssa_args = {
- .src = { condition, condition, ~0 },
- .dest = SSA_FIXED_REGISTER(31),
- },
+ .src = { condition, condition, ~0 },
+ .dest = SSA_FIXED_REGISTER(31),
.alu = {
.op = midgard_alu_op_iand,
@@ -660,10 +651,8 @@ emit_condition_mixed(compiler_context *ctx, nir_alu_src *src, unsigned nr_comp)
.type = TAG_ALU_4,
.precede_break = true,
.mask = mask_of(nr_comp),
- .ssa_args = {
- .src = { condition, condition, ~0 },
- .dest = SSA_FIXED_REGISTER(31),
- },
+ .src = { condition, condition, ~0 },
+ .dest = SSA_FIXED_REGISTER(31),
.alu = {
.op = midgard_alu_op_iand,
.outmod = midgard_outmod_int_wrap,
@@ -1029,14 +1018,12 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
midgard_instruction ins = {
.type = TAG_ALU_4,
- .ssa_args = {
- .src = {
- quirk_flipped_r24 ? ~0 : src0,
- quirk_flipped_r24 ? src0 : src1,
- ~0
- },
- .dest = dest,
- }
+ .src = {
+ quirk_flipped_r24 ? ~0 : src0,
+ quirk_flipped_r24 ? src0 : src1,
+ ~0
+ },
+ .dest = dest,
};
nir_alu_src *nirmods[2] = { NULL };
@@ -1091,8 +1078,8 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
* inline, since we're 32-bit, not 16-bit like the inline
* constants) */
- ins.ssa_args.inline_constant = false;
- ins.ssa_args.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
+ ins.has_inline_constant = false;
+ ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
ins.has_constants = true;
if (instr->op == nir_op_b2f32) {
@@ -1105,8 +1092,8 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
} else if (nr_inputs == 1 && !quirk_flipped_r24) {
/* Lots of instructions need a 0 plonked in */
- ins.ssa_args.inline_constant = false;
- ins.ssa_args.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
+ ins.has_inline_constant = false;
+ ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
ins.has_constants = true;
ins.constants[0] = 0;
ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
@@ -1182,7 +1169,7 @@ emit_ubo_read(
ins.mask = mir_mask_for_intr(instr, true);
if (indirect_offset) {
- ins.ssa_args.src[1] = nir_src_index(ctx, indirect_offset);
+ ins.src[1] = nir_src_index(ctx, indirect_offset);
ins.load_store.arg_2 = 0x80;
} else {
ins.load_store.arg_2 = 0x1E;
@@ -1237,7 +1224,7 @@ emit_ssbo_access(
* arg_2 = the offset.
*/
- ins.ssa_args.src[is_read ? 0 : 1] = addr;
+ ins.src[is_read ? 0 : 1] = addr;
/* TODO: What is this? It looks superficially like a shift << 5, but
* arg_1 doesn't take a shift Should it be E0 or A0? */
@@ -1247,7 +1234,7 @@ emit_ssbo_access(
/* We also need to emit the indirect offset */
if (indirect_offset)
- ins.ssa_args.src[is_read ? 1 : 2] = nir_src_index(ctx, indirect_offset);
+ ins.src[is_read ? 1 : 2] = nir_src_index(ctx, indirect_offset);
else
ins.load_store.arg_2 = 0x7E;
@@ -1287,7 +1274,7 @@ emit_varying_read(
ins.load_store.varying_parameters = u;
if (indirect_offset)
- ins.ssa_args.src[1] = nir_src_index(ctx, indirect_offset);
+ ins.src[1] = nir_src_index(ctx, indirect_offset);
else
ins.load_store.arg_2 = 0x1E;
@@ -1369,9 +1356,7 @@ emit_fragment_store(compiler_context *ctx, unsigned src, unsigned rt)
/* If we're doing MRT, we need to specify the render target */
midgard_instruction rt_move = {
- .ssa_args = {
- .dest = ~0
- }
+ .dest = ~0
};
if (rt != 0) {
@@ -1381,7 +1366,7 @@ emit_fragment_store(compiler_context *ctx, unsigned src, unsigned rt)
rt_move.unit = UNIT_SADD;
/* r1.z = (rt * 0x100) */
- rt_move.ssa_args.inline_constant = true;
+ rt_move.has_inline_constant = true;
rt_move.inline_constant = (rt * 0x100);
/* r1 */
@@ -1400,8 +1385,8 @@ emit_fragment_store(compiler_context *ctx, unsigned src, unsigned rt)
v_alu_br_compact_cond(midgard_jmp_writeout_op_writeout, TAG_ALU_4, offset, midgard_condition_always);
/* Add dependencies */
- ins.ssa_args.src[0] = move.ssa_args.dest;
- ins.ssa_args.src[1] = rt_move.ssa_args.dest;
+ ins.src[0] = move.dest;
+ ins.src[1] = rt_move.dest;
/* Emit the branch */
emit_mir_instruction(ctx, ins);
@@ -1731,10 +1716,8 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
midgard_instruction ins = {
.type = TAG_TEXTURE_4,
.mask = 0xF,
- .ssa_args = {
- .dest = nir_dest_index(ctx, &instr->dest),
- .src = { ~0, ~0, ~0 },
- },
+ .dest = nir_dest_index(ctx, &instr->dest),
+ .src = { ~0, ~0, ~0 },
.texture = {
.op = midgard_texop,
.format = midgard_tex_format(instr->sampler_dim),
@@ -1793,16 +1776,16 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
unsigned temp = make_compiler_temp(ctx);
midgard_instruction ld = m_ld_cubemap_coords(temp, 0);
- ld.ssa_args.src[0] = index;
+ ld.src[0] = index;
ld.mask = 0x3; /* xy */
ld.load_store.arg_1 = 0x20;
ld.load_store.swizzle = alu_src.swizzle;
emit_mir_instruction(ctx, ld);
- ins.ssa_args.src[0] = temp;
+ ins.src[0] = temp;
ins.texture.in_reg_swizzle = SWIZZLE_XYXX;
} else {
- ins.ssa_args.src[0] = index;
+ ins.src[0] = index;
}
if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D) {
@@ -1827,7 +1810,7 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
break;
ins.texture.lod_register = true;
- ins.ssa_args.src[1] = index;
+ ins.src[1] = index;
emit_explicit_constant(ctx, index, index);
break;
@@ -1931,12 +1914,12 @@ emit_instr(compiler_context *ctx, struct nir_instr *instr)
/* ALU instructions can inline or embed constants, which decreases register
* pressure and saves space. */
-#define CONDITIONAL_ATTACH(src) { \
- void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->ssa_args.src + 1); \
+#define CONDITIONAL_ATTACH(idx) { \
+ void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[idx] + 1); \
\
if (entry) { \
- attach_constants(ctx, alu, entry, alu->ssa_args.src + 1); \
- alu->ssa_args.src = SSA_FIXED_REGISTER(REGISTER_CONSTANT); \
+ attach_constants(ctx, alu, entry, alu->src[idx] + 1); \
+ alu->src[idx] = SSA_FIXED_REGISTER(REGISTER_CONSTANT); \
} \
}
@@ -1950,10 +1933,10 @@ inline_alu_constants(compiler_context *ctx)
/* If there is already a constant here, we can do nothing */
if (alu->has_constants) continue;
- CONDITIONAL_ATTACH(src[0]);
+ CONDITIONAL_ATTACH(0);
if (!alu->has_constants) {
- CONDITIONAL_ATTACH(src[1])
+ CONDITIONAL_ATTACH(1)
} else if (!alu->inline_constant) {
/* Corner case: _two_ vec4 constants, for instance with a
* csel. For this case, we can only use a constant
@@ -1965,18 +1948,18 @@ inline_alu_constants(compiler_context *ctx)
* to the destination register.
*/
- void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->ssa_args.src[1] + 1);
- unsigned scratch = alu->ssa_args.dest;
+ void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[1] + 1);
+ unsigned scratch = alu->dest;
if (entry) {
midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, scratch);
- attach_constants(ctx, &ins, entry, alu->ssa_args.src[1] + 1);
+ attach_constants(ctx, &ins, entry, alu->src[1] + 1);
/* Force a break XXX Defer r31 writes */
ins.unit = UNIT_VLUT;
/* Set the source */
- alu->ssa_args.src[1] = scratch;
+ alu->src[1] = scratch;
/* Inject us -before- the last instruction which set r31 */
mir_insert_instruction_before(mir_prev_op(alu), ins);
@@ -2030,8 +2013,7 @@ embedded_to_inline_constant(compiler_context *ctx)
{
mir_foreach_instr(ctx, ins) {
if (!ins->has_constants) continue;
-
- if (ins->ssa_args.inline_constant) continue;
+ if (ins->has_inline_constant) continue;
/* Blend constants must not be inlined by definition */
if (ins->has_blend_constant) continue;
@@ -2049,7 +2031,7 @@ embedded_to_inline_constant(compiler_context *ctx)
int op = ins->alu.op;
- if (ins->ssa_args.src[0] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
+ if (ins->src[0] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
bool flip = alu_opcode_props[op].props & OP_COMMUTES;
switch (op) {
@@ -2072,8 +2054,8 @@ embedded_to_inline_constant(compiler_context *ctx)
if (flip) {
/* Flip the SSA numbers */
- ins->ssa_args.src[0] = ins->ssa_args.src[1];
- ins->ssa_args.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
+ ins->src[0] = ins->src[1];
+ ins->src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
/* And flip the modifiers */
@@ -2085,7 +2067,7 @@ embedded_to_inline_constant(compiler_context *ctx)
}
}
- if (ins->ssa_args.src[1] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
+ if (ins->src[1] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
/* Extract the source information */
midgard_vector_alu_src *src;
@@ -2159,8 +2141,8 @@ embedded_to_inline_constant(compiler_context *ctx)
/* Get rid of the embedded constant */
ins->has_constants = false;
- ins->ssa_args.src[1] = ~0;
- ins->ssa_args.inline_constant = true;
+ ins->src[1] = ~0;
+ ins->has_inline_constant = true;
ins->inline_constant = scaled_constant;
}
}
@@ -2225,7 +2207,7 @@ midgard_opt_pos_propagate(compiler_context *ctx, midgard_block *block)
if (ins->alu.outmod != midgard_outmod_pos) continue;
/* TODO: Registers? */
- unsigned src = ins->ssa_args.src[1];
+ unsigned src = ins->src[1];
if (src & IS_REG) continue;
assert(!mir_has_multiple_writes(ctx, src));
@@ -2235,7 +2217,7 @@ midgard_opt_pos_propagate(compiler_context *ctx, midgard_block *block)
/* Backpropagate the modifier */
mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
if (v->type != TAG_ALU_4) continue;
- if (v->ssa_args.dest != src) continue;
+ if (v->dest != src) continue;
/* Can we even take a float outmod? */
if (midgard_is_integer_out_op(v->alu.op)) continue;
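
The embedded_to_inline_constant() hunks above show the renamed flag in action: when a constant fits the 16-bit inline form, the instruction drops its constant source and sets has_inline_constant instead of the old ssa_args.inline_constant. A minimal sketch of that promotion, written against the illustrative mini_instruction type from the previous example (the helper name is an assumption, not compiler API):

/* Sketch only: mirrors the tail of embedded_to_inline_constant() above. */
static void
mini_promote_to_inline(mini_instruction *ins, uint16_t scaled_constant)
{
        /* Get rid of the embedded constant source... */
        ins->src[1] = SSA_UNUSED;

        /* ...and carry the value inline instead. */
        ins->has_inline_constant = true;
        ins->inline_constant = scaled_constant;
}
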
diff --git a/src/panfrost/midgard/midgard_derivatives.c b/src/panfrost/midgard/midgard_derivatives.c
index 9a1506372b8..ce45b46ecb9 100644
--- a/src/panfrost/midgard/midgard_derivatives.c
+++ b/src/panfrost/midgard/midgard_derivatives.c
@@ -94,10 +94,8 @@ midgard_emit_derivatives(compiler_context *ctx, nir_alu_instr *instr)
midgard_instruction ins = {
.type = TAG_TEXTURE_4,
.mask = mask_of(nr_components),
- .ssa_args = {
- .dest = nir_dest_index(ctx, &instr->dest.dest),
- .src = { nir_alu_src_index(ctx, &instr->src[0]), ~0, ~0 },
- },
+ .dest = nir_dest_index(ctx, &instr->dest.dest),
+ .src = { nir_alu_src_index(ctx, &instr->src[0]), ~0, ~0 },
.texture = {
.op = mir_derivative_op(instr->op),
.format = MALI_TEX_2D,
@@ -159,6 +157,6 @@ midgard_lower_derivatives(compiler_context *ctx, midgard_block *block)
* rewrite to use a register */
unsigned new = make_compiler_temp_reg(ctx);
- mir_rewrite_index(ctx, ins->ssa_args.dest, new);
+ mir_rewrite_index(ctx, ins->dest, new);
}
}
diff --git a/src/panfrost/midgard/midgard_emit.c b/src/panfrost/midgard/midgard_emit.c
index 1996e1b1ef6..0d904f7166e 100644
--- a/src/panfrost/midgard/midgard_emit.c
+++ b/src/panfrost/midgard/midgard_emit.c
@@ -90,7 +90,7 @@ vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
{
bool is_int = midgard_is_integer_op(v.op);
bool is_full = v.reg_mode == midgard_reg_mode_32;
- bool is_inline_constant = ins->ssa_args.inline_constant;
+ bool is_inline_constant = ins->has_inline_constant;
unsigned comp = component_from_mask(ins->mask);
@@ -114,7 +114,7 @@ vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
/* Inline constant is passed along rather than trying to extract it
* from v */
- if (ins->ssa_args.inline_constant) {
+ if (ins->has_inline_constant) {
uint16_t imm = 0;
int lower_11 = ins->inline_constant & ((1 << 12) - 1);
imm |= (lower_11 >> 9) & 3;
diff --git a/src/panfrost/midgard/midgard_liveness.c b/src/panfrost/midgard/midgard_liveness.c
index 8ecb22ee273..155e1cc3514 100644
--- a/src/panfrost/midgard/midgard_liveness.c
+++ b/src/panfrost/midgard/midgard_liveness.c
@@ -55,7 +55,7 @@ is_live_after_successors(compiler_context *ctx, midgard_block *bl, int src)
/* If written-before-use, we're gone */
- if (ins->ssa_args.dest == src)
+ if (ins->dest == src)
overwritten_mask |= ins->mask;
}
@@ -100,7 +100,7 @@ mir_has_multiple_writes(compiler_context *ctx, int dest)
unsigned write_count = 0;
mir_foreach_instr_global(ctx, ins) {
- if (ins->ssa_args.dest == dest)
+ if (ins->dest == dest)
write_count++;
}
diff --git a/src/panfrost/midgard/midgard_opt_copy_prop.c b/src/panfrost/midgard/midgard_opt_copy_prop.c
index 68c1a4d0d55..9a278876088 100644
--- a/src/panfrost/midgard/midgard_opt_copy_prop.c
+++ b/src/panfrost/midgard/midgard_opt_copy_prop.c
@@ -34,8 +34,8 @@ midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
if (ins->type != TAG_ALU_4) continue;
if (!OP_IS_MOVE(ins->alu.op)) continue;
- unsigned from = ins->ssa_args.src[1];
- unsigned to = ins->ssa_args.dest;
+ unsigned from = ins->src[1];
+ unsigned to = ins->dest;
/* We only work on pure SSA */
@@ -45,7 +45,7 @@ midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
if (from & IS_REG) continue;
/* Constant propagation is not handled here, either */
- if (ins->ssa_args.inline_constant) continue;
+ if (ins->has_inline_constant) continue;
if (ins->has_constants) continue;
/* Modifier propagation is not handled here */
@@ -72,7 +72,7 @@ midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
OP_IS_STORE(q->load_store.op) ? 1 : 0;
mir_foreach_src(q, s) {
- if ((s >= start) && q->ssa_args.src[s] == to) {
+ if ((s >= start) && q->src[s] == to) {
skip = true;
break;
}
diff --git a/src/panfrost/midgard/midgard_opt_dce.c b/src/panfrost/midgard/midgard_opt_dce.c
index 57768ed69c3..5d23923b8e6 100644
--- a/src/panfrost/midgard/midgard_opt_dce.c
+++ b/src/panfrost/midgard/midgard_opt_dce.c
@@ -35,8 +35,8 @@ midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block)
if (ins->type != TAG_ALU_4) continue;
if (ins->compact_branch) continue;
- if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
- if (mir_is_live_after(ctx, block, ins, ins->ssa_args.dest)) continue;
+ if (ins->dest >= SSA_FIXED_MINIMUM) continue;
+ if (mir_is_live_after(ctx, block, ins, ins->dest)) continue;
mir_remove_instruction(ins);
progress = true;
@@ -64,11 +64,11 @@ midgard_opt_dead_move_eliminate(compiler_context *ctx, midgard_block *block)
mir_foreach_instr_in_block_from(block, q, mir_next_op(ins)) {
/* Check if used */
- if (mir_has_arg(q, ins->ssa_args.dest))
+ if (mir_has_arg(q, ins->dest))
break;
/* Check if overwritten */
- if (q->ssa_args.dest == ins->ssa_args.dest) {
+ if (q->dest == ins->dest) {
/* Special case to vec4; component tracking is
* harder */
@@ -100,8 +100,8 @@ midgard_opt_post_move_eliminate(compiler_context *ctx, midgard_block *block, str
if (ins->dont_eliminate) continue;
/* Check we're to the same place post-RA */
- unsigned iA = ins->ssa_args.dest;
- unsigned iB = ins->ssa_args.src[1];
+ unsigned iA = ins->dest;
+ unsigned iB = ins->src[1];
if ((iA == ~0) || (iB == ~0)) continue;
@@ -125,7 +125,7 @@ midgard_opt_post_move_eliminate(compiler_context *ctx, midgard_block *block, str
if (ins->mask != 0xF) continue;
/* We do need to rewrite to facilitate pipelining/scheduling */
- mir_rewrite_index(ctx, ins->ssa_args.src[1], ins->ssa_args.dest);
+ mir_rewrite_index(ctx, ins->src[1], ins->dest);
/* We're good to go */
mir_remove_instruction(ins);
diff --git a/src/panfrost/midgard/midgard_opt_float.c b/src/panfrost/midgard/midgard_opt_float.c
index 630620120e3..39f95202857 100644
--- a/src/panfrost/midgard/midgard_opt_float.c
+++ b/src/panfrost/midgard/midgard_opt_float.c
@@ -52,7 +52,7 @@ midgard_opt_promote_fmov(compiler_context *ctx, midgard_block *block)
mir_foreach_instr_in_block(block, ins) {
if (ins->type != TAG_ALU_4) continue;
if (ins->alu.op != midgard_alu_op_imov) continue;
- if (ins->ssa_args.inline_constant) continue;
+ if (ins->has_inline_constant) continue;
if (!ins->has_constants) continue;
if (mir_nontrivial_source2_mod_simple(ins)) continue;
if (mir_nontrivial_outmod(ins)) continue;
diff --git a/src/panfrost/midgard/midgard_opt_invert.c b/src/panfrost/midgard/midgard_opt_invert.c
index 592a5d381b5..da0cff88649 100644
--- a/src/panfrost/midgard/midgard_opt_invert.c
+++ b/src/panfrost/midgard/midgard_opt_invert.c
@@ -40,11 +40,9 @@ midgard_lower_invert(compiler_context *ctx, midgard_block *block)
midgard_instruction not = {
.type = TAG_ALU_4,
.mask = ins->mask,
- .ssa_args = {
- .src = { temp, ~0, ~0 },
- .dest = ins->ssa_args.dest,
- .inline_constant = true
- },
+ .src = { temp, ~0, ~0 },
+ .dest = ins->dest,
+ .has_inline_constant = true,
.alu = {
.op = midgard_alu_op_inor,
/* TODO: i16 */
@@ -56,7 +54,7 @@ midgard_lower_invert(compiler_context *ctx, midgard_block *block)
},
};
- ins->ssa_args.dest = temp;
+ ins->dest = temp;
ins->invert = false;
mir_insert_instruction_before(mir_next_op(ins), not);
}
@@ -74,15 +72,15 @@ midgard_opt_not_propagate(compiler_context *ctx, midgard_block *block)
if (ins->alu.op != midgard_alu_op_imov) continue;
if (!ins->invert) continue;
if (mir_nontrivial_source2_mod_simple(ins)) continue;
- if (ins->ssa_args.src[1] & IS_REG) continue;
+ if (ins->src[1] & IS_REG) continue;
/* Is it beneficial to propagate? */
- if (!mir_single_use(ctx, ins->ssa_args.src[1])) continue;
+ if (!mir_single_use(ctx, ins->src[1])) continue;
/* We found an imov.not, propagate the invert back */
mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
- if (v->ssa_args.dest != ins->ssa_args.src[1]) continue;
+ if (v->dest != ins->src[1]) continue;
if (v->type != TAG_ALU_4) break;
v->invert = !v->invert;
@@ -198,7 +196,7 @@ mir_strip_inverted(compiler_context *ctx, unsigned node)
/* Strips and returns the invert off a node */
mir_foreach_instr_global(ctx, ins) {
if (ins->compact_branch) continue;
- if (ins->ssa_args.dest != node) continue;
+ if (ins->dest != node) continue;
bool status = ins->invert;
ins->invert = false;
@@ -219,18 +217,18 @@ midgard_opt_fuse_src_invert(compiler_context *ctx, midgard_block *block)
if (!mir_is_bitwise(ins)) continue;
if (ins->invert) continue;
- if (ins->ssa_args.src[0] & IS_REG) continue;
- if (ins->ssa_args.src[1] & IS_REG) continue;
- if (!mir_single_use(ctx, ins->ssa_args.src[0])) continue;
- if (!ins->ssa_args.inline_constant && !mir_single_use(ctx, ins->ssa_args.src[1])) continue;
+ if (ins->src[0] & IS_REG) continue;
+ if (ins->src[1] & IS_REG) continue;
+ if (!mir_single_use(ctx, ins->src[0])) continue;
+ if (!ins->has_inline_constant && !mir_single_use(ctx, ins->src[1])) continue;
- bool not_a = mir_strip_inverted(ctx, ins->ssa_args.src[0]);
+ bool not_a = mir_strip_inverted(ctx, ins->src[0]);
bool not_b =
- ins->ssa_args.inline_constant ? false :
- mir_strip_inverted(ctx, ins->ssa_args.src[1]);
+ ins->has_inline_constant ? false :
+ mir_strip_inverted(ctx, ins->src[1]);
/* Edge case: if src0 == src1, it'll've been stripped */
- if ((ins->ssa_args.src[0] == ins->ssa_args.src[1]) && !ins->ssa_args.inline_constant)
+ if ((ins->src[0] == ins->src[1]) && !ins->has_inline_constant)
not_b = not_a;
progress |= (not_a || not_b);
@@ -248,16 +246,16 @@ midgard_opt_fuse_src_invert(compiler_context *ctx, midgard_block *block)
if (both) {
ins->alu.op = mir_demorgan_op(ins->alu.op);
- } else if (right || (left && !ins->ssa_args.inline_constant)) {
+ } else if (right || (left && !ins->has_inline_constant)) {
if (left) {
/* Commute */
- unsigned temp = ins->ssa_args.src[0];
- ins->ssa_args.src[0] = ins->ssa_args.src[1];
- ins->ssa_args.src[1] = temp;
+ unsigned temp = ins->src[0];
+ ins->src[0] = ins->src[1];
+ ins->src[1] = temp;
}
ins->alu.op = mir_notright_op(ins->alu.op);
- } else if (left && ins->ssa_args.inline_constant) {
+ } else if (left && ins->has_inline_constant) {
/* Some special transformations:
*
* ~A & c = ~(~(~A) | (~c)) = ~(A | ~c) = inor(A, ~c)
diff --git a/src/panfrost/midgard/midgard_opt_perspective.c b/src/panfrost/midgard/midgard_opt_perspective.c
index 22b7736a379..feec5a5be39 100644
--- a/src/panfrost/midgard/midgard_opt_perspective.c
+++ b/src/panfrost/midgard/midgard_opt_perspective.c
@@ -61,8 +61,8 @@ midgard_opt_combine_projection(compiler_context *ctx, midgard_block *block)
if (src2.swizzle != SWIZZLE_XXXX) continue;
/* Awesome, we're the right form. Now check where src2 is from */
- unsigned frcp = ins->ssa_args.src[1];
- unsigned to = ins->ssa_args.dest;
+ unsigned frcp = ins->src[1];
+ unsigned to = ins->dest;
if (frcp & IS_REG) continue;
if (to & IS_REG) continue;
@@ -72,13 +72,13 @@ midgard_opt_combine_projection(compiler_context *ctx, midgard_block *block)
unsigned frcp_from = 0;
mir_foreach_instr_in_block_safe(block, sub) {
- if (sub->ssa_args.dest != frcp) continue;
+ if (sub->dest != frcp) continue;
midgard_vector_alu_src s =
vector_alu_from_unsigned(sub->alu.src1);
frcp_component = s.swizzle & 3;
- frcp_from = sub->ssa_args.src[0];
+ frcp_from = sub->src[0];
frcp_found =
(sub->type == TAG_ALU_4) &&
@@ -98,7 +98,7 @@ midgard_opt_combine_projection(compiler_context *ctx, midgard_block *block)
if (mir_use_count(ctx, frcp_from) > 2) continue;
mir_foreach_instr_in_block_safe(block, v) {
- if (v->ssa_args.dest != frcp_from) continue;
+ if (v->dest != frcp_from) continue;
if (v->type != TAG_LOAD_STORE_4) break;
if (!OP_IS_LOAD_VARY_F(v->load_store.op)) break;
@@ -114,10 +114,8 @@ midgard_opt_combine_projection(compiler_context *ctx, midgard_block *block)
midgard_instruction accel = {
.type = TAG_LOAD_STORE_4,
.mask = ins->mask,
- .ssa_args = {
- .dest = to,
- .src = { frcp_from, ~0, ~0 },
- },
+ .dest = to,
+ .src = { frcp_from, ~0, ~0 },
.load_store = {
.op = frcp_component == COMPONENT_W ?
midgard_op_ldst_perspective_division_w :
@@ -146,8 +144,8 @@ midgard_opt_varying_projection(compiler_context *ctx, midgard_block *block)
if (ins->type != TAG_LOAD_STORE_4) continue;
if (!OP_IS_PROJECTION(ins->load_store.op)) continue;
- unsigned vary = ins->ssa_args.src[0];
- unsigned to = ins->ssa_args.dest;
+ unsigned vary = ins->src[0];
+ unsigned to = ins->dest;
if (vary & IS_REG) continue;
if (to & IS_REG) continue;
@@ -158,7 +156,7 @@ midgard_opt_varying_projection(compiler_context *ctx, midgard_block *block)
bool rewritten = false;
mir_foreach_instr_in_block_safe(block, v) {
- if (v->ssa_args.dest != vary) continue;
+ if (v->dest != vary) continue;
if (v->type != TAG_LOAD_STORE_4) break;
if (!OP_IS_LOAD_VARY_F(v->load_store.op)) break;
@@ -184,7 +182,7 @@ midgard_opt_varying_projection(compiler_context *ctx, midgard_block *block)
v->load_store.varying_parameters = param;
/* Use the new destination */
- v->ssa_args.dest = to;
+ v->dest = to;
rewritten = true;
break;
diff --git a/src/panfrost/midgard/midgard_print.c b/src/panfrost/midgard/midgard_print.c
index 871d1c5a6de..2f65f2f8007 100644
--- a/src/panfrost/midgard/midgard_print.c
+++ b/src/panfrost/midgard/midgard_print.c
@@ -127,26 +127,24 @@ mir_print_instruction(midgard_instruction *ins)
if (ins->invert)
printf(".not");
- ssa_args *args = &ins->ssa_args;
-
printf(" ");
- mir_print_index(args->dest);
+ mir_print_index(ins->dest);
if (ins->mask != 0xF)
mir_print_mask(ins->mask);
printf(", ");
- mir_print_index(args->src[0]);
+ mir_print_index(ins->src[0]);
printf(", ");
- if (args->inline_constant)
+ if (ins->has_inline_constant)
printf("#%d", ins->inline_constant);
else
- mir_print_index(args->src[1]);
+ mir_print_index(ins->src[1]);
printf(", ");
- mir_print_index(args->src[2]);
+ mir_print_index(ins->src[2]);
if (ins->has_constants) {
uint32_t *uc = ins->constants;
diff --git a/src/panfrost/midgard/midgard_ra.c b/src/panfrost/midgard/midgard_ra.c
index e9097c2c63c..16cb31f0413 100644
--- a/src/panfrost/midgard/midgard_ra.c
+++ b/src/panfrost/midgard/midgard_ra.c
@@ -407,22 +407,22 @@ mir_lower_special_reads(compiler_context *ctx)
mir_foreach_instr_global(ctx, ins) {
switch (ins->type) {
case TAG_ALU_4:
- mark_node_class(aluw, ins->ssa_args.dest);
- mark_node_class(alur, ins->ssa_args.src[0]);
- mark_node_class(alur, ins->ssa_args.src[1]);
+ mark_node_class(aluw, ins->dest);
+ mark_node_class(alur, ins->src[0]);
+ mark_node_class(alur, ins->src[1]);
break;
case TAG_LOAD_STORE_4:
- mark_node_class(ldst, ins->ssa_args.src[0]);
- mark_node_class(ldst, ins->ssa_args.src[1]);
- mark_node_class(ldst, ins->ssa_args.src[2]);
+ mark_node_class(ldst, ins->src[0]);
+ mark_node_class(ldst, ins->src[1]);
+ mark_node_class(ldst, ins->src[2]);
break;
case TAG_TEXTURE_4:
- mark_node_class(texr, ins->ssa_args.src[0]);
- mark_node_class(texr, ins->ssa_args.src[1]);
- mark_node_class(texr, ins->ssa_args.src[2]);
- mark_node_class(texw, ins->ssa_args.dest);
+ mark_node_class(texr, ins->src[0]);
+ mark_node_class(texr, ins->src[1]);
+ mark_node_class(texr, ins->src[2]);
+ mark_node_class(texw, ins->dest);
break;
}
}
@@ -489,7 +489,7 @@ mir_lower_special_reads(compiler_context *ctx)
continue;
if (hazard_write) {
- if (pre_use->ssa_args.dest != i)
+ if (pre_use->dest != i)
continue;
} else {
if (!mir_has_arg(pre_use, i))
@@ -546,10 +546,10 @@ liveness_ins_update(uint8_t *live, midgard_instruction *ins, unsigned max)
{
/* live_in[s] = GEN[s] + (live_out[s] - KILL[s]) */
- liveness_kill(live, ins->ssa_args.dest, max, ins->mask);
+ liveness_kill(live, ins->dest, max, ins->mask);
mir_foreach_src(ins, src) {
- unsigned node = ins->ssa_args.src[src];
+ unsigned node = ins->src[src];
unsigned mask = mir_mask_of_read_components(ins, node);
liveness_gen(live, node, max, mask);
@@ -659,7 +659,7 @@ mir_compute_liveness(
/* Mark all registers live after the instruction as
* interfering with the destination */
- unsigned dest = ins->ssa_args.dest;
+ unsigned dest = ins->dest;
if (dest < ctx->temp_count) {
for (unsigned i = 0; i < ctx->temp_count; ++i)
@@ -712,7 +712,7 @@ allocate_registers(compiler_context *ctx, bool *spilled)
unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);
mir_foreach_instr_global(ctx, ins) {
- if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
+ if (ins->dest >= SSA_FIXED_MINIMUM) continue;
/* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
int class = util_logbase2(ins->mask);
@@ -720,7 +720,7 @@ allocate_registers(compiler_context *ctx, bool *spilled)
/* Use the largest class if there's ambiguity, this
* handles partial writes */
- int dest = ins->ssa_args.dest;
+ int dest = ins->dest;
found_class[dest] = MAX2(found_class[dest], class);
}
@@ -737,30 +737,30 @@ allocate_registers(compiler_context *ctx, bool *spilled)
if (ins->type == TAG_LOAD_STORE_4) {
bool force_vec4_only = OP_IS_VEC4_ONLY(ins->load_store.op);
- set_class(found_class, ins->ssa_args.src[0], REG_CLASS_LDST);
- set_class(found_class, ins->ssa_args.src[1], REG_CLASS_LDST);
- set_class(found_class, ins->ssa_args.src[2], REG_CLASS_LDST);
+ set_class(found_class, ins->src[0], REG_CLASS_LDST);
+ set_class(found_class, ins->src[1], REG_CLASS_LDST);
+ set_class(found_class, ins->src[2], REG_CLASS_LDST);
if (force_vec4_only) {
- force_vec4(found_class, ins->ssa_args.dest);
- force_vec4(found_class, ins->ssa_args.src[0]);
- force_vec4(found_class, ins->ssa_args.src[1]);
- force_vec4(found_class, ins->ssa_args.src[2]);
+ force_vec4(found_class, ins->dest);
+ force_vec4(found_class, ins->src[0]);
+ force_vec4(found_class, ins->src[1]);
+ force_vec4(found_class, ins->src[2]);
}
} else if (ins->type == TAG_TEXTURE_4) {
- set_class(found_class, ins->ssa_args.dest, REG_CLASS_TEXW);
- set_class(found_class, ins->ssa_args.src[0], REG_CLASS_TEXR);
- set_class(found_class, ins->ssa_args.src[1], REG_CLASS_TEXR);
- set_class(found_class, ins->ssa_args.src[2], REG_CLASS_TEXR);
+ set_class(found_class, ins->dest, REG_CLASS_TEXW);
+ set_class(found_class, ins->src[0], REG_CLASS_TEXR);
+ set_class(found_class, ins->src[1], REG_CLASS_TEXR);
+ set_class(found_class, ins->src[2], REG_CLASS_TEXR);
}
}
/* Check that the semantics of the class are respected */
mir_foreach_instr_global(ctx, ins) {
- assert(check_write_class(found_class, ins->type, ins->ssa_args.dest));
- assert(check_read_class(found_class, ins->type, ins->ssa_args.src[0]));
- assert(check_read_class(found_class, ins->type, ins->ssa_args.src[1]));
- assert(check_read_class(found_class, ins->type, ins->ssa_args.src[2]));
+ assert(check_write_class(found_class, ins->type, ins->dest));
+ assert(check_read_class(found_class, ins->type, ins->src[0]));
+ assert(check_read_class(found_class, ins->type, ins->src[1]));
+ assert(check_read_class(found_class, ins->type, ins->src[2]));
}
for (unsigned i = 0; i < ctx->temp_count; ++i) {
@@ -792,13 +792,11 @@ install_registers_instr(
struct ra_graph *g,
midgard_instruction *ins)
{
- ssa_args args = ins->ssa_args;
-
switch (ins->type) {
case TAG_ALU_4: {
- struct phys_reg src1 = index_to_reg(ctx, g, args.src[0]);
- struct phys_reg src2 = index_to_reg(ctx, g, args.src[1]);
- struct phys_reg dest = index_to_reg(ctx, g, args.dest);
+ struct phys_reg src1 = index_to_reg(ctx, g, ins->src[0]);
+ struct phys_reg src2 = index_to_reg(ctx, g, ins->src[1]);
+ struct phys_reg dest = index_to_reg(ctx, g, ins->dest);
unsigned uncomposed_mask = ins->mask;
ins->mask = compose_writemask(uncomposed_mask, dest);
@@ -814,9 +812,9 @@ install_registers_instr(
ins->registers.src1_reg = src1.reg;
- ins->registers.src2_imm = args.inline_constant;
+ ins->registers.src2_imm = ins->has_inline_constant;
- if (args.inline_constant) {
+ if (ins->has_inline_constant) {
/* Encode inline 16-bit constant. See disassembler for
* where the algorithm is from */
@@ -849,7 +847,7 @@ install_registers_instr(
bool encodes_src = OP_IS_STORE(ins->load_store.op);
if (encodes_src) {
- struct phys_reg src = index_to_reg(ctx, g, args.src[0]);
+ struct phys_reg src = index_to_reg(ctx, g, ins->src[0]);
assert(src.reg == 26 || src.reg == 27);
ins->load_store.reg = src.reg - 26;
@@ -868,7 +866,7 @@ install_registers_instr(
new_swizzle, src.mask,
default_phys_reg(0), src);
} else {
- struct phys_reg src = index_to_reg(ctx, g, args.dest);
+ struct phys_reg src = index_to_reg(ctx, g, ins->dest);
ins->load_store.reg = src.reg;
@@ -883,10 +881,10 @@ install_registers_instr(
/* We also follow up by actual arguments */
int src2 =
- encodes_src ? args.src[1] : args.src[0];
+ encodes_src ? ins->src[1] : ins->src[0];
int src3 =
- encodes_src ? args.src[2] : args.src[1];
+ encodes_src ? ins->src[2] : ins->src[1];
if (src2 >= 0) {
struct phys_reg src = index_to_reg(ctx, g, src2);
@@ -905,9 +903,9 @@ install_registers_instr(
case TAG_TEXTURE_4: {
/* Grab RA results */
- struct phys_reg dest = index_to_reg(ctx, g, args.dest);
- struct phys_reg coord = index_to_reg(ctx, g, args.src[0]);
- struct phys_reg lod = index_to_reg(ctx, g, args.src[1]);
+ struct phys_reg dest = index_to_reg(ctx, g, ins->dest);
+ struct phys_reg coord = index_to_reg(ctx, g, ins->src[0]);
+ struct phys_reg lod = index_to_reg(ctx, g, ins->src[1]);
assert(dest.reg == 28 || dest.reg == 29);
assert(coord.reg == 28 || coord.reg == 29);
@@ -929,7 +927,7 @@ install_registers_instr(
compose_writemask(ins->mask, dest);
/* If there is a register LOD/bias, use it */
- if (args.src[1] != ~0) {
+ if (ins->src[1] != ~0) {
midgard_tex_register_select sel = {
.select = lod.reg,
.full = 1,
diff --git a/src/panfrost/midgard/midgard_ra_pipeline.c b/src/panfrost/midgard/midgard_ra_pipeline.c
index a5c1025ee27..feb457de0f9 100644
--- a/src/panfrost/midgard/midgard_ra_pipeline.c
+++ b/src/panfrost/midgard/midgard_ra_pipeline.c
@@ -46,13 +46,13 @@ mir_pipeline_ins(
unsigned pipeline_count)
{
midgard_instruction *ins = bundle->instructions[i];
- unsigned dest = ins->ssa_args.dest;
+ unsigned dest = ins->dest;
/* We could be pipelining a register, so we need to make sure that all
* of the components read in this bundle are written in this bundle,
* and that no components are written before this bundle */
- unsigned node = ins->ssa_args.dest;
+ unsigned node = ins->dest;
unsigned read_mask = 0;
/* Analyze the bundle for a read mask */
@@ -65,7 +65,7 @@ mir_pipeline_ins(
/* Now analyze for a write mask */
for (unsigned i = 0; i < bundle->instruction_count; ++i) {
midgard_instruction *q = bundle->instructions[i];
- if (q->ssa_args.dest != node) continue;
+ if (q->dest != node) continue;
/* Remove the written mask from the read requirements */
read_mask &= ~q->mask;
@@ -87,7 +87,7 @@ mir_pipeline_ins(
midgard_instruction *end = bundle->instructions[
bundle->instruction_count - 1];
- if (mir_is_live_after(ctx, block, end, ins->ssa_args.dest))
+ if (mir_is_live_after(ctx, block, end, ins->dest))
return false;
/* We're only live in this bundle -- pipeline! */
diff --git a/src/panfrost/midgard/midgard_schedule.c b/src/panfrost/midgard/midgard_schedule.c
index e1877b314ce..f80a0354fb8 100644
--- a/src/panfrost/midgard/midgard_schedule.c
+++ b/src/panfrost/midgard/midgard_schedule.c
@@ -71,12 +71,12 @@ can_run_concurrent_ssa(midgard_instruction *first, midgard_instruction *second)
/* Each instruction reads some registers and writes to a register. See
* where the first writes */
- int source = first->ssa_args.dest;
+ int source = first->dest;
int source_mask = first->mask;
/* As long as the second doesn't read from the first, we're okay */
- for (unsigned i = 0; i < ARRAY_SIZE(second->ssa_args.src); ++i) {
- if (second->ssa_args.src[i] != source)
+ for (unsigned i = 0; i < ARRAY_SIZE(second->src); ++i) {
+ if (second->src[i] != source)
continue;
if (first->type != TAG_ALU_4)
@@ -95,7 +95,7 @@ can_run_concurrent_ssa(midgard_instruction *first, midgard_instruction *second)
/* Otherwise, it's safe in that regard. Another data hazard is both
* writing to the same place, of course */
- if (second->ssa_args.dest == source) {
+ if (second->dest == source) {
/* ...but only if the components overlap */
if (second->mask & source_mask)
@@ -147,7 +147,7 @@ can_writeout_fragment(compiler_context *ctx, midgard_instruction **bundle, unsig
for (unsigned i = 0; i < count; ++i) {
midgard_instruction *ins = bundle[i];
- if (ins->ssa_args.dest != SSA_FIXED_REGISTER(0))
+ if (ins->dest != SSA_FIXED_REGISTER(0))
continue;
/* Record written out mask */
@@ -158,8 +158,8 @@ can_writeout_fragment(compiler_context *ctx, midgard_instruction **bundle, unsig
* we're writeout at the very end of the shader. So check if
* they were written before us. */
- unsigned src0 = ins->ssa_args.src[0];
- unsigned src1 = ins->ssa_args.src[1];
+ unsigned src0 = ins->src[0];
+ unsigned src1 = ins->src[1];
if (!mir_is_written_before(ctx, bundle[0], src0))
src0 = ~0;
@@ -185,7 +185,7 @@ can_writeout_fragment(compiler_context *ctx, midgard_instruction **bundle, unsig
/* Requirement 3 */
for (unsigned i = 0; i < count; ++i) {
- unsigned dest = bundle[i]->ssa_args.dest;
+ unsigned dest = bundle[i]->dest;
if (dest < node_count && BITSET_TEST(dependencies, dest))
return false;
@@ -450,10 +450,10 @@ schedule_bundle(compiler_context *ctx, midgard_block *block, midgard_instruction
unsigned swizzle = SWIZZLE_FROM_ARRAY(indices);
unsigned r_constant = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
- if (ains->ssa_args.src[0] == r_constant)
+ if (ains->src[0] == r_constant)
ains->alu.src1 = vector_alu_apply_swizzle(ains->alu.src1, swizzle);
- if (ains->ssa_args.src[1] == r_constant)
+ if (ains->src[1] == r_constant)
ains->alu.src2 = vector_alu_apply_swizzle(ains->alu.src2, swizzle);
bundle.has_embedded_constants = true;
@@ -632,8 +632,8 @@ midgard_pair_load_store(compiler_context *ctx, midgard_block *block)
bool deps = false;
- for (unsigned s = 0; s < ARRAY_SIZE(ins->ssa_args.src); ++s)
- deps |= (c->ssa_args.src[s] != ~0);
+ for (unsigned s = 0; s < ARRAY_SIZE(ins->src); ++s)
+ deps |= (c->src[s] != ~0);
if (deps)
continue;
@@ -685,10 +685,10 @@ mir_squeeze_index(compiler_context *ctx)
ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL);
mir_foreach_instr_global(ctx, ins) {
- ins->ssa_args.dest = find_or_allocate_temp(ctx, ins->ssa_args.dest);
+ ins->dest = find_or_allocate_temp(ctx, ins->dest);
- for (unsigned i = 0; i < ARRAY_SIZE(ins->ssa_args.src); ++i)
- ins->ssa_args.src[i] = find_or_allocate_temp(ctx, ins->ssa_args.src[i]);
+ for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i)
+ ins->src[i] = find_or_allocate_temp(ctx, ins->src[i]);
}
}
@@ -705,10 +705,8 @@ v_load_store_scratch(
midgard_instruction ins = {
.type = TAG_LOAD_STORE_4,
.mask = mask,
- .ssa_args = {
- .dest = ~0,
- .src = { ~0, ~0, ~0 },
- },
+ .dest = ~0,
+ .src = { ~0, ~0, ~0 },
.load_store = {
.op = is_store ? midgard_op_st_int4 : midgard_op_ld_int4,
.swizzle = SWIZZLE_XYZW,
@@ -729,9 +727,9 @@ v_load_store_scratch(
if (is_store) {
/* r0 = r26, r1 = r27 */
assert(srcdest == SSA_FIXED_REGISTER(26) || srcdest == SSA_FIXED_REGISTER(27));
- ins.ssa_args.src[0] = srcdest;
+ ins.src[0] = srcdest;
} else {
- ins.ssa_args.dest = srcdest;
+ ins.dest = srcdest;
}
return ins;
@@ -759,9 +757,9 @@ static void mir_spill_register(
mir_foreach_instr_global(ctx, ins) {
if (ins->no_spill &&
- ins->ssa_args.dest >= 0 &&
- ins->ssa_args.dest < ctx->temp_count)
- ra_set_node_spill_cost(g, ins->ssa_args.dest, -1.0);
+ ins->dest >= 0 &&
+ ins->dest < ctx->temp_count)
+ ra_set_node_spill_cost(g, ins->dest, -1.0);
}
int spill_node = ra_get_best_spill_node(g);
@@ -791,7 +789,7 @@ static void mir_spill_register(
spill_slot = spill_index++;
mir_foreach_instr_global_safe(ctx, ins) {
- if (ins->ssa_args.dest != spill_node) continue;
+ if (ins->dest != spill_node) continue;
midgard_instruction st;
@@ -799,8 +797,8 @@ static void mir_spill_register(
st = v_mov(spill_node, blank_alu_src, spill_slot);
st.no_spill = true;
} else {
- ins->ssa_args.dest = SSA_FIXED_REGISTER(26);
- st = v_load_store_scratch(ins->ssa_args.dest, spill_slot, true, ins->mask);
+ ins->dest = SSA_FIXED_REGISTER(26);
+ st = v_load_store_scratch(ins->dest, spill_slot, true, ins->mask);
}
/* Hint: don't rewrite this node */
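
In the scheduler, can_run_concurrent_ssa() now scans the flat src[] for reads of the first instruction's dest and also rejects overlapping writes to the same node. A pared-down version of that check on the illustrative type, with the component-mask and swizzle handling of the real function omitted (names are assumptions):

/* Pared-down hazard test in the spirit of can_run_concurrent_ssa():
 * true if the second instruction reads or overwrites what the first
 * one writes. */
static bool
mini_has_hazard(const mini_instruction *first,
                const mini_instruction *second)
{
        if (first->dest == SSA_UNUSED)
                return false;

        /* Read-after-write: second reads first's destination. */
        for (unsigned i = 0; i < 3; ++i) {
                if (second->src[i] == first->dest)
                        return true;
        }

        /* Write-after-write: both write the same node. */
        return second->dest == first->dest;
}
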
diff --git a/src/panfrost/midgard/mir.c b/src/panfrost/midgard/mir.c
index 42b84b0f6a2..0bf4c1d6021 100644
--- a/src/panfrost/midgard/mir.c
+++ b/src/panfrost/midgard/mir.c
@@ -26,16 +26,16 @@
void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new)
{
- for (unsigned i = 0; i < ARRAY_SIZE(ins->ssa_args.src); ++i) {
- if (ins->ssa_args.src[i] == old)
- ins->ssa_args.src[i] = new;
+ for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
+ if (ins->src[i] == old)
+ ins->src[i] = new;
}
}
void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old, unsigned new)
{
- if (ins->ssa_args.dest == old)
- ins->ssa_args.dest = new;
+ if (ins->dest == old)
+ ins->dest = new;
}
static unsigned
@@ -144,10 +144,10 @@ mir_set_swizzle(midgard_instruction *ins, unsigned idx, unsigned new)
static void
mir_rewrite_index_src_single_swizzle(midgard_instruction *ins, unsigned old, unsigned new, unsigned swizzle)
{
- for (unsigned i = 0; i < ARRAY_SIZE(ins->ssa_args.src); ++i) {
- if (ins->ssa_args.src[i] != old) continue;
+ for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
+ if (ins->src[i] != old) continue;
- ins->ssa_args.src[i] = new;
+ ins->src[i] = new;
mir_set_swizzle(ins, i,
pan_compose_swizzle(mir_get_swizzle(ins, i), swizzle));
@@ -198,8 +198,8 @@ mir_rewrite_index_dst_tag(compiler_context *ctx, unsigned old, unsigned new, uns
if (ins->type != tag)
continue;
- if (ins->ssa_args.dest == old)
- ins->ssa_args.dest = new;
+ if (ins->dest == old)
+ ins->dest = new;
}
}
@@ -334,7 +334,7 @@ mir_is_written_before(compiler_context *ctx, midgard_instruction *ins, unsigned
if (q == ins)
break;
- if (q->ssa_args.dest == node)
+ if (q->dest == node)
return true;
}
@@ -407,7 +407,7 @@ mir_mask_of_read_components(midgard_instruction *ins, unsigned node)
unsigned mask = 0;
for (unsigned i = 0; i < mir_source_count(ins); ++i) {
- if (ins->ssa_args.src[i] != node) continue;
+ if (ins->src[i] != node) continue;
unsigned swizzle = mir_get_swizzle(ins, i);
unsigned m = mir_mask_of_read_components_single(swizzle, ins->mask);
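
The mir.c hunks reduce mir_rewrite_index_src_single() and mir_rewrite_index_dst_single() to straight loops over the flat fields. Combined into one illustrative helper on the sketch type (not the compiler's API; the real helpers are kept separate and also handle swizzle composition):

/* Sketch of index rewriting after the flattening: rename a node in
 * both the destination and all three sources of one instruction. */
static void
mini_rewrite_index(mini_instruction *ins, unsigned old, unsigned new_index)
{
        if (ins->dest == old)
                ins->dest = new_index;

        for (unsigned i = 0; i < 3; ++i) {
                if (ins->src[i] == old)
                        ins->src[i] = new_index;
        }
}
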
diff --git a/src/panfrost/midgard/mir_promote_uniforms.c b/src/panfrost/midgard/mir_promote_uniforms.c
index 27e25cad8bf..500230f7820 100644
--- a/src/panfrost/midgard/mir_promote_uniforms.c
+++ b/src/panfrost/midgard/mir_promote_uniforms.c
@@ -84,8 +84,8 @@ midgard_promote_uniforms(compiler_context *ctx, unsigned promoted_count)
/* We do need the move for safety for a non-SSA dest, or if
* we're being fed into a special class */
- bool needs_move = ins->ssa_args.dest & IS_REG;
- needs_move |= mir_special_index(ctx, ins->ssa_args.dest);
+ bool needs_move = ins->dest & IS_REG;
+ needs_move |= mir_special_index(ctx, ins->dest);
/* Ensure this is a contiguous X-bound mask. It should be since
* we haven't done RA and per-component masked UBO reads don't
@@ -101,11 +101,11 @@ midgard_promote_uniforms(compiler_context *ctx, unsigned promoted_count)
unsigned nr_components = util_bitcount(ins->mask);
if (needs_move) {
- midgard_instruction mov = v_mov(promoted, blank_alu_src, ins->ssa_args.dest);
+ midgard_instruction mov = v_mov(promoted, blank_alu_src, ins->dest);
mov.mask = ins->mask;
mir_insert_instruction_before(ins, mov);
} else {
- mir_rewrite_index_src_swizzle(ctx, ins->ssa_args.dest,
+ mir_rewrite_index_src_swizzle(ctx, ins->dest,
promoted, swizzle_of(nr_components));
}