author     Alyssa Rosenzweig <[email protected]>    2019-11-15 14:19:34 -0500
committer  Alyssa Rosenzweig <[email protected]>    2019-11-17 22:19:31 -0500
commit     bc9a7d0699a21f3fa94f910ed7cd16e047c63edf (patch)
tree       6fd26b6f4a6147491f0af7d44c264d0d4af91421 /src
parent     1798f6bfc33d1d77dae1112591dce18bf7fdbc4a (diff)
pan/midgard: Represent ld/st offset unpacked
This simplifies manipulation of the offsets dramatically, fixing some
UBO-access-related bugs.

Signed-off-by: Alyssa Rosenzweig <[email protected]>
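For reference, a minimal sketch of the packing this commit defers to emit
time: the unpacked byte offset now carried in constants[0] is split so its
low 7 bits land in varying_parameters (shifted up by 3) and the remaining
high bits in address, exactly as the midgard_emit.c hunk below does inline.
The helper name here is hypothetical and not part of the patch.

    #include <stdint.h>

    /* Illustration only: fold an unpacked byte offset into the two
     * hardware fields of a load/store word. The patch performs this
     * inline in emit_binary_bundle(). */
    static void
    pack_ldst_offset_sketch(uint32_t offset,
                            uint32_t *varying_parameters,
                            uint32_t *address)
    {
            /* Low 7 bits, shifted into position */
            *varying_parameters |= (offset & 0x7F) << 3;

            /* Remaining high bits */
            *address |= offset >> 7;
    }

With the offset kept unpacked in the IR, passes such as mir_promote_uniforms
can read constants[0] directly instead of reversing this split.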
Diffstat (limited to 'src')
-rw-r--r--  src/panfrost/midgard/compiler.h              |  1
-rw-r--r--  src/panfrost/midgard/midgard_compile.c       |  7
-rw-r--r--  src/panfrost/midgard/midgard_emit.c          |  8
-rw-r--r--  src/panfrost/midgard/midgard_schedule.c      |  6
-rw-r--r--  src/panfrost/midgard/mir.c                   | 19
-rw-r--r--  src/panfrost/midgard/mir_promote_uniforms.c  | 20
6 files changed, 14 insertions(+), 47 deletions(-)
diff --git a/src/panfrost/midgard/compiler.h b/src/panfrost/midgard/compiler.h
index 3c1730143e2..7e3f998ab4f 100644
--- a/src/panfrost/midgard/compiler.h
+++ b/src/panfrost/midgard/compiler.h
@@ -517,7 +517,6 @@ bool mir_special_index(compiler_context *ctx, unsigned idx);
unsigned mir_use_count(compiler_context *ctx, unsigned value);
bool mir_is_written_before(compiler_context *ctx, midgard_instruction *ins, unsigned node);
uint16_t mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node);
-unsigned mir_ubo_shift(midgard_load_store_op op);
midgard_reg_mode mir_typesize(midgard_instruction *ins);
midgard_reg_mode mir_srcsize(midgard_instruction *ins, unsigned i);
unsigned mir_bytes_for_mode(midgard_reg_mode mode);
diff --git a/src/panfrost/midgard/midgard_compile.c b/src/panfrost/midgard/midgard_compile.c
index a187beaab7c..da7d995acd0 100644
--- a/src/panfrost/midgard/midgard_compile.c
+++ b/src/panfrost/midgard/midgard_compile.c
@@ -1141,11 +1141,8 @@ emit_ubo_read(
{
/* TODO: half-floats */
- midgard_instruction ins = m_ld_ubo_int4(dest, offset);
-
- /* TODO: Don't split */
- ins.load_store.varying_parameters = (offset & 0x7F) << 3;
- ins.load_store.address = offset >> 7;
+ midgard_instruction ins = m_ld_ubo_int4(dest, 0);
+ ins.constants[0] = offset;
mir_set_intr_mask(instr, &ins, true);
if (indirect_offset) {
diff --git a/src/panfrost/midgard/midgard_emit.c b/src/panfrost/midgard/midgard_emit.c
index 9d03bbc1a09..7559a34dcfb 100644
--- a/src/panfrost/midgard/midgard_emit.c
+++ b/src/panfrost/midgard/midgard_emit.c
@@ -388,6 +388,14 @@ emit_binary_bundle(compiler_context *ctx,
mir_pack_ldst_mask(bundle->instructions[i]);
mir_pack_swizzle_ldst(bundle->instructions[i]);
+
+ /* Apply a constant offset */
+ unsigned offset = bundle->instructions[i]->constants[0];
+
+ if (offset) {
+ bundle->instructions[i]->load_store.varying_parameters |= (offset & 0x7F) << 3;
+ bundle->instructions[i]->load_store.address |= (offset >> 7);
+ }
}
memcpy(&current64, &bundle->instructions[0]->load_store, sizeof(current64));
diff --git a/src/panfrost/midgard/midgard_schedule.c b/src/panfrost/midgard/midgard_schedule.c
index 418589a6192..addd65306d6 100644
--- a/src/panfrost/midgard/midgard_schedule.c
+++ b/src/panfrost/midgard/midgard_schedule.c
@@ -1169,16 +1169,14 @@ v_load_store_scratch(
/* For register spilling - to thread local storage */
.arg_1 = 0xEA,
.arg_2 = 0x1E,
-
- /* Splattered across, TODO combine logically */
- .varying_parameters = (byte & 0x1FF) << 1,
- .address = (byte >> 9)
},
/* If we spill an unspill, RA goes into an infinite loop */
.no_spill = true
};
+ ins.constants[0] = byte;
+
if (is_store) {
/* r0 = r26, r1 = r27 */
assert(srcdest == SSA_FIXED_REGISTER(26) || srcdest == SSA_FIXED_REGISTER(27));
diff --git a/src/panfrost/midgard/mir.c b/src/panfrost/midgard/mir.c
index 8d928458a54..3f4d53c5781 100644
--- a/src/panfrost/midgard/mir.c
+++ b/src/panfrost/midgard/mir.c
@@ -478,25 +478,6 @@ mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node)
return mask;
}
-unsigned
-mir_ubo_shift(midgard_load_store_op op)
-{
- switch (op) {
- case midgard_op_ld_ubo_char:
- return 0;
- case midgard_op_ld_ubo_char2:
- return 1;
- case midgard_op_ld_ubo_char4:
- return 2;
- case midgard_op_ld_ubo_short4:
- return 3;
- case midgard_op_ld_ubo_int4:
- return 4;
- default:
- unreachable("Invalid op");
- }
-}
-
/* Register allocation occurs after instruction scheduling, which is fine until
* we start needing to spill registers and therefore insert instructions into
* an already-scheduled program. We don't have to be terribly efficient about
diff --git a/src/panfrost/midgard/mir_promote_uniforms.c b/src/panfrost/midgard/mir_promote_uniforms.c
index 8d887a615fb..d7b3cce36d2 100644
--- a/src/panfrost/midgard/mir_promote_uniforms.c
+++ b/src/panfrost/midgard/mir_promote_uniforms.c
@@ -36,22 +36,6 @@
* program so we allow that many registers through at minimum, to prevent
* spilling. If we spill anyway, I mean, it's a lose-lose at that point. */
-static unsigned
-mir_ubo_offset(midgard_instruction *ins)
-{
- assert(ins->type == TAG_LOAD_STORE_4);
- assert(OP_IS_UBO_READ(ins->load_store.op));
-
- /* Grab the offset as the hw understands it */
- unsigned lo = ins->load_store.varying_parameters >> 7;
- unsigned hi = ins->load_store.address;
- unsigned raw = ((hi << 3) | lo);
-
- /* Account for the op's shift */
- unsigned shift = mir_ubo_shift(ins->load_store.op);
- return (raw << shift);
-}
-
void
midgard_promote_uniforms(compiler_context *ctx, unsigned promoted_count)
{
@@ -59,8 +43,8 @@ midgard_promote_uniforms(compiler_context *ctx, unsigned promoted_count)
if (ins->type != TAG_LOAD_STORE_4) continue;
if (!OP_IS_UBO_READ(ins->load_store.op)) continue;
- /* Get the offset. TODO: can we promote unaligned access? */
- unsigned off = mir_ubo_offset(ins);
+ /* TODO: promote unaligned access via swizzle? */
+ unsigned off = ins->constants[0];
if (off & 0xF) continue;
unsigned address = off / 16;