aboutsummaryrefslogtreecommitdiffstats
path: root/src/panfrost/midgard/midgard_schedule.c
diff options
context:
space:
mode:
authorAlyssa Rosenzweig <[email protected]>2019-08-05 09:19:39 -0700
committerAlyssa Rosenzweig <[email protected]>2019-08-12 12:43:00 -0700
commita8639b91b5e90dc8cf40a683a16cd8c4cb51193c (patch)
tree2f2bab12106d7413e8fa6371a53c852fc72d5820 /src/panfrost/midgard/midgard_schedule.c
parent63e240dd0573968a3602424da3b963ba82d5cf6b (diff)
pan/midgard: Pipe uniform mask through when spilling
This is a corner case that happens a lot with SSBOs. Basically, if we only read a few components of a uniform, we need to spill only those components; otherwise we try to spill what we already spilled and RA hangs. Signed-off-by: Alyssa Rosenzweig <[email protected]>
Diffstat (limited to 'src/panfrost/midgard/midgard_schedule.c')
-rw-r--r--src/panfrost/midgard/midgard_schedule.c14
1 files changed, 13 insertions, 1 deletions
diff --git a/src/panfrost/midgard/midgard_schedule.c b/src/panfrost/midgard/midgard_schedule.c
index f69e86e2f46..d7d8254bd6b 100644
--- a/src/panfrost/midgard/midgard_schedule.c
+++ b/src/panfrost/midgard/midgard_schedule.c
@@ -723,7 +723,7 @@ v_load_store_scratch(
if (is_store) {
/* r0 = r26, r1 = r27 */
assert(srcdest == SSA_FIXED_REGISTER(26) || srcdest == SSA_FIXED_REGISTER(27));
- ins.ssa_args.src[0] = (srcdest == SSA_FIXED_REGISTER(27)) ? SSA_FIXED_REGISTER(1) : SSA_FIXED_REGISTER(0);
+ ins.ssa_args.src[0] = srcdest;
} else {
ins.ssa_args.dest = srcdest;
}
@@ -803,6 +803,13 @@ static void mir_spill_register(
}
}
+ /* For special reads, figure out how many components we need */
+ unsigned read_mask = 0;
+
+ mir_foreach_instr_global_safe(ctx, ins) {
+ read_mask |= mir_mask_of_read_components(ins, spill_node);
+ }
+
/* Insert a load from TLS before the first consecutive
* use of the node, rewriting to use spilled indices to
* break up the live range. Or, for special, insert a
@@ -850,6 +857,11 @@ static void mir_spill_register(
st = v_load_store_scratch(consecutive_index, spill_slot, false, 0xF);
}
+ /* Mask the load based on the component count
+ * actually needed to prevent RA loops */
+
+ st.mask = read_mask;
+
mir_insert_instruction_before(before, st);
// consecutive_skip = true;
} else {