author     Alyssa Rosenzweig <[email protected]>   2019-07-24 11:16:48 -0700
committer  Alyssa Rosenzweig <[email protected]>   2019-07-25 06:37:22 -0700
commit     91195bdff176d139867fd164221ddc05adfad17e (patch)
tree       cc0675eb19152ee4a9dc2ee4b1ecf10b35789258 /src/panfrost/midgard
parent     0f38f6466e8f1db31aec0bc60e12246a67f91d4d (diff)
pan/midgard: Implement class spilling
We reuse the same register spilling mechanism as for work->memory to spill
special->work registers, e.g. to allow writing out more than 2 vec4 varyings
(without better scheduling anyway).

Signed-off-by: Alyssa Rosenzweig <[email protected]>
Diffstat (limited to 'src/panfrost/midgard')
-rw-r--r--  src/panfrost/midgard/midgard_schedule.c  |  53
1 file changed, 39 insertions, 14 deletions
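
Before the diff itself, a minimal standalone sketch of the two choices the patch introduces, assuming hypothetical stand-in names (reg_class, spill_target, fill_kind). It only models the ra_get_node_class()/REG_CLASS_WORK check and the v_load_store_scratch()-versus-v_mov() decision from the hunks below, and it elides the class-packing shift (class >> 2) the real code performs; it is not Mesa code.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the register classes the Midgard RA works with. */
enum reg_class { REG_CLASS_WORK, REG_CLASS_LDST, REG_CLASS_TEX };

/* Spill side: where does the spilled value live? */
enum spill_target { SPILL_TO_TLS, SPILL_TO_WORK_REG };

/* Fill side: which instruction breaks up the live range before a use? */
enum fill_kind { FILL_LOAD_FROM_TLS, FILL_MOVE_FROM_SPECIAL };

/* Work registers spill to thread-local storage; special (non-work)
 * registers are only demoted to work registers, so no TLS slot is needed. */
static enum spill_target
pick_spill_target(enum reg_class c)
{
        bool is_special = (c != REG_CLASS_WORK);
        return is_special ? SPILL_TO_WORK_REG : SPILL_TO_TLS;
}

/* Before the first consecutive use, a work-class spill is reloaded from its
 * TLS slot, while a special-class spill just gets a move into a fresh
 * work-class index. */
static enum fill_kind
pick_fill_kind(enum reg_class c)
{
        return (c == REG_CLASS_WORK) ? FILL_LOAD_FROM_TLS : FILL_MOVE_FROM_SPECIAL;
}

int
main(void)
{
        printf("work:    spill=%s, fill=%s\n",
               pick_spill_target(REG_CLASS_WORK) == SPILL_TO_TLS ? "TLS" : "work reg",
               pick_fill_kind(REG_CLASS_WORK) == FILL_LOAD_FROM_TLS ? "load" : "move");
        printf("special: spill=%s, fill=%s\n",
               pick_spill_target(REG_CLASS_LDST) == SPILL_TO_TLS ? "TLS" : "work reg",
               pick_fill_kind(REG_CLASS_LDST) == FILL_LOAD_FROM_TLS ? "load" : "move");
        return 0;
}

One consequence visible in the diff: only the work-class path allocates a TLS slot and bumps ctx->spills/ctx->fills, since a special-class spill never touches memory; it merely demotes the value into a work register, which is why the patch's comment notes that this kind of spill can actually increase register pressure.
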
diff --git a/src/panfrost/midgard/midgard_schedule.c b/src/panfrost/midgard/midgard_schedule.c
index a53ba7ee208..a83b6f5fcaa 100644
--- a/src/panfrost/midgard/midgard_schedule.c
+++ b/src/panfrost/midgard/midgard_schedule.c
@@ -707,26 +707,40 @@ schedule_program(compiler_context *ctx)
assert(0);
}
- /* Allocate TLS slot */
- unsigned spill_slot = spill_count++;
+ /* Check the class. Work registers legitimately spill
+ * to TLS, but special registers just spill to work
+ * registers */
+ unsigned class = ra_get_node_class(g, spill_node);
+ bool is_special = (class >> 2) != REG_CLASS_WORK;
- /* Replace all stores to the spilled node with stores
- * to TLS */
+ /* Allocate TLS slot (maybe) */
+ unsigned spill_slot = !is_special ? spill_count++ : 0;
- mir_foreach_instr_global_safe(ctx, ins) {
- if (ins->compact_branch) continue;
- if (ins->ssa_args.dest != spill_node) continue;
- ins->ssa_args.dest = SSA_FIXED_REGISTER(26);
+ /* For TLS, replace all stores to the spilled node. For
+ * special, just keep as-is; the class will be demoted
+ * implicitly */
+
+ if (!is_special) {
+ mir_foreach_instr_global_safe(ctx, ins) {
+ if (ins->compact_branch) continue;
+ if (ins->ssa_args.dest != spill_node) continue;
+ ins->ssa_args.dest = SSA_FIXED_REGISTER(26);
- midgard_instruction st = v_load_store_scratch(ins->ssa_args.dest, spill_slot, true, ins->mask);
- mir_insert_instruction_before(mir_next_op(ins), st);
+ midgard_instruction st = v_load_store_scratch(ins->ssa_args.dest, spill_slot, true, ins->mask);
+ mir_insert_instruction_before(mir_next_op(ins), st);
- ctx->spills++;
+ ctx->spills++;
+ }
}
/* Insert a load from TLS before the first consecutive
* use of the node, rewriting to use spilled indices to
- * break up the live range */
+ * break up the live range. Or, for special, insert a
+ * move. Ironically the latter *increases* register
+ * pressure, but the two uses of the spilling mechanism
+ * are somewhat orthogonal. (special spilling is to use
+ * work registers to back special registers; TLS
+ * spilling is to use memory to back work registers) */
mir_foreach_block(ctx, block) {
@@ -748,13 +762,23 @@ schedule_program(compiler_context *ctx)
}
consecutive_index = ++spill_index;
- midgard_instruction st = v_load_store_scratch(consecutive_index, spill_slot, false, 0xF);
+
midgard_instruction *before = ins;
/* For a csel, go back one more not to break up the bundle */
if (ins->type == TAG_ALU_4 && OP_IS_CSEL(ins->alu.op))
before = mir_prev_op(before);
+ midgard_instruction st;
+
+ if (is_special) {
+ /* Move */
+ st = v_mov(spill_node, blank_alu_src, consecutive_index);
+ } else {
+ /* TLS load */
+ st = v_load_store_scratch(consecutive_index, spill_slot, false, 0xF);
+ }
+
mir_insert_instruction_before(before, st);
// consecutive_skip = true;
@@ -762,7 +786,8 @@ schedule_program(compiler_context *ctx)
/* Rewrite to use */
mir_rewrite_index_src_single(ins, spill_node, consecutive_index);
- ctx->fills++;
+ if (!is_special)
+ ctx->fills++;
}
}
}