about · summary · refs · log · tree · commit · diff · stats
path: root/src
diff options
context:
space:
mode:
authorRob Clark <[email protected]>2020-03-24 09:10:06 -0700
committerMarge Bot <[email protected]>2020-04-13 20:47:28 +0000
commit9701008d64171b1f16be9a8a69555df2b651c37b (patch)
tree8d3d9ea6bff649e19dad0eadcab050a89e7ef08e /src
parentd2f4d332dbb552af62fe5caabe67664d98f32229 (diff)
freedreno/ir3/sched: awareness of partial liveness
Realize that certain instructions make a vecN live, and account for this, in hopes of scheduling the remaining components of the vecN sooner. Signed-off-by: Rob Clark <[email protected]> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4440>
Diffstat (limited to 'src')
-rw-r--r--src/freedreno/ir3/ir3_sched.c45
1 file changed, 44 insertions, 1 deletion
diff --git a/src/freedreno/ir3/ir3_sched.c b/src/freedreno/ir3/ir3_sched.c
index 95c89ae8461..46448c10b4b 100644
--- a/src/freedreno/ir3/ir3_sched.c
+++ b/src/freedreno/ir3/ir3_sched.c
@@ -104,6 +104,22 @@ struct ir3_sched_node {
unsigned delay;
unsigned max_delay;
+ /* For instructions that are a meta:collect src, once we schedule
+ * the first src of the collect, the entire vecN is live (at least
+ * from the PoV of the first RA pass.. the 2nd scalar pass can fill
+ * in some of the gaps, but often not all). So we want to help out
+ * RA, and realize that as soon as we schedule the first collect
+ * src, there is no penalty to schedule the remainder (ie. they
+ * don't make additional values live). In fact we'd prefer to
+ * schedule the rest ASAP to minimize the live range of the vecN.
+ *
+ * For instructions that are the src of a collect, we track the
+ * corresponding collect, and mark them as partially live as soon
+ * as any one of the src's is scheduled.
+ */
+ struct ir3_instruction *collect;
+ bool partially_live;
+
/* Is this instruction a direct or indirect dependency for a kill?
* If so, we should prioritize it when possible
*/
@@ -158,6 +174,19 @@ schedule(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
struct ir3_sched_node *n = instr->data;
+ /* If this instruction is a meta:collect src, mark the remaining
+ * collect srcs as partially live.
+ */
+ if (n->collect) {
+ struct ir3_instruction *src;
+ foreach_ssa_src (src, n->collect) {
+ if (src->block != instr->block)
+ continue;
+ struct ir3_sched_node *sn = src->data;
+ sn->partially_live = true;
+ }
+ }
+
dag_prune_head(ctx->dag, &n->dag);
}
@@ -340,10 +369,17 @@ use_count(struct ir3_instruction *instr)
static int
live_effect(struct ir3_instruction *instr)
{
+ struct ir3_sched_node *n = instr->data;
struct ir3_instruction *src;
- int new_live = dest_regs(instr);
+ int new_live = n->partially_live ? 0 : dest_regs(instr);
int freed_live = 0;
+ /* if we schedule something that causes a vecN to be live,
+ * then count all it's other components too:
+ */
+ if (n->collect)
+ new_live *= n->collect->regs_count - 1;
+
foreach_ssa_src_n (src, n, instr) {
if (__is_false_dep(instr, n))
continue;
@@ -697,6 +733,13 @@ sched_node_add_dep(struct ir3_instruction *instr, struct ir3_instruction *src, i
struct ir3_sched_node *n = instr->data;
struct ir3_sched_node *sn = src->data;
+ /* If src is consumed by a collect, track that to realize that once
+ * any of the collect srcs are live, we should hurry up and schedule
+ * the rest.
+ */
+ if (instr->opc == OPC_META_COLLECT)
+ sn->collect = instr;
+
dag_add_edge(&sn->dag, &n->dag, NULL);
unsigned d = ir3_delayslots(src, instr, i, true);