author    Eric Anholt <[email protected]>  2019-02-26 10:49:25 -0800
committer Eric Anholt <[email protected]>  2019-03-05 12:57:39 -0800
commit    1e98f02d887dada530595bc0c74292d4678c5e1a (patch)
tree      3bc96cc728f6f95f65edb98cc44c08f6e94d7561 /src/broadcom/compiler/vir_register_allocate.c
parent    060979a380be0a6149e3e875ee24fdb1e7872821 (diff)
v3d: Do uniform rematerialization spilling before dropping threadcount
This feels like the right tradeoff for threads vs uniforms, particularly given that we often have very short thread segments right now:

total instructions in shared programs: 6411504 -> 6413571 (0.03%)
total threads in shared programs: 153946 -> 154214 (0.17%)
total uniforms in shared programs: 2387665 -> 2393604 (0.25%)
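For context, the outer compile loop retries register allocation, spilling or reducing the thread count each time allocation fails. The following is a minimal sketch of the decision order this patch establishes: rematerialize uniform moves first, drop the thread count only when no such temp is left, and reach for TMU spills last. The struct, helpers, and numbers below are made up for illustration; they are not the Mesa API, whose real logic lives in v3d_register_allocate() and its caller.

/* Hypothetical sketch of the allocate/spill retry loop this change tunes. */
#include <stdbool.h>
#include <stdio.h>

struct shader_state {
        int thread_count;    /* halved each time more registers are needed */
        int pressure;        /* stand-in for register demand */
        int uniform_temps;   /* temps that are plain uniform moves */
};

static bool allocate_registers(const struct shader_state *s)
{
        /* Allocation succeeds once pressure fits the per-thread budget
         * (the budget of 32 is invented for this example). */
        return s->pressure <= 32 / s->thread_count;
}

static void compile(struct shader_state *s)
{
        while (!allocate_registers(s)) {
                if (s->uniform_temps > 0) {
                        /* Cheap: reload the uniform where it is used instead
                         * of keeping it live in a register. */
                        s->uniform_temps--;
                        s->pressure--;
                } else if (s->thread_count > 1) {
                        /* Only now trade threads for a bigger register file. */
                        s->thread_count /= 2;
                } else {
                        /* Last resort: spill through the TMU. */
                        s->pressure--;
                }
        }
        printf("threads=%d pressure=%d\n", s->thread_count, s->pressure);
}

int main(void)
{
        struct shader_state s = { .thread_count = 4, .pressure = 12,
                                  .uniform_temps = 3 };
        compile(&s);
        return 0;
}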
Diffstat (limited to 'src/broadcom/compiler/vir_register_allocate.c')
-rw-r--r--  src/broadcom/compiler/vir_register_allocate.c | 18
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/src/broadcom/compiler/vir_register_allocate.c b/src/broadcom/compiler/vir_register_allocate.c
index ae71e502494..78f59f84744 100644
--- a/src/broadcom/compiler/vir_register_allocate.c
+++ b/src/broadcom/compiler/vir_register_allocate.c
@@ -586,16 +586,18 @@ v3d_register_allocate(struct v3d_compile *c, bool *spilled)
         bool ok = ra_allocate(g);
         if (!ok) {
-                /* Try to spill, if we can't reduce threading first. */
-                if (thread_index == 0) {
-                        int node = v3d_choose_spill_node(c, g, temp_to_node);
+                int node = v3d_choose_spill_node(c, g, temp_to_node);
-                        if (node != -1) {
-                                v3d_spill_reg(c, map[node].temp);
+                /* Don't emit spills using the TMU until we've dropped thread
+                 * count first.
+                 */
+                if (node != -1 &&
+                    (vir_is_mov_uniform(c, map[node].temp) ||
+                     thread_index == 0)) {
+                        v3d_spill_reg(c, map[node].temp);
-                                /* Ask the outer loop to call back in. */
-                                *spilled = true;
-                        }
+                        /* Ask the outer loop to call back in. */
+                        *spilled = true;
                 }
                 ralloc_free(g);
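For reference, this is roughly how the spill-selection block reads with the patch applied (reconstructed from the added lines above; the rest of the failure path in v3d_register_allocate() is elided):

        bool ok = ra_allocate(g);
        if (!ok) {
                int node = v3d_choose_spill_node(c, g, temp_to_node);

                /* Don't emit spills using the TMU until we've dropped thread
                 * count first.
                 */
                if (node != -1 &&
                    (vir_is_mov_uniform(c, map[node].temp) ||
                     thread_index == 0)) {
                        v3d_spill_reg(c, map[node].temp);

                        /* Ask the outer loop to call back in. */
                        *spilled = true;
                }

                ralloc_free(g);
                /* ... remainder of the failure path elided ... */
        }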