author    Eric Anholt <[email protected]>  2018-01-09 09:40:57 -0800
committer Eric Anholt <[email protected]>  2018-01-12 21:55:23 -0800
commit    86a12b4d5a49c68f4613513d2846c5eb8e56a677 (patch)
tree      3ca97663bde40b843c1b7e054525eab1aa3f4f0a /src/broadcom/compiler/vir_to_qpu.c
parent    a075bb67262bd48c882f0c8fcc18e0e642c76b86 (diff)
broadcom/vc5: Properly schedule the thread-end THRSW.
This fills in the delay slots of the thread end as much as we can (other than being cautious about potential TLB Z writes).

In the process, I moved the thread-end THRSW instruction creation to the scheduler. Once we start emitting THRSWs in the shader, we need to schedule the thread-end one differently from other THRSWs, so having it in there makes that easy.
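As a rough illustration of the delay-slot filling described above: instead of padding the instruction slots after the final thread switch with NOPs, a scheduler can pull earlier, safe instructions into them. The toy C program below sketches only that idea; the type and function names, the two-slot count, and the simple TLB-write check are assumptions made for this example and are not the v3d compiler's actual API.

/* Toy model of thread-end delay-slot filling. None of these names are
 * Mesa's; this only illustrates the scheduling idea from the commit
 * message. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_INSTS   32
#define DELAY_SLOTS  2   /* assumed slot count for this toy model */

struct toy_inst {
        char desc[32];    /* human-readable mnemonic */
        bool writes_tlb;  /* stand-in for "unsafe to move past thread end" */
};

/* Move the latest safe instructions into the slots after the thread-end
 * switch (which sits at count - 1) instead of padding them with NOPs.
 * Returns how many slots would still need NOP padding. */
static int fill_thread_end_slots(struct toy_inst *insts, int count)
{
        int filled = 0;

        for (int i = count - 2; i >= 0 && filled < DELAY_SLOTS; i--) {
                if (insts[i].writes_tlb)
                        break;  /* stay cautious around TLB writes */

                struct toy_inst moved = insts[i];
                /* Shift the THRSW (and anything already moved) down by one
                 * slot, then re-insert the moved instruction at the end. */
                memmove(&insts[i], &insts[i + 1],
                        (count - i - 1) * sizeof(*insts));
                insts[count - 1] = moved;
                filled++;
        }

        return DELAY_SLOTS - filled;
}

int main(void)
{
        struct toy_inst prog[MAX_INSTS] = {
                { "fmul r0, a, b", false },
                { "tlb_write r0",  true  },
                { "fadd r1, c, d", false },
                { "fmax r2, e, f", false },
                { "thrsw (end)",   false },
        };
        int count = 5;

        int nops = fill_thread_end_slots(prog, count);
        for (int i = 0; i < count; i++)
                printf("%s\n", prog[i].desc);
        printf("NOPs still needed: %d\n", nops);
        return 0;
}

In the real compiler this now happens inside the QPU instruction scheduler, which is why the patch below drops the THRSW emission from vir_to_qpu.c; the scheduler can weigh actual dependencies rather than the single flag checked here.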
Diffstat (limited to 'src/broadcom/compiler/vir_to_qpu.c')
-rw-r--r--   src/broadcom/compiler/vir_to_qpu.c   6
1 file changed, 0 insertions, 6 deletions
diff --git a/src/broadcom/compiler/vir_to_qpu.c b/src/broadcom/compiler/vir_to_qpu.c
index 525638df691..955eb96a87e 100644
--- a/src/broadcom/compiler/vir_to_qpu.c
+++ b/src/broadcom/compiler/vir_to_qpu.c
@@ -322,8 +322,6 @@ void
 v3d_vir_to_qpu(struct v3d_compile *c)
 {
         struct qpu_reg *temp_registers = v3d_register_allocate(c);
-        struct qblock *end_block = list_last_entry(&c->blocks,
-                                                   struct qblock, link);
 
         /* Reset the uniform count to how many will be actually loaded by the
          * generated QPU code.
@@ -333,10 +331,6 @@ v3d_vir_to_qpu(struct v3d_compile *c)
         vir_for_each_block(block, c)
                 v3d_generate_code_block(c, block, temp_registers);
 
-        struct qinst *thrsw = vir_nop();
-        list_addtail(&thrsw->link, &end_block->instructions);
-        thrsw->qpu.sig.thrsw = true;
-
         uint32_t cycles = v3d_qpu_schedule_instructions(c);
 
         c->qpu_insts = rzalloc_array(c, uint64_t, c->qpu_inst_count);