From 3471ce99850cd2ebfe04a10d01f5fe69a349594f Mon Sep 17 00:00:00 2001
From: Eric Anholt
Date: Thu, 14 Dec 2017 09:28:42 -0800
Subject: v3d: Add support for the TMUWT instruction.

This instruction is used to ensure that TMU stores have been processed
before moving on.  In particular, you need any TMU ops to be done by the
time the shader ends.
---
 src/broadcom/compiler/qpu_schedule.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'src/broadcom/compiler/qpu_schedule.c')

diff --git a/src/broadcom/compiler/qpu_schedule.c b/src/broadcom/compiler/qpu_schedule.c
index 441b6327825..fb5ecd6410c 100644
--- a/src/broadcom/compiler/qpu_schedule.c
+++ b/src/broadcom/compiler/qpu_schedule.c
@@ -402,7 +402,7 @@ calculate_deps(struct schedule_state *state, struct schedule_node *n)
                 add_write_dep(state, &state->last_tmu_config, n);
         }
 
-        if (inst->sig.ldtmu) {
+        if (v3d_qpu_waits_on_tmu(inst)) {
                 /* TMU loads are coming from a FIFO, so ordering is important.
                  */
                 add_write_dep(state, &state->last_tmu_write, n);
@@ -564,7 +564,7 @@ get_instruction_priority(const struct v3d_qpu_instr *inst)
         next_score++;
 
         /* Schedule texture read results collection late to hide latency. */
-        if (inst->sig.ldtmu)
+        if (v3d_qpu_waits_on_tmu(inst))
                 return next_score;
         next_score++;
 
@@ -605,6 +605,9 @@ qpu_accesses_peripheral(const struct v3d_qpu_instr *inst)
                         return true;
                 }
 
+                if (inst->alu.add.op == V3D_QPU_A_TMUWT)
+                        return true;
+
                 if (inst->alu.mul.op != V3D_QPU_M_NOP &&
                     inst->alu.mul.magic_write &&
                     qpu_magic_waddr_is_periph(inst->alu.mul.waddr)) {
@@ -910,7 +913,7 @@ static uint32_t magic_waddr_latency(enum v3d_qpu_waddr waddr,
          *
          * because we associate the first load_tmu0 with the *second* tmu0_s.
          */
-        if (v3d_qpu_magic_waddr_is_tmu(waddr) && after->sig.ldtmu)
+        if (v3d_qpu_magic_waddr_is_tmu(waddr) && v3d_qpu_waits_on_tmu(after))
                 return 100;
 
         /* Assume that anything depending on us is consuming the SFU result. */
--
cgit v1.2.3
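
For context, the scheduler changes above all funnel through v3d_qpu_waits_on_tmu(), a helper defined outside this file, so its body does not appear in the patch. Below is a minimal sketch of the shape it presumably has, inferred only from the two cases this patch treats as TMU waits (the ldtmu signal and the new TMUWT add-ALU op); the exact implementation is an assumption, not part of this commit.

/* Presumed shape (assumption): an instruction "waits on the TMU" either
 * when it collects a texture result via the ldtmu signal, or when it is
 * the TMUWT add-ALU op that stalls until outstanding TMU writes land.
 */
static bool
v3d_qpu_waits_on_tmu(const struct v3d_qpu_instr *inst)
{
        return (inst->sig.ldtmu ||
                (inst->type == V3D_QPU_INSTR_TYPE_ALU &&
                 inst->alu.add.op == V3D_QPU_A_TMUWT));
}

With that shape, every hunk asks the same question: does this instruction consume or wait on the TMU FIFO? TMUWT is additionally reported by qpu_accesses_peripheral(), presumably so the instruction-pairing logic treats it like any other TMU access.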