author		Alejandro Piñeiro <[email protected]>	2019-06-25 15:02:56 +0200
committer	Alejandro Piñeiro <[email protected]>	2019-07-12 11:51:22 +0200
commit		85b78f96a663d11f2ec10ba6d34b4e5971f38077 (patch)
tree		5be4c834ae87ee41f77874219ef9d61532fccb72 /src/broadcom/compiler/v3d40_tex.c
parent		2e22879115b9b24d401006ab35674fc8537f155f (diff)
v3d: use inc/dec tmu operation with image atomic sub/add of 1
This allows us to remove a mov of 1/-1, as it is implicit with the
operation. As with atomic inc/dec/add, the usual shader-db set doesn't
include any GLES shader using it, so using the vk-gl-cts shaders as a
workaround, we get this:

total instructions in shared programs: 1217013 -> 1217006 (<.01%)
instructions in affected programs: 53 -> 46 (-13.21%)
helped: 2
HURT: 0

One of the helped shaders went from 40 to 34 instructions.

Reviewed-by: Eric Anholt <[email protected]>
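The first hunk below swaps the hardcoded V3D_TMU_OP_WRITE_ADD_READ_PREFETCH
for a call to v3d_get_op_for_atomic_add(instr, 3), whose body is not part of
this diff. A minimal sketch of what such a helper could look like, assuming
NIR's constant-source helpers (nir_src_is_const()/nir_src_as_int()) and that
source index 3 is the data operand of the image atomic:

/* Hypothetical sketch (the real helper lives elsewhere in the v3d
 * compiler): if the value being added is a constant +1 or -1, pick the
 * TMU inc/dec op so no data write is needed; otherwise fall back to the
 * regular add op.
 */
static uint32_t
v3d_get_op_for_atomic_add(nir_intrinsic_instr *instr, unsigned src)
{
        if (nir_src_is_const(instr->src[src])) {
                int64_t add_val = nir_src_as_int(instr->src[src]);
                if (add_val == 1)
                        return V3D_TMU_OP_WRITE_AND_READ_INC;
                else if (add_val == -1)
                        return V3D_TMU_OP_WRITE_OR_READ_DEC;
        }
        return V3D_TMU_OP_WRITE_ADD_READ_PREFETCH;
}

With one of the inc/dec ops selected, the caller can set atomic_add_replaced
(second hunk) and skip emitting the TMU data write for the amount (last
hunk), which is where the saved instructions in the stats above come from.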
Diffstat (limited to 'src/broadcom/compiler/v3d40_tex.c')
-rw-r--r--	src/broadcom/compiler/v3d40_tex.c	16
1 file changed, 11 insertions, 5 deletions
diff --git a/src/broadcom/compiler/v3d40_tex.c b/src/broadcom/compiler/v3d40_tex.c
index e9f106e00db..30f1293c85c 100644
--- a/src/broadcom/compiler/v3d40_tex.c
+++ b/src/broadcom/compiler/v3d40_tex.c
@@ -251,7 +251,7 @@ v3d40_image_load_store_tmu_op(nir_intrinsic_instr *instr)
case nir_intrinsic_image_deref_store:
return V3D_TMU_OP_REGULAR;
case nir_intrinsic_image_deref_atomic_add:
- return V3D_TMU_OP_WRITE_ADD_READ_PREFETCH;
+ return v3d_get_op_for_atomic_add(instr, 3);
case nir_intrinsic_image_deref_atomic_min:
return V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
case nir_intrinsic_image_deref_atomic_max:
@@ -292,11 +292,16 @@ v3d40_vir_emit_image_load_store(struct v3d_compile *c,
struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = { 0 };
- /* XXX perf: We should turn add/sub of 1 to inc/dec. Perhaps NIR
- * wants to have support for inc/dec?
- */
p2_unpacked.op = v3d40_image_load_store_tmu_op(instr);
+ /* If we were able to replace atomic_add for an inc/dec, then we
+ * need/can to do things slightly different, like not loading the
+ * amount to add/sub, as that is implicit.
+ */
+ bool atomic_add_replaced = (instr->intrinsic == nir_intrinsic_image_deref_atomic_add &&
+ (p2_unpacked.op == V3D_TMU_OP_WRITE_AND_READ_INC ||
+ p2_unpacked.op == V3D_TMU_OP_WRITE_OR_READ_DEC));
+
bool is_1d = false;
switch (glsl_get_sampler_dim(sampler_type)) {
case GLSL_SAMPLER_DIM_1D:
@@ -364,7 +369,8 @@ v3d40_vir_emit_image_load_store(struct v3d_compile *c,
vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);
/* Emit the data writes for atomics or image store. */
- if (instr->intrinsic != nir_intrinsic_image_deref_load) {
+ if (instr->intrinsic != nir_intrinsic_image_deref_load &&
+ !atomic_add_replaced) {
/* Vector for stores, or first atomic argument */
struct qreg src[4];
for (int i = 0; i < nir_intrinsic_src_components(instr, 3); i++) {