summary refs log tree commit diff stats
path: root/src/panfrost
diff options
context:
space:
mode:
authorAlyssa Rosenzweig <[email protected]>2019-07-26 13:08:54 -0700
committerAlyssa Rosenzweig <[email protected]>2019-08-02 09:57:15 -0700
commitb821e1b85e9a2325e3ee3048ca25476ac3b32ff6 (patch)
tree0f3c8173a828bcd05f3b4ab47c466d6a5ff28601 /src/panfrost
parentd8584c5cf2ec0806ccd451362d170dbdf73954fa (diff)
pan/midgard: Fuse invert into bitwise ops
We use the new invert flag to produce ops like inand.

Signed-off-by: Alyssa Rosenzweig <[email protected]>
Diffstat (limited to 'src/panfrost')
-rw-r--r--src/panfrost/midgard/compiler.h1
-rw-r--r--src/panfrost/midgard/midgard_compile.c1
-rw-r--r--src/panfrost/midgard/midgard_opt_invert.c55
3 files changed, 57 insertions, 0 deletions
diff --git a/src/panfrost/midgard/compiler.h b/src/panfrost/midgard/compiler.h
index f428db3123d..b5231f3075b 100644
--- a/src/panfrost/midgard/compiler.h
+++ b/src/panfrost/midgard/compiler.h
@@ -576,5 +576,6 @@ bool midgard_opt_dead_move_eliminate(compiler_context *ctx, midgard_block *block
void midgard_opt_post_move_eliminate(compiler_context *ctx, midgard_block *block, struct ra_graph *g);
void midgard_lower_invert(compiler_context *ctx, midgard_block *block);
+bool midgard_opt_fuse_dest_invert(compiler_context *ctx, midgard_block *block);
#endif
diff --git a/src/panfrost/midgard/midgard_compile.c b/src/panfrost/midgard/midgard_compile.c
index a35b43faee7..f0b3dde2754 100644
--- a/src/panfrost/midgard/midgard_compile.c
+++ b/src/panfrost/midgard/midgard_compile.c
@@ -2357,6 +2357,7 @@ midgard_compile_shader_nir(struct midgard_screen *screen, nir_shader *nir, midga
progress |= midgard_opt_dead_code_eliminate(ctx, block);
progress |= midgard_opt_combine_projection(ctx, block);
progress |= midgard_opt_varying_projection(ctx, block);
+ progress |= midgard_opt_fuse_dest_invert(ctx, block);
}
} while (progress);
diff --git a/src/panfrost/midgard/midgard_opt_invert.c b/src/panfrost/midgard/midgard_opt_invert.c
index 1e6c5b383ea..aab64a3c3b5 100644
--- a/src/panfrost/midgard/midgard_opt_invert.c
+++ b/src/panfrost/midgard/midgard_opt_invert.c
@@ -62,3 +62,58 @@ midgard_lower_invert(compiler_context *ctx, midgard_block *block)
mir_insert_instruction_before(mir_next_op(ins), not);
}
}
+
+/* With that lowering out of the way, we can focus on more interesting
+ * optimizations. One easy one is fusing inverts into bitwise operations:
+ *
+ * ~iand = inand
+ * ~ior = inor
+ * ~ixor = inxor
+ */
+
+/* Returns true iff the instruction's ALU op is one of the three plain
+ * bitwise ops (iand/ior/ixor) that have an inverted-destination
+ * counterpart (inand/inor/inxor) we can fuse an invert into. */
+static bool
+mir_is_bitwise(midgard_instruction *ins)
+{
+ switch (ins->alu.op) {
+ case midgard_alu_op_iand:
+ case midgard_alu_op_ior:
+ case midgard_alu_op_ixor:
+ return true;
+ default:
+ /* Any other op has no fusable inverted form */
+ return false;
+ }
+}
+
+/* Maps a plain bitwise ALU op to its destination-inverted counterpart
+ * (~iand = inand, ~ior = inor, ~ixor = inxor). Must only be called on
+ * ops for which mir_is_bitwise() returns true; anything else aborts. */
+static midgard_alu_op
+mir_invert_op(midgard_alu_op op)
+{
+ switch (op) {
+ case midgard_alu_op_iand:
+ return midgard_alu_op_inand;
+ case midgard_alu_op_ior:
+ return midgard_alu_op_inor;
+ case midgard_alu_op_ixor:
+ return midgard_alu_op_inxor;
+ default:
+ unreachable("Op not invertible");
+ }
+}
+
+/* Peephole pass over a block: folds a pending destination invert
+ * (ins->invert, set by the NIR->MIR lowering) into the bitwise op
+ * itself by swapping iand/ior/ixor for inand/inor/inxor, eliminating
+ * the separate inversion. Returns whether any instruction changed so
+ * the caller's optimization loop can iterate to a fixed point. */
+bool
+midgard_opt_fuse_dest_invert(compiler_context *ctx, midgard_block *block)
+{
+ bool progress = false;
+
+ mir_foreach_instr_in_block_safe(block, ins) {
+ /* Search for inverted bitwise */
+ if (ins->type != TAG_ALU_4) continue;
+ if (!mir_is_bitwise(ins)) continue;
+ if (!ins->invert) continue;
+
+ /* Fuse: rewrite the op and clear the invert flag it absorbed */
+ ins->alu.op = mir_invert_op(ins->alu.op);
+ ins->invert = false;
+ progress |= true;
+ }
+
+ return progress;
+}