author     Ian Romanick <[email protected]>  2018-08-22 21:21:04 -0700
committer  Ian Romanick <[email protected]>  2019-05-06 22:52:29 -0700
commit     ae02622d8fd34d62aeeb0a1b6c8c71bb16a5f32d (patch)
tree       e679ce0519b94c099c63787ffafe648856fab710 /src/compiler/nir
parent     6698d861a5719cb58019b1a2e073d331a8844002 (diff)
nir/flrp: Lower flrp(a, b, c) differently if another flrp(_, b, c) exists
There is little effect on Intel GPUs now because we almost always take
the "always_precise" path first.  It may help on other GPUs, and it does
prevent a bunch of regressions in "intel/compiler: Don't always require
precise lowering of flrp".

No changes on any other Intel platforms.  GM45 and Iron Lake had similar
results.  (Iron Lake shown)

total cycles in shared programs: 188852500 -> 188852484 (<.01%)
cycles in affected programs: 14612 -> 14596 (-0.11%)
helped: 4
HURT: 0

helped stats (abs) min: 4 max: 4 x̄: 4.00 x̃: 4
helped stats (rel) min: 0.09% max: 0.13% x̄: 0.11% x̃: 0.11%

95% mean confidence interval for cycles value: -4.00 -4.00
95% mean confidence interval for cycles %-change: -0.13% -0.09%
Cycles are helped.

Reviewed-by: Matt Turner <[email protected]>
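[Editor's note] The rewrite rests on the identity flrp(a, b, c) = a(1 - c) + b*c
= ffma(a, 1 - c, b*c).  When several flrps share src1 (b) and src2 (c), the
(1 - c) and b*c terms only need to be computed once.  Below is a minimal
standalone C sketch of that sharing; the scalar ffma() helper is a hypothetical
stand-in for the hardware fused multiply-add, not the NIR builder API:

    #include <stdio.h>

    /* Hypothetical scalar stand-in for the hardware fused multiply-add. */
    static float ffma(float x, float y, float z) { return x * y + z; }

    int main(void)
    {
       float a0 = 2.0f, a1 = 5.0f;  /* two flrps with distinct src0 ... */
       float b = 3.0f, c = 0.25f;   /* ... but shared src1 and src2     */

       /* Computed once, then reused by every flrp(_, b, c) nearby.  In
        * NIR the fneg folds into the fadd as a source modifier, so this
        * costs 3 instructions for the first flrp (fadd, fmul, ffma) and
        * 1 (a single ffma) for each additional flrp.
        */
       float one_minus_c = 1.0f - c;
       float b_times_c   = b * c;

       float r0 = ffma(a0, one_minus_c, b_times_c);  /* flrp(a0, b, c) */
       float r1 = ffma(a1, one_minus_c, b_times_c);  /* flrp(a1, b, c) */

       printf("%f %f\n", r0, r1);   /* prints 2.250000 4.500000 */
       return 0;
    }

The same sharing argument drives the non-FMA hunk below, where the strict
x(1 - t) + yt form costs 4 instructions for the first flrp and 2 for each
additional one.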
Diffstat (limited to 'src/compiler/nir')
-rw-r--r--  src/compiler/nir/nir_lower_flrp.c | 57
1 file changed, 56 insertions(+), 1 deletion(-)
diff --git a/src/compiler/nir/nir_lower_flrp.c b/src/compiler/nir/nir_lower_flrp.c
index 0b32daf1dd2..5094a714504 100644
--- a/src/compiler/nir/nir_lower_flrp.c
+++ b/src/compiler/nir/nir_lower_flrp.c
@@ -70,6 +70,39 @@ replace_with_strict_ffma(struct nir_builder *bld, struct u_vector *dead_flrp,
}
/**
+ * Replace flrp(a, b, c) with ffma(a, (1 - c), bc)
+ */
+static void
+replace_with_single_ffma(struct nir_builder *bld, struct u_vector *dead_flrp,
+ struct nir_alu_instr *alu)
+{
+ nir_ssa_def *const a = nir_ssa_for_alu_src(bld, alu, 0);
+ nir_ssa_def *const b = nir_ssa_for_alu_src(bld, alu, 1);
+ nir_ssa_def *const c = nir_ssa_for_alu_src(bld, alu, 2);
+
+ nir_ssa_def *const neg_c = nir_fneg(bld, c);
+ nir_instr_as_alu(neg_c->parent_instr)->exact = alu->exact;
+
+ nir_ssa_def *const one_minus_c =
+ nir_fadd(bld, nir_imm_float(bld, 1.0f), neg_c);
+ nir_instr_as_alu(one_minus_c->parent_instr)->exact = alu->exact;
+
+ nir_ssa_def *const b_times_c = nir_fmul(bld, b, c);
+ nir_instr_as_alu(b_times_c->parent_instr)->exact = alu->exact;
+
+ nir_ssa_def *const final_ffma = nir_ffma(bld, a, one_minus_c, b_times_c);
+ nir_instr_as_alu(final_ffma->parent_instr)->exact = alu->exact;
+
+ nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(final_ffma));
+
+ /* DO NOT REMOVE the original flrp yet. Many of the lowering choices are
+ * based on other uses of the sources. Removing the flrp may cause the
+ * last flrp in a sequence to make a different, incorrect choice.
+ */
+ append_flrp_to_dead_list(dead_flrp, alu);
+}
+
+/**
* Replace flrp(a, b, c) with a(1-c) + bc.
*/
static void
@@ -476,6 +509,20 @@ convert_flrp_instruction(nir_builder *bld,
replace_with_strict_ffma(bld, dead_flrp, alu);
return;
}
+
+ /*
+ * - If FMA is supported and another flrp(_, y, t) exists:
+ *
+ * fma(x, (1 - t), yt)
+ *
+ * The hope is that the (1 - t) and the yt will be shared with the
+ * other lowered flrp. This results in 3 instructions for the first
+ * flrp and 1 for each additional flrp.
+ */
+ if (st.src1_and_src2 > 0) {
+ replace_with_single_ffma(bld, dead_flrp, alu);
+ return;
+ }
} else {
if (always_precise) {
replace_with_strict(bld, dead_flrp, alu);
@@ -490,11 +537,19 @@ convert_flrp_instruction(nir_builder *bld,
* The hope is that the x(1 - t) will be shared with the other lowered
* flrp. This results in 4 instructions for the first flrp and 2 for
* each additional flrp.
+ *
+ * - If FMA is not supported and another flrp(_, y, t) exists:
+ *
+ * x(1 - t) + yt
+ *
+ * The hope is that the (1 - t) and the yt will be shared with the
+ * other lowered flrp. This results in 4 instructions for the first
+ * flrp and 2 for each additional flrp.
*/
struct similar_flrp_stats st;
get_similar_flrp_stats(alu, &st);
- if (st.src0_and_src2 > 0) {
+ if (st.src0_and_src2 > 0 || st.src1_and_src2 > 0) {
replace_with_strict(bld, dead_flrp, alu);
return;
}
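
[Editor's note] Read together, the two hunks give convert_flrp_instruction
roughly the following shape.  This is a paraphrased sketch, not the verbatim
function: has_fma stands for whatever FMA-capability check guards the first
branch in the real source, and the stats gathering (done per-branch in the
real code) is hoisted to the top for brevity:

    struct similar_flrp_stats st;
    get_similar_flrp_stats(alu, &st);    /* count flrps sharing sources */

    if (has_fma) {
       if (always_precise) {
          replace_with_strict_ffma(bld, dead_flrp, alu);
          return;
       }

       /* New in this patch: another flrp(_, y, t) shares src1 and src2,
        * so lower to ffma(x, 1 - t, yt) and let CSE reuse (1 - t) and yt.
        */
       if (st.src1_and_src2 > 0) {
          replace_with_single_ffma(bld, dead_flrp, alu);
          return;
       }
    } else {
       if (always_precise) {
          replace_with_strict(bld, dead_flrp, alu);
          return;
       }

       /* Widened in this patch: a shared src1/src2 pair now also selects
        * the strict x(1 - t) + yt form, in addition to a shared src0/src2
        * pair.
        */
       if (st.src0_and_src2 > 0 || st.src1_and_src2 > 0) {
          replace_with_strict(bld, dead_flrp, alu);
          return;
       }
    }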