author     Rob Clark <[email protected]>    2015-03-31 17:03:39 -0400
committer  Rob Clark <[email protected]>    2015-04-05 09:20:35 -0400
commit     f2ecc95e44c5798a15cde4aa67887bc562756ccd (patch)
tree       758dd975963306bd196bc7f962f32b800cf72143 /src/glsl/nir/nir_lower_idiv.c
parent     7880bea2fb587871faead78f624920ad5e8b89f7 (diff)
nir: add lowering for idiv/udiv/umod
Based on the algo from NV50LegalizeSSA::handleDIV() and handleMOD().
See also trans_idiv() in freedreno/ir3/ir3_compiler.c (which was an
adaptation of the nv50 code from Ilia Mirkin).

A python/numpy script which implements the same algorithm (and is
possibly useful for debugging or analysis) can be found here:

  http://people.freedesktop.org/~robclark/div-lowering.py

I've tested this on i965 hacked up to insert the idiv lowering pass,
and on freedreno with the NIR frontend.

Signed-off-by: Rob Clark <[email protected]>
Tested-by: Eric Anholt <[email protected]> (vc4)
Diffstat (limited to 'src/glsl/nir/nir_lower_idiv.c')
-rw-r--r--   src/glsl/nir/nir_lower_idiv.c   157
1 file changed, 157 insertions(+), 0 deletions(-)
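For orientation before reading the diff: the emitted sequence computes an
unsigned quotient roughly as in the standalone C sketch below. This mirrors
the NV50-derived algorithm the commit message describes, but it is not code
from this patch; sketch_udiv is a made-up name, and exact results on a CPU
depend on the rounding of 1.0f/b, so treat it as a debugging aid in the same
spirit as the python/numpy script above.

#include <stdint.h>

/* Scalar model of the lowered udiv sequence; assumes b != 0. */
static uint32_t
sketch_udiv(uint32_t a, uint32_t b)
{
   union { float f; uint32_t u; } bf = { .f = 1.0f / (float)b };

   /* knock the raw bits of the reciprocal estimate down by 2 ULPs so the
    * first quotient can only underestimate (the isub-on-a-float trick): */
   bf.u -= 2;

   uint32_t q = (uint32_t)((float)a * bf.f);   /* first estimate */

   /* estimate the error of the first result and fold it in: */
   uint32_t r = a - q * b;
   q += (uint32_t)((float)r * bf.f);

   /* final correction: if the remainder is still >= the divisor, add 1 */
   r = a - q * b;
   if (r >= b)
      q++;

   return q;   /* umod would instead return a - q * b */
}

The signed path in the pass runs this same sequence on absolute values and
patches the sign back onto the quotient at the end.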
diff --git a/src/glsl/nir/nir_lower_idiv.c b/src/glsl/nir/nir_lower_idiv.c
new file mode 100644
index 00000000000..c2f08dff375
--- /dev/null
+++ b/src/glsl/nir/nir_lower_idiv.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright © 2015 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <[email protected]>
+ */
+
+#include "nir.h"
+#include "nir_builder.h"
+
+/* Lowers idiv/udiv/umod
+ * Based on NV50LegalizeSSA::handleDIV()
+ *
+ * Note that this is probably not enough precision for compute shaders.
+ * Perhaps we want a second higher precision (looping) version of this?
+ * Or perhaps we assume that if you can do compute shaders, you can also
+ * branch out to a pre-optimized shader library routine.
+ */
+
+static void
+convert_instr(nir_builder *bld, nir_alu_instr *alu)
+{
+   nir_ssa_def *numer, *denom, *af, *bf, *a, *b, *q, *r;
+   nir_op op = alu->op;
+   bool is_signed;
+
+   if ((op != nir_op_idiv) &&
+       (op != nir_op_udiv) &&
+       (op != nir_op_umod))
+      return;
+
+   is_signed = (op == nir_op_idiv);
+
+   nir_builder_insert_before_instr(bld, &alu->instr);
+
+   numer = nir_ssa_for_src(bld, alu->src[0].src,
+                           nir_ssa_alu_instr_src_components(alu, 0));
+   denom = nir_ssa_for_src(bld, alu->src[1].src,
+                           nir_ssa_alu_instr_src_components(alu, 1));
+
+   if (is_signed) {
+      af = nir_i2f(bld, numer);
+      bf = nir_i2f(bld, denom);
+      af = nir_fabs(bld, af);
+      bf = nir_fabs(bld, bf);
+      a  = nir_iabs(bld, numer);
+      b  = nir_iabs(bld, denom);
+   } else {
+      af = nir_u2f(bld, numer);
+      bf = nir_u2f(bld, denom);
+      a  = numer;
+      b  = denom;
+   }
+
+   /* get first result; the integer subtract on the frcp result is
+    * deliberate: it knocks the raw float bits down a couple ULPs so
+    * the reciprocal estimate (and thus the first quotient) can only
+    * underestimate:
+    */
+   bf = nir_frcp(bld, bf);
+   bf = nir_isub(bld, bf, nir_imm_int(bld, 2));   /* yes, really */
+   q  = nir_fmul(bld, af, bf);
+
+   if (is_signed) {
+      q = nir_f2i(bld, q);
+   } else {
+      q = nir_f2u(bld, q);
+   }
+
+   /* get error of first result: */
+   r = nir_imul(bld, q, b);
+   r = nir_isub(bld, a, r);
+   r = nir_u2f(bld, r);
+   r = nir_fmul(bld, r, bf);
+   r = nir_f2u(bld, r);
+
+   /* add quotients: */
+   q = nir_iadd(bld, q, r);
+
+   /* correction: if modulus >= divisor, add 1 */
+   r = nir_imul(bld, q, b);
+   r = nir_isub(bld, a, r);
+
+   r = nir_ige(bld, r, b);
+   r = nir_b2i(bld, r);
+
+   q = nir_iadd(bld, q, r);
+   if (is_signed) {
+      /* fix the sign: */
+      r = nir_ixor(bld, numer, denom);
+      r = nir_ushr(bld, r, nir_imm_int(bld, 31));
+      r = nir_i2b(bld, r);
+      b = nir_ineg(bld, q);
+      q = nir_bcsel(bld, r, b, q);
+   }
+
+   if (op == nir_op_umod) {
+      /* the division result is in q, so compute the remainder: */
+      r = nir_imul(bld, q, b);
+      q = nir_isub(bld, a, r);
+   }
+
+   assert(alu->dest.dest.is_ssa);
+   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa,
+                            nir_src_for_ssa(q),
+                            ralloc_parent(alu));
+}
+
+static bool
+convert_block(nir_block *block, void *state)
+{
+   nir_builder *b = state;
+
+   nir_foreach_instr_safe(block, instr) {
+      if (instr->type == nir_instr_type_alu)
+         convert_instr(b, nir_instr_as_alu(instr));
+   }
+
+   return true;
+}
+
+static void
+convert_impl(nir_function_impl *impl)
+{
+   nir_builder b;
+   nir_builder_init(&b, impl);
+
+   nir_foreach_block(impl, convert_block, &b);
+   nir_metadata_preserve(impl, nir_metadata_block_index |
+                               nir_metadata_dominance);
+}
+
+void
+nir_lower_idiv(nir_shader *shader)
+{
+   nir_foreach_overload(shader, overload) {
+      if (overload->impl)
+         convert_impl(overload->impl);
+   }
+}
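For context, a driver of this era would run the pass once over each shader
during its NIR lowering, roughly as below. This is an illustrative sketch,
not part of the commit: the wrapper name is made up, and the cleanup passes
shown are the usual assumption for tidying the emitted mul/rcp sequences.

/* illustrative driver-side usage (assumed, not from this patch): */
static void
lower_integer_division(nir_shader *s)
{
   nir_lower_idiv(s);

   /* clean up the constant-foldable parts of the emitted sequences: */
   nir_opt_algebraic(s);
   nir_opt_constant_folding(s);
   nir_opt_dce(s);
}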