Diffstat (limited to 'src')
-rw-r--r--   src/gallium/auxiliary/Makefile.sources             |    2
-rw-r--r--   src/gallium/auxiliary/gallivm/lp_bld_ir_common.c   |  466
-rw-r--r--   src/gallium/auxiliary/gallivm/lp_bld_ir_common.h   |  120
-rw-r--r--   src/gallium/auxiliary/gallivm/lp_bld_tgsi.h        |   62
-rw-r--r--   src/gallium/auxiliary/gallivm/lp_bld_tgsi_soa.c    |  426
-rw-r--r--   src/gallium/auxiliary/meson.build                  |    2
6 files changed, 599 insertions(+), 479 deletions(-)
diff --git a/src/gallium/auxiliary/Makefile.sources b/src/gallium/auxiliary/Makefile.sources
index 2cc7e8a3fc7..213bfe93f9b 100644
--- a/src/gallium/auxiliary/Makefile.sources
+++ b/src/gallium/auxiliary/Makefile.sources
@@ -413,6 +413,8 @@ GALLIVM_SOURCES := \
gallivm/lp_bld_init.h \
gallivm/lp_bld_intr.c \
gallivm/lp_bld_intr.h \
+ gallivm/lp_bld_ir_common.c \
+ gallivm/lp_bld_ir_common.h \
gallivm/lp_bld_limits.h \
gallivm/lp_bld_logic.c \
gallivm/lp_bld_logic.h \
diff --git a/src/gallium/auxiliary/gallivm/lp_bld_ir_common.c b/src/gallium/auxiliary/gallivm/lp_bld_ir_common.c
new file mode 100644
index 00000000000..20af9d4d0c5
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/lp_bld_ir_common.c
@@ -0,0 +1,466 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * Copyright 2007-2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_memory.h"
+#include "lp_bld_type.h"
+#include "lp_bld_init.h"
+#include "lp_bld_flow.h"
+#include "lp_bld_ir_common.h"
+#include "lp_bld_logic.h"
+
+/*
+ * Return the context for the current function.
+ * (always 'main', if shader doesn't do any function calls)
+ */
+static inline struct function_ctx *
+func_ctx(struct lp_exec_mask *mask)
+{
+ assert(mask->function_stack_size > 0);
+ assert(mask->function_stack_size <= LP_MAX_NUM_FUNCS);
+ return &mask->function_stack[mask->function_stack_size - 1];
+}
+
+/*
+ * Returns true if we're in a loop.
+ * It's global, meaning that it returns true even if there's
+ * no loop inside the current function, but we were inside
+ * a loop inside another function, from which this one was called.
+ */
+static inline boolean
+mask_has_loop(struct lp_exec_mask *mask)
+{
+ int i;
+ for (i = mask->function_stack_size - 1; i >= 0; --i) {
+ const struct function_ctx *ctx = &mask->function_stack[i];
+ if (ctx->loop_stack_size > 0)
+ return TRUE;
+ }
+ return FALSE;
+}
+
+/*
+ * Returns true if we're inside a switch statement.
+ * It's global, meaning that it returns true even if there's
+ * no switch in the current function, but we were inside
+ * a switch inside another function, from which this one was called.
+ */
+static inline boolean
+mask_has_switch(struct lp_exec_mask *mask)
+{
+ int i;
+ for (i = mask->function_stack_size - 1; i >= 0; --i) {
+ const struct function_ctx *ctx = &mask->function_stack[i];
+ if (ctx->switch_stack_size > 0)
+ return TRUE;
+ }
+ return FALSE;
+}
+
+/*
+ * Returns true if we're inside a conditional.
+ * It's global, meaning that it returns true even if there's
+ * no conditional in the current function, but we were inside
+ * a conditional inside another function, from which this one was called.
+ */
+static inline boolean
+mask_has_cond(struct lp_exec_mask *mask)
+{
+ int i;
+ for (i = mask->function_stack_size - 1; i >= 0; --i) {
+ const struct function_ctx *ctx = &mask->function_stack[i];
+ if (ctx->cond_stack_size > 0)
+ return TRUE;
+ }
+ return FALSE;
+}
+
+void lp_exec_mask_update(struct lp_exec_mask *mask)
+{
+ LLVMBuilderRef builder = mask->bld->gallivm->builder;
+ boolean has_loop_mask = mask_has_loop(mask);
+ boolean has_cond_mask = mask_has_cond(mask);
+ boolean has_switch_mask = mask_has_switch(mask);
+ boolean has_ret_mask = mask->function_stack_size > 1 ||
+ mask->ret_in_main;
+
+ if (has_loop_mask) {
+ /*for loops we need to update the entire mask at runtime */
+ LLVMValueRef tmp;
+ assert(mask->break_mask);
+ tmp = LLVMBuildAnd(builder,
+ mask->cont_mask,
+ mask->break_mask,
+ "maskcb");
+ mask->exec_mask = LLVMBuildAnd(builder,
+ mask->cond_mask,
+ tmp,
+ "maskfull");
+ } else
+ mask->exec_mask = mask->cond_mask;
+
+ if (has_switch_mask) {
+ mask->exec_mask = LLVMBuildAnd(builder,
+ mask->exec_mask,
+ mask->switch_mask,
+ "switchmask");
+ }
+
+ if (has_ret_mask) {
+ mask->exec_mask = LLVMBuildAnd(builder,
+ mask->exec_mask,
+ mask->ret_mask,
+ "callmask");
+ }
+
+ mask->has_mask = (has_cond_mask ||
+ has_loop_mask ||
+ has_switch_mask ||
+ has_ret_mask);
+}
+
+/*
+ * Initialize a function context at the specified index.
+ */
+void
+lp_exec_mask_function_init(struct lp_exec_mask *mask, int function_idx)
+{
+ LLVMTypeRef int_type = LLVMInt32TypeInContext(mask->bld->gallivm->context);
+ LLVMBuilderRef builder = mask->bld->gallivm->builder;
+ struct function_ctx *ctx = &mask->function_stack[function_idx];
+
+ ctx->cond_stack_size = 0;
+ ctx->loop_stack_size = 0;
+ ctx->bgnloop_stack_size = 0;
+ ctx->switch_stack_size = 0;
+
+ if (function_idx == 0) {
+ ctx->ret_mask = mask->ret_mask;
+ }
+
+ ctx->loop_limiter = lp_build_alloca(mask->bld->gallivm,
+ int_type, "looplimiter");
+ LLVMBuildStore(
+ builder,
+ LLVMConstInt(int_type, LP_MAX_TGSI_LOOP_ITERATIONS, false),
+ ctx->loop_limiter);
+}
+
+void lp_exec_mask_init(struct lp_exec_mask *mask, struct lp_build_context *bld)
+{
+ mask->bld = bld;
+ mask->has_mask = FALSE;
+ mask->ret_in_main = FALSE;
+ /* For the main function */
+ mask->function_stack_size = 1;
+
+ mask->int_vec_type = lp_build_int_vec_type(bld->gallivm, mask->bld->type);
+ mask->exec_mask = mask->ret_mask = mask->break_mask = mask->cont_mask =
+ mask->cond_mask = mask->switch_mask =
+ LLVMConstAllOnes(mask->int_vec_type);
+
+ mask->function_stack = CALLOC(LP_MAX_NUM_FUNCS,
+ sizeof(mask->function_stack[0]));
+ lp_exec_mask_function_init(mask, 0);
+}
+
+void
+lp_exec_mask_fini(struct lp_exec_mask *mask)
+{
+ FREE(mask->function_stack);
+}
+
+/* stores val into an address pointed to by dst_ptr.
+ * mask->exec_mask is used to figure out which bits of val
+ * should be stored into the address
+ * (0 means don't store this bit, 1 means do store).
+ */
+void lp_exec_mask_store(struct lp_exec_mask *mask,
+ struct lp_build_context *bld_store,
+ LLVMValueRef val,
+ LLVMValueRef dst_ptr)
+{
+ LLVMBuilderRef builder = mask->bld->gallivm->builder;
+ LLVMValueRef exec_mask = mask->has_mask ? mask->exec_mask : NULL;
+
+ assert(lp_check_value(bld_store->type, val));
+ assert(LLVMGetTypeKind(LLVMTypeOf(dst_ptr)) == LLVMPointerTypeKind);
+ assert(LLVMGetElementType(LLVMTypeOf(dst_ptr)) == LLVMTypeOf(val) ||
+ LLVMGetTypeKind(LLVMGetElementType(LLVMTypeOf(dst_ptr))) == LLVMArrayTypeKind);
+
+ if (exec_mask) {
+ LLVMValueRef res, dst;
+
+ dst = LLVMBuildLoad(builder, dst_ptr, "");
+ res = lp_build_select(bld_store, exec_mask, val, dst);
+ LLVMBuildStore(builder, res, dst_ptr);
+ } else
+ LLVMBuildStore(builder, val, dst_ptr);
+}
+
+void lp_exec_bgnloop_post_phi(struct lp_exec_mask *mask)
+{
+ LLVMBuilderRef builder = mask->bld->gallivm->builder;
+ struct function_ctx *ctx = func_ctx(mask);
+
+ if (ctx->loop_stack_size != ctx->bgnloop_stack_size) {
+ mask->break_mask = LLVMBuildLoad(builder, ctx->break_var, "");
+ lp_exec_mask_update(mask);
+ ctx->bgnloop_stack_size = ctx->loop_stack_size;
+ }
+}
+
+void lp_exec_bgnloop(struct lp_exec_mask *mask, bool load)
+{
+ LLVMBuilderRef builder = mask->bld->gallivm->builder;
+ struct function_ctx *ctx = func_ctx(mask);
+
+ if (ctx->loop_stack_size >= LP_MAX_TGSI_NESTING) {
+ ++ctx->loop_stack_size;
+ return;
+ }
+
+ ctx->break_type_stack[ctx->loop_stack_size + ctx->switch_stack_size] =
+ ctx->break_type;
+ ctx->break_type = LP_EXEC_MASK_BREAK_TYPE_LOOP;
+
+ ctx->loop_stack[ctx->loop_stack_size].loop_block = ctx->loop_block;
+ ctx->loop_stack[ctx->loop_stack_size].cont_mask = mask->cont_mask;
+ ctx->loop_stack[ctx->loop_stack_size].break_mask = mask->break_mask;
+ ctx->loop_stack[ctx->loop_stack_size].break_var = ctx->break_var;
+ ++ctx->loop_stack_size;
+
+ ctx->break_var = lp_build_alloca(mask->bld->gallivm, mask->int_vec_type, "");
+ LLVMBuildStore(builder, mask->break_mask, ctx->break_var);
+
+ ctx->loop_block = lp_build_insert_new_block(mask->bld->gallivm, "bgnloop");
+
+ LLVMBuildBr(builder, ctx->loop_block);
+ LLVMPositionBuilderAtEnd(builder, ctx->loop_block);
+
+ if (load) {
+ lp_exec_bgnloop_post_phi(mask);
+ }
+}
+
+void lp_exec_endloop(struct gallivm_state *gallivm,
+ struct lp_exec_mask *mask)
+{
+ LLVMBuilderRef builder = mask->bld->gallivm->builder;
+ struct function_ctx *ctx = func_ctx(mask);
+ LLVMBasicBlockRef endloop;
+ LLVMTypeRef int_type = LLVMInt32TypeInContext(mask->bld->gallivm->context);
+ LLVMTypeRef reg_type = LLVMIntTypeInContext(gallivm->context,
+ mask->bld->type.width *
+ mask->bld->type.length);
+ LLVMValueRef i1cond, i2cond, icond, limiter;
+
+ assert(mask->break_mask);
+
+ assert(ctx->loop_stack_size);
+ if (ctx->loop_stack_size > LP_MAX_TGSI_NESTING) {
+ --ctx->loop_stack_size;
+ --ctx->bgnloop_stack_size;
+ return;
+ }
+
+ /*
+ * Restore the cont_mask, but don't pop
+ */
+ mask->cont_mask = ctx->loop_stack[ctx->loop_stack_size - 1].cont_mask;
+ lp_exec_mask_update(mask);
+
+ /*
+ * Unlike the continue mask, the break_mask must be preserved across loop
+ * iterations
+ */
+ LLVMBuildStore(builder, mask->break_mask, ctx->break_var);
+
+ /* Decrement the loop limiter */
+ limiter = LLVMBuildLoad(builder, ctx->loop_limiter, "");
+
+ limiter = LLVMBuildSub(
+ builder,
+ limiter,
+ LLVMConstInt(int_type, 1, false),
+ "");
+
+ LLVMBuildStore(builder, limiter, ctx->loop_limiter);
+
+ /* i1cond = (mask != 0) */
+ i1cond = LLVMBuildICmp(
+ builder,
+ LLVMIntNE,
+ LLVMBuildBitCast(builder, mask->exec_mask, reg_type, ""),
+ LLVMConstNull(reg_type), "i1cond");
+
+ /* i2cond = (looplimiter > 0) */
+ i2cond = LLVMBuildICmp(
+ builder,
+ LLVMIntSGT,
+ limiter,
+ LLVMConstNull(int_type), "i2cond");
+
+ /* if( i1cond && i2cond ) */
+ icond = LLVMBuildAnd(builder, i1cond, i2cond, "");
+
+ endloop = lp_build_insert_new_block(mask->bld->gallivm, "endloop");
+
+ LLVMBuildCondBr(builder,
+ icond, ctx->loop_block, endloop);
+
+ LLVMPositionBuilderAtEnd(builder, endloop);
+
+ assert(ctx->loop_stack_size);
+ --ctx->loop_stack_size;
+ --ctx->bgnloop_stack_size;
+ mask->cont_mask = ctx->loop_stack[ctx->loop_stack_size].cont_mask;
+ mask->break_mask = ctx->loop_stack[ctx->loop_stack_size].break_mask;
+ ctx->loop_block = ctx->loop_stack[ctx->loop_stack_size].loop_block;
+ ctx->break_var = ctx->loop_stack[ctx->loop_stack_size].break_var;
+ ctx->break_type = ctx->break_type_stack[ctx->loop_stack_size +
+ ctx->switch_stack_size];
+
+ lp_exec_mask_update(mask);
+}
+
+void lp_exec_mask_cond_push(struct lp_exec_mask *mask,
+ LLVMValueRef val)
+{
+ LLVMBuilderRef builder = mask->bld->gallivm->builder;
+ struct function_ctx *ctx = func_ctx(mask);
+
+ if (ctx->cond_stack_size >= LP_MAX_TGSI_NESTING) {
+ ctx->cond_stack_size++;
+ return;
+ }
+ if (ctx->cond_stack_size == 0 && mask->function_stack_size == 1) {
+ assert(mask->cond_mask == LLVMConstAllOnes(mask->int_vec_type));
+ }
+ ctx->cond_stack[ctx->cond_stack_size++] = mask->cond_mask;
+ assert(LLVMTypeOf(val) == mask->int_vec_type);
+ mask->cond_mask = LLVMBuildAnd(builder,
+ mask->cond_mask,
+ val,
+ "");
+ lp_exec_mask_update(mask);
+}
+
+void lp_exec_mask_cond_invert(struct lp_exec_mask *mask)
+{
+ LLVMBuilderRef builder = mask->bld->gallivm->builder;
+ struct function_ctx *ctx = func_ctx(mask);
+ LLVMValueRef prev_mask;
+ LLVMValueRef inv_mask;
+
+ assert(ctx->cond_stack_size);
+ if (ctx->cond_stack_size >= LP_MAX_TGSI_NESTING)
+ return;
+ prev_mask = ctx->cond_stack[ctx->cond_stack_size - 1];
+ if (ctx->cond_stack_size == 1 && mask->function_stack_size == 1) {
+ assert(prev_mask == LLVMConstAllOnes(mask->int_vec_type));
+ }
+
+ inv_mask = LLVMBuildNot(builder, mask->cond_mask, "");
+
+ mask->cond_mask = LLVMBuildAnd(builder,
+ inv_mask,
+ prev_mask, "");
+ lp_exec_mask_update(mask);
+}
+
+void lp_exec_mask_cond_pop(struct lp_exec_mask *mask)
+{
+ struct function_ctx *ctx = func_ctx(mask);
+ assert(ctx->cond_stack_size);
+ --ctx->cond_stack_size;
+ if (ctx->cond_stack_size >= LP_MAX_TGSI_NESTING)
+ return;
+ mask->cond_mask = ctx->cond_stack[ctx->cond_stack_size];
+ lp_exec_mask_update(mask);
+}
+
+
+void lp_exec_continue(struct lp_exec_mask *mask)
+{
+ LLVMBuilderRef builder = mask->bld->gallivm->builder;
+ LLVMValueRef exec_mask = LLVMBuildNot(builder,
+ mask->exec_mask,
+ "");
+
+ mask->cont_mask = LLVMBuildAnd(builder,
+ mask->cont_mask,
+ exec_mask, "");
+
+ lp_exec_mask_update(mask);
+}
+
+void lp_exec_break(struct lp_exec_mask *mask, int *pc,
+ bool break_always)
+{
+ LLVMBuilderRef builder = mask->bld->gallivm->builder;
+ struct function_ctx *ctx = func_ctx(mask);
+
+ if (ctx->break_type == LP_EXEC_MASK_BREAK_TYPE_LOOP) {
+ LLVMValueRef exec_mask = LLVMBuildNot(builder,
+ mask->exec_mask,
+ "break");
+
+ mask->break_mask = LLVMBuildAnd(builder,
+ mask->break_mask,
+ exec_mask, "break_full");
+ }
+ else {
+ if (ctx->switch_in_default) {
+ /*
+ * stop default execution but only if this is an unconditional switch.
+ * (The condition here is not perfect since dead code after break is
+ * allowed but should be sufficient since false negatives are just
+ * unoptimized - so we don't have to pre-evaluate that).
+ */
+ if(break_always && ctx->switch_pc) {
+ if (pc)
+ *pc = ctx->switch_pc;
+ return;
+ }
+ }
+
+ if (break_always) {
+ mask->switch_mask = LLVMConstNull(mask->bld->int_vec_type);
+ }
+ else {
+ LLVMValueRef exec_mask = LLVMBuildNot(builder,
+ mask->exec_mask,
+ "break");
+ mask->switch_mask = LLVMBuildAnd(builder,
+ mask->switch_mask,
+ exec_mask, "break_switch");
+ }
+ }
+
+ lp_exec_mask_update(mask);
+}
diff --git a/src/gallium/auxiliary/gallivm/lp_bld_ir_common.h b/src/gallium/auxiliary/gallivm/lp_bld_ir_common.h
new file mode 100644
index 00000000000..34c09193936
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/lp_bld_ir_common.h
@@ -0,0 +1,120 @@
+/**************************************************************************
+ *
+ * Copyright 2011-2012 Advanced Micro Devices, Inc.
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef LP_BLD_IR_COMMON_H
+#define LP_BLD_IR_COMMON_H
+
+#include "gallivm/lp_bld.h"
+#include "gallivm/lp_bld_limits.h"
+
+/* SM 4.0 says that subroutines can nest 32 deep and
+ * we need one more for our main function */
+#define LP_MAX_NUM_FUNCS 33
+
+enum lp_exec_mask_break_type {
+ LP_EXEC_MASK_BREAK_TYPE_LOOP,
+ LP_EXEC_MASK_BREAK_TYPE_SWITCH
+};
+
+struct lp_exec_mask {
+ struct lp_build_context *bld;
+
+ boolean has_mask;
+ boolean ret_in_main;
+
+ LLVMTypeRef int_vec_type;
+
+ LLVMValueRef exec_mask;
+
+ LLVMValueRef ret_mask;
+ LLVMValueRef cond_mask;
+ LLVMValueRef switch_mask; /* current switch exec mask */
+ LLVMValueRef cont_mask;
+ LLVMValueRef break_mask;
+
+ struct function_ctx {
+ int pc;
+ LLVMValueRef ret_mask;
+
+ LLVMValueRef cond_stack[LP_MAX_TGSI_NESTING];
+ int cond_stack_size;
+
+ /* keep track if break belongs to switch or loop */
+ enum lp_exec_mask_break_type break_type_stack[LP_MAX_TGSI_NESTING];
+ enum lp_exec_mask_break_type break_type;
+
+ struct {
+ LLVMValueRef switch_val;
+ LLVMValueRef switch_mask;
+ LLVMValueRef switch_mask_default;
+ boolean switch_in_default;
+ unsigned switch_pc;
+ } switch_stack[LP_MAX_TGSI_NESTING];
+ int switch_stack_size;
+ LLVMValueRef switch_val;
+ LLVMValueRef switch_mask_default; /* reverse of switch mask used for default */
+ boolean switch_in_default; /* if switch exec is currently in default */
+ unsigned switch_pc; /* when used points to default or endswitch-1 */
+
+ LLVMValueRef loop_limiter;
+ LLVMBasicBlockRef loop_block;
+ LLVMValueRef break_var;
+ struct {
+ LLVMBasicBlockRef loop_block;
+ LLVMValueRef cont_mask;
+ LLVMValueRef break_mask;
+ LLVMValueRef break_var;
+ } loop_stack[LP_MAX_TGSI_NESTING];
+ int loop_stack_size;
+ int bgnloop_stack_size;
+
+ } *function_stack;
+ int function_stack_size;
+};
+
+void lp_exec_mask_function_init(struct lp_exec_mask *mask, int function_idx);
+void lp_exec_mask_init(struct lp_exec_mask *mask, struct lp_build_context *bld);
+void lp_exec_mask_fini(struct lp_exec_mask *mask);
+void lp_exec_mask_store(struct lp_exec_mask *mask,
+ struct lp_build_context *bld_store,
+ LLVMValueRef val,
+ LLVMValueRef dst_ptr);
+void lp_exec_mask_update(struct lp_exec_mask *mask);
+void lp_exec_bgnloop_post_phi(struct lp_exec_mask *mask);
+void lp_exec_bgnloop(struct lp_exec_mask *mask, bool load_mask);
+void lp_exec_endloop(struct gallivm_state *gallivm,
+ struct lp_exec_mask *mask);
+void lp_exec_mask_cond_push(struct lp_exec_mask *mask,
+ LLVMValueRef val);
+void lp_exec_mask_cond_invert(struct lp_exec_mask *mask);
+void lp_exec_mask_cond_pop(struct lp_exec_mask *mask);
+void lp_exec_continue(struct lp_exec_mask *mask);
+
+void lp_exec_break(struct lp_exec_mask *mask, int *pc, bool break_always);
+
+#endif
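For illustration only (not part of this commit): a minimal sketch of how the execution-mask API exported by the new header nests for an if/else, assuming the caller already has a gallivm lp_build_context and that the condition and values are integer/typed vectors matching that context (the helper name and setup below are hypothetical).

#include "gallivm/lp_bld_ir_common.h"

/* cond must be the integer vector type of bld->type, ~0 in lanes where
 * the "then" side is taken; dst_ptr must point to a vector of the same
 * type as then_val/else_val. */
static void
emit_masked_if_else(struct lp_build_context *bld,
                    LLVMValueRef cond,
                    LLVMValueRef then_val,
                    LLVMValueRef else_val,
                    LLVMValueRef dst_ptr)
{
   struct lp_exec_mask mask;

   lp_exec_mask_init(&mask, bld);

   /* IF: AND the condition into the current execution mask */
   lp_exec_mask_cond_push(&mask, cond);
   lp_exec_mask_store(&mask, bld, then_val, dst_ptr);

   /* ELSE: invert against the mask saved by the matching push */
   lp_exec_mask_cond_invert(&mask);
   lp_exec_mask_store(&mask, bld, else_val, dst_ptr);

   /* ENDIF: restore the previous condition mask */
   lp_exec_mask_cond_pop(&mask);

   lp_exec_mask_fini(&mask);
}

Each push/invert/pop calls lp_exec_mask_update(), so lp_exec_mask_store() always sees the combined cond/loop/switch/return mask.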
diff --git a/src/gallium/auxiliary/gallivm/lp_bld_tgsi.h b/src/gallium/auxiliary/gallivm/lp_bld_tgsi.h
index 4bd0c0cf2af..6a67d90f8a5 100644
--- a/src/gallium/auxiliary/gallivm/lp_bld_tgsi.h
+++ b/src/gallium/auxiliary/gallivm/lp_bld_tgsi.h
@@ -41,6 +41,7 @@
#include "gallivm/lp_bld_tgsi_action.h"
#include "gallivm/lp_bld_limits.h"
#include "gallivm/lp_bld_sample.h"
+#include "gallivm/lp_bld_ir_common.h"
#include "lp_bld_type.h"
#include "pipe/p_compiler.h"
#include "pipe/p_state.h"
@@ -272,67 +273,6 @@ lp_build_tgsi_aos(struct gallivm_state *gallivm,
const struct tgsi_shader_info *info);
-enum lp_exec_mask_break_type {
- LP_EXEC_MASK_BREAK_TYPE_LOOP,
- LP_EXEC_MASK_BREAK_TYPE_SWITCH
-};
-
-
-struct lp_exec_mask {
- struct lp_build_context *bld;
-
- boolean has_mask;
- boolean ret_in_main;
-
- LLVMTypeRef int_vec_type;
-
- LLVMValueRef exec_mask;
-
- LLVMValueRef ret_mask;
- LLVMValueRef cond_mask;
- LLVMValueRef switch_mask; /* current switch exec mask */
- LLVMValueRef cont_mask;
- LLVMValueRef break_mask;
-
- struct function_ctx {
- int pc;
- LLVMValueRef ret_mask;
-
- LLVMValueRef cond_stack[LP_MAX_TGSI_NESTING];
- int cond_stack_size;
-
- /* keep track if break belongs to switch or loop */
- enum lp_exec_mask_break_type break_type_stack[LP_MAX_TGSI_NESTING];
- enum lp_exec_mask_break_type break_type;
-
- struct {
- LLVMValueRef switch_val;
- LLVMValueRef switch_mask;
- LLVMValueRef switch_mask_default;
- boolean switch_in_default;
- unsigned switch_pc;
- } switch_stack[LP_MAX_TGSI_NESTING];
- int switch_stack_size;
- LLVMValueRef switch_val;
- LLVMValueRef switch_mask_default; /* reverse of switch mask used for default */
- boolean switch_in_default; /* if switch exec is currently in default */
- unsigned switch_pc; /* when used points to default or endswitch-1 */
-
- LLVMValueRef loop_limiter;
- LLVMBasicBlockRef loop_block;
- LLVMValueRef break_var;
- struct {
- LLVMBasicBlockRef loop_block;
- LLVMValueRef cont_mask;
- LLVMValueRef break_mask;
- LLVMValueRef break_var;
- } loop_stack[LP_MAX_TGSI_NESTING];
- int loop_stack_size;
-
- } *function_stack;
- int function_stack_size;
-};
-
struct lp_build_tgsi_inst_list
{
struct tgsi_full_instruction *instructions;
diff --git a/src/gallium/auxiliary/gallivm/lp_bld_tgsi_soa.c b/src/gallium/auxiliary/gallivm/lp_bld_tgsi_soa.c
index 5a67f834c90..ca70a96302e 100644
--- a/src/gallium/auxiliary/gallivm/lp_bld_tgsi_soa.c
+++ b/src/gallium/auxiliary/gallivm/lp_bld_tgsi_soa.c
@@ -69,10 +69,6 @@
#include "lp_bld_sample.h"
#include "lp_bld_struct.h"
-/* SM 4.0 says that subroutines can nest 32 deep and
- * we need one more for our main function */
-#define LP_MAX_NUM_FUNCS 33
-
#define DUMP_GS_EMITS 0
/*
@@ -105,10 +101,6 @@ emit_dump_reg(struct gallivm_state *gallivm,
lp_build_print_value(gallivm, buf, value);
}
-/*
- * Return the context for the current function.
- * (always 'main', if shader doesn't do any function calls)
- */
static inline struct function_ctx *
func_ctx(struct lp_exec_mask *mask)
{
@@ -118,24 +110,6 @@ func_ctx(struct lp_exec_mask *mask)
}
/*
- * Returns true if we're in a loop.
- * It's global, meaning that it returns true even if there's
- * no loop inside the current function, but we were inside
- * a loop inside another function, from which this one was called.
- */
-static inline boolean
-mask_has_loop(struct lp_exec_mask *mask)
-{
- int i;
- for (i = mask->function_stack_size - 1; i >= 0; --i) {
- const struct function_ctx *ctx = &mask->function_stack[i];
- if (ctx->loop_stack_size > 0)
- return TRUE;
- }
- return FALSE;
-}
-
-/*
* combine the execution mask if there is one with the current mask.
*/
static LLVMValueRef
@@ -154,370 +128,14 @@ mask_vec(struct lp_build_tgsi_context *bld_base)
exec_mask->exec_mask, "");
}
-/*
- * Returns true if we're inside a switch statement.
- * It's global, meaning that it returns true even if there's
- * no switch in the current function, but we were inside
- * a switch inside another function, from which this one was called.
- */
-static inline boolean
-mask_has_switch(struct lp_exec_mask *mask)
-{
- int i;
- for (i = mask->function_stack_size - 1; i >= 0; --i) {
- const struct function_ctx *ctx = &mask->function_stack[i];
- if (ctx->switch_stack_size > 0)
- return TRUE;
- }
- return FALSE;
-}
-
-/*
- * Returns true if we're inside a conditional.
- * It's global, meaning that it returns true even if there's
- * no conditional in the current function, but we were inside
- * a conditional inside another function, from which this one was called.
- */
-static inline boolean
-mask_has_cond(struct lp_exec_mask *mask)
-{
- int i;
- for (i = mask->function_stack_size - 1; i >= 0; --i) {
- const struct function_ctx *ctx = &mask->function_stack[i];
- if (ctx->cond_stack_size > 0)
- return TRUE;
- }
- return FALSE;
-}
-
-
-/*
- * Initialize a function context at the specified index.
- */
-static void
-lp_exec_mask_function_init(struct lp_exec_mask *mask, int function_idx)
-{
- LLVMTypeRef int_type = LLVMInt32TypeInContext(mask->bld->gallivm->context);
- LLVMBuilderRef builder = mask->bld->gallivm->builder;
- struct function_ctx *ctx = &mask->function_stack[function_idx];
-
- ctx->cond_stack_size = 0;
- ctx->loop_stack_size = 0;
- ctx->switch_stack_size = 0;
-
- if (function_idx == 0) {
- ctx->ret_mask = mask->ret_mask;
- }
-
- ctx->loop_limiter = lp_build_alloca(mask->bld->gallivm,
- int_type, "looplimiter");
- LLVMBuildStore(
- builder,
- LLVMConstInt(int_type, LP_MAX_TGSI_LOOP_ITERATIONS, false),
- ctx->loop_limiter);
-}
-
-static void lp_exec_mask_init(struct lp_exec_mask *mask, struct lp_build_context *bld)
-{
- mask->bld = bld;
- mask->has_mask = FALSE;
- mask->ret_in_main = FALSE;
- /* For the main function */
- mask->function_stack_size = 1;
-
- mask->int_vec_type = lp_build_int_vec_type(bld->gallivm, mask->bld->type);
- mask->exec_mask = mask->ret_mask = mask->break_mask = mask->cont_mask =
- mask->cond_mask = mask->switch_mask =
- LLVMConstAllOnes(mask->int_vec_type);
-
- mask->function_stack = CALLOC(LP_MAX_NUM_FUNCS,
- sizeof(mask->function_stack[0]));
- lp_exec_mask_function_init(mask, 0);
-}
-
-static void
-lp_exec_mask_fini(struct lp_exec_mask *mask)
-{
- FREE(mask->function_stack);
-}
-
-static void lp_exec_mask_update(struct lp_exec_mask *mask)
-{
- LLVMBuilderRef builder = mask->bld->gallivm->builder;
- boolean has_loop_mask = mask_has_loop(mask);
- boolean has_cond_mask = mask_has_cond(mask);
- boolean has_switch_mask = mask_has_switch(mask);
- boolean has_ret_mask = mask->function_stack_size > 1 ||
- mask->ret_in_main;
-
- if (has_loop_mask) {
- /*for loops we need to update the entire mask at runtime */
- LLVMValueRef tmp;
- assert(mask->break_mask);
- tmp = LLVMBuildAnd(builder,
- mask->cont_mask,
- mask->break_mask,
- "maskcb");
- mask->exec_mask = LLVMBuildAnd(builder,
- mask->cond_mask,
- tmp,
- "maskfull");
- } else
- mask->exec_mask = mask->cond_mask;
-
- if (has_switch_mask) {
- mask->exec_mask = LLVMBuildAnd(builder,
- mask->exec_mask,
- mask->switch_mask,
- "switchmask");
- }
-
- if (has_ret_mask) {
- mask->exec_mask = LLVMBuildAnd(builder,
- mask->exec_mask,
- mask->ret_mask,
- "callmask");
- }
-
- mask->has_mask = (has_cond_mask ||
- has_loop_mask ||
- has_switch_mask ||
- has_ret_mask);
-}
-
-static void lp_exec_mask_cond_push(struct lp_exec_mask *mask,
- LLVMValueRef val)
-{
- LLVMBuilderRef builder = mask->bld->gallivm->builder;
- struct function_ctx *ctx = func_ctx(mask);
-
- if (ctx->cond_stack_size >= LP_MAX_TGSI_NESTING) {
- ctx->cond_stack_size++;
- return;
- }
- if (ctx->cond_stack_size == 0 && mask->function_stack_size == 1) {
- assert(mask->cond_mask == LLVMConstAllOnes(mask->int_vec_type));
- }
- ctx->cond_stack[ctx->cond_stack_size++] = mask->cond_mask;
- assert(LLVMTypeOf(val) == mask->int_vec_type);
- mask->cond_mask = LLVMBuildAnd(builder,
- mask->cond_mask,
- val,
- "");
- lp_exec_mask_update(mask);
-}
-
-static void lp_exec_mask_cond_invert(struct lp_exec_mask *mask)
-{
- LLVMBuilderRef builder = mask->bld->gallivm->builder;
- struct function_ctx *ctx = func_ctx(mask);
- LLVMValueRef prev_mask;
- LLVMValueRef inv_mask;
-
- assert(ctx->cond_stack_size);
- if (ctx->cond_stack_size >= LP_MAX_TGSI_NESTING)
- return;
- prev_mask = ctx->cond_stack[ctx->cond_stack_size - 1];
- if (ctx->cond_stack_size == 1 && mask->function_stack_size == 1) {
- assert(prev_mask == LLVMConstAllOnes(mask->int_vec_type));
- }
-
- inv_mask = LLVMBuildNot(builder, mask->cond_mask, "");
-
- mask->cond_mask = LLVMBuildAnd(builder,
- inv_mask,
- prev_mask, "");
- lp_exec_mask_update(mask);
-}
-
-static void lp_exec_mask_cond_pop(struct lp_exec_mask *mask)
-{
- struct function_ctx *ctx = func_ctx(mask);
- assert(ctx->cond_stack_size);
- --ctx->cond_stack_size;
- if (ctx->cond_stack_size >= LP_MAX_TGSI_NESTING)
- return;
- mask->cond_mask = ctx->cond_stack[ctx->cond_stack_size];
- lp_exec_mask_update(mask);
-}
-
-static void lp_exec_bgnloop(struct lp_exec_mask *mask)
-{
- LLVMBuilderRef builder = mask->bld->gallivm->builder;
- struct function_ctx *ctx = func_ctx(mask);
-
- if (ctx->loop_stack_size >= LP_MAX_TGSI_NESTING) {
- ++ctx->loop_stack_size;
- return;
- }
-
- ctx->break_type_stack[ctx->loop_stack_size + ctx->switch_stack_size] =
- ctx->break_type;
- ctx->break_type = LP_EXEC_MASK_BREAK_TYPE_LOOP;
-
- ctx->loop_stack[ctx->loop_stack_size].loop_block = ctx->loop_block;
- ctx->loop_stack[ctx->loop_stack_size].cont_mask = mask->cont_mask;
- ctx->loop_stack[ctx->loop_stack_size].break_mask = mask->break_mask;
- ctx->loop_stack[ctx->loop_stack_size].break_var = ctx->break_var;
- ++ctx->loop_stack_size;
-
- ctx->break_var = lp_build_alloca(mask->bld->gallivm, mask->int_vec_type, "");
- LLVMBuildStore(builder, mask->break_mask, ctx->break_var);
-
- ctx->loop_block = lp_build_insert_new_block(mask->bld->gallivm, "bgnloop");
-
- LLVMBuildBr(builder, ctx->loop_block);
- LLVMPositionBuilderAtEnd(builder, ctx->loop_block);
-
- mask->break_mask = LLVMBuildLoad(builder, ctx->break_var, "");
-
- lp_exec_mask_update(mask);
-}
-
-static void lp_exec_break(struct lp_exec_mask *mask,
+static void lp_exec_tgsi_break(struct lp_exec_mask *mask,
struct lp_build_tgsi_context * bld_base)
{
- LLVMBuilderRef builder = mask->bld->gallivm->builder;
- struct function_ctx *ctx = func_ctx(mask);
-
- if (ctx->break_type == LP_EXEC_MASK_BREAK_TYPE_LOOP) {
- LLVMValueRef exec_mask = LLVMBuildNot(builder,
- mask->exec_mask,
- "break");
-
- mask->break_mask = LLVMBuildAnd(builder,
- mask->break_mask,
- exec_mask, "break_full");
- }
- else {
- enum tgsi_opcode opcode =
- bld_base->instructions[bld_base->pc + 1].Instruction.Opcode;
- boolean break_always = (opcode == TGSI_OPCODE_ENDSWITCH ||
- opcode == TGSI_OPCODE_CASE);
-
-
- if (ctx->switch_in_default) {
- /*
- * stop default execution but only if this is an unconditional switch.
- * (The condition here is not perfect since dead code after break is
- * allowed but should be sufficient since false negatives are just
- * unoptimized - so we don't have to pre-evaluate that).
- */
- if(break_always && ctx->switch_pc) {
- bld_base->pc = ctx->switch_pc;
- return;
- }
- }
-
- if (break_always) {
- mask->switch_mask = LLVMConstNull(mask->bld->int_vec_type);
- }
- else {
- LLVMValueRef exec_mask = LLVMBuildNot(builder,
- mask->exec_mask,
- "break");
- mask->switch_mask = LLVMBuildAnd(builder,
- mask->switch_mask,
- exec_mask, "break_switch");
- }
- }
-
- lp_exec_mask_update(mask);
-}
-
-static void lp_exec_continue(struct lp_exec_mask *mask)
-{
- LLVMBuilderRef builder = mask->bld->gallivm->builder;
- LLVMValueRef exec_mask = LLVMBuildNot(builder,
- mask->exec_mask,
- "");
-
- mask->cont_mask = LLVMBuildAnd(builder,
- mask->cont_mask,
- exec_mask, "");
-
- lp_exec_mask_update(mask);
-}
-
-
-static void lp_exec_endloop(struct gallivm_state *gallivm,
- struct lp_exec_mask *mask)
-{
- LLVMBuilderRef builder = mask->bld->gallivm->builder;
- struct function_ctx *ctx = func_ctx(mask);
- LLVMBasicBlockRef endloop;
- LLVMTypeRef int_type = LLVMInt32TypeInContext(mask->bld->gallivm->context);
- LLVMTypeRef reg_type = LLVMIntTypeInContext(gallivm->context,
- mask->bld->type.width *
- mask->bld->type.length);
- LLVMValueRef i1cond, i2cond, icond, limiter;
-
- assert(mask->break_mask);
-
-
- assert(ctx->loop_stack_size);
- if (ctx->loop_stack_size > LP_MAX_TGSI_NESTING) {
- --ctx->loop_stack_size;
- return;
- }
-
- /*
- * Restore the cont_mask, but don't pop
- */
- mask->cont_mask = ctx->loop_stack[ctx->loop_stack_size - 1].cont_mask;
- lp_exec_mask_update(mask);
-
- /*
- * Unlike the continue mask, the break_mask must be preserved across loop
- * iterations
- */
- LLVMBuildStore(builder, mask->break_mask, ctx->break_var);
-
- /* Decrement the loop limiter */
- limiter = LLVMBuildLoad(builder, ctx->loop_limiter, "");
-
- limiter = LLVMBuildSub(
- builder,
- limiter,
- LLVMConstInt(int_type, 1, false),
- "");
-
- LLVMBuildStore(builder, limiter, ctx->loop_limiter);
-
- /* i1cond = (mask != 0) */
- i1cond = LLVMBuildICmp(
- builder,
- LLVMIntNE,
- LLVMBuildBitCast(builder, mask->exec_mask, reg_type, ""),
- LLVMConstNull(reg_type), "i1cond");
-
- /* i2cond = (looplimiter > 0) */
- i2cond = LLVMBuildICmp(
- builder,
- LLVMIntSGT,
- limiter,
- LLVMConstNull(int_type), "i2cond");
-
- /* if( i1cond && i2cond ) */
- icond = LLVMBuildAnd(builder, i1cond, i2cond, "");
-
- endloop = lp_build_insert_new_block(mask->bld->gallivm, "endloop");
-
- LLVMBuildCondBr(builder,
- icond, ctx->loop_block, endloop);
-
- LLVMPositionBuilderAtEnd(builder, endloop);
-
- assert(ctx->loop_stack_size);
- --ctx->loop_stack_size;
- mask->cont_mask = ctx->loop_stack[ctx->loop_stack_size].cont_mask;
- mask->break_mask = ctx->loop_stack[ctx->loop_stack_size].break_mask;
- ctx->loop_block = ctx->loop_stack[ctx->loop_stack_size].loop_block;
- ctx->break_var = ctx->loop_stack[ctx->loop_stack_size].break_var;
- ctx->break_type = ctx->break_type_stack[ctx->loop_stack_size +
- ctx->switch_stack_size];
-
- lp_exec_mask_update(mask);
+ enum tgsi_opcode opcode =
+ bld_base->instructions[bld_base->pc + 1].Instruction.Opcode;
+ bool break_always = (opcode == TGSI_OPCODE_ENDSWITCH ||
+ opcode == TGSI_OPCODE_CASE);
+ lp_exec_break(mask, &bld_base->pc, break_always);
}
static void lp_exec_switch(struct lp_exec_mask *mask,
@@ -748,34 +366,6 @@ static void lp_exec_default(struct lp_exec_mask *mask,
}
-/* stores val into an address pointed to by dst_ptr.
- * mask->exec_mask is used to figure out which bits of val
- * should be stored into the address
- * (0 means don't store this bit, 1 means do store).
- */
-static void lp_exec_mask_store(struct lp_exec_mask *mask,
- struct lp_build_context *bld_store,
- LLVMValueRef val,
- LLVMValueRef dst_ptr)
-{
- LLVMBuilderRef builder = mask->bld->gallivm->builder;
- LLVMValueRef exec_mask = mask->has_mask ? mask->exec_mask : NULL;
-
- assert(lp_check_value(bld_store->type, val));
- assert(LLVMGetTypeKind(LLVMTypeOf(dst_ptr)) == LLVMPointerTypeKind);
- assert(LLVMGetElementType(LLVMTypeOf(dst_ptr)) == LLVMTypeOf(val) ||
- LLVMGetTypeKind(LLVMGetElementType(LLVMTypeOf(dst_ptr))) == LLVMArrayTypeKind);
-
- if (exec_mask) {
- LLVMValueRef res, dst;
-
- dst = LLVMBuildLoad(builder, dst_ptr, "");
- res = lp_build_select(bld_store, exec_mask, val, dst);
- LLVMBuildStore(builder, res, dst_ptr);
- } else
- LLVMBuildStore(builder, val, dst_ptr);
-}
-
static void lp_exec_mask_call(struct lp_exec_mask *mask,
int func,
int *pc)
@@ -4107,7 +3697,7 @@ brk_emit(
{
struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
- lp_exec_break(&bld->exec_mask, bld_base);
+ lp_exec_tgsi_break(&bld->exec_mask, bld_base);
}
static void
@@ -4191,7 +3781,7 @@ bgnloop_emit(
{
struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
- lp_exec_bgnloop(&bld->exec_mask);
+ lp_exec_bgnloop(&bld->exec_mask, true);
}
static void
diff --git a/src/gallium/auxiliary/meson.build b/src/gallium/auxiliary/meson.build
index 9ebfcea8624..27cd8207e14 100644
--- a/src/gallium/auxiliary/meson.build
+++ b/src/gallium/auxiliary/meson.build
@@ -381,6 +381,8 @@ if with_llvm
'gallivm/lp_bld_init.h',
'gallivm/lp_bld_intr.c',
'gallivm/lp_bld_intr.h',
+ 'gallivm/lp_bld_ir_common.c',
+ 'gallivm/lp_bld_ir_common.h',
'gallivm/lp_bld_limits.h',
'gallivm/lp_bld_logic.c',
'gallivm/lp_bld_logic.h',