author    Dave Airlie <[email protected]>  2017-02-02 08:58:57 +1000
committer Dave Airlie <[email protected]>  2017-02-03 09:54:04 +1000
commit    a9773311f6a1b7f49dbd207c12e8081a5823c5a9 (patch)
tree      06fcf139d800333a544768dbc09f4778495a4139 /src/amd/common/ac_llvm_util.c
parent    e198a64e3532af9b30d7c3fac4d092ecea7d2e41 (diff)

radeonsi/ac: move a bunch of load/store related things to common code.

These are all shareable with radv, so start migrating them to the common code.

Reviewed-by: Bas Nieuwenhuizen <[email protected]>
Reviewed-by: Nicolai Hähnle <[email protected]>
Signed-off-by: Dave Airlie <[email protected]>
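For context, the helpers moved here tag pointers with amdgpu.uniform metadata and loads with invariant.load metadata, which lets the AMDGPU backend select a scalar (SMEM) load instead of a per-lane vector load. A minimal caller-side sketch of the intended use follows; the fetch_desc name, the desc_list pointer, and the slot parameter are hypothetical illustrations, not part of this commit:

/* Hypothetical caller: fetch one descriptor from a table whose contents
 * are constant for the draw and whose index is dynamically uniform. */
static LLVMValueRef fetch_desc(struct ac_llvm_context *ctx,
                               LLVMValueRef desc_list, unsigned slot)
{
	LLVMValueRef index = LLVMConstInt(ctx->i32, slot, 0);
	/* Tags the GEP with amdgpu.uniform and the load with invariant.load. */
	return ac_build_indexed_load_const(ctx, desc_list, index);
}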
Diffstat (limited to 'src/amd/common/ac_llvm_util.c')
-rw-r--r--  src/amd/common/ac_llvm_util.c | 65
1 file changed, 65 insertions(+), 0 deletions(-)
diff --git a/src/amd/common/ac_llvm_util.c b/src/amd/common/ac_llvm_util.c
index 8c87a139744..43eeaac310c 100644
--- a/src/amd/common/ac_llvm_util.c
+++ b/src/amd/common/ac_llvm_util.c
@@ -160,10 +160,18 @@ ac_llvm_context_init(struct ac_llvm_context *ctx, LLVMContextRef context)
 	ctx->i32 = LLVMIntTypeInContext(ctx->context, 32);
 	ctx->f32 = LLVMFloatTypeInContext(ctx->context);
+	ctx->invariant_load_md_kind = LLVMGetMDKindIDInContext(ctx->context,
+								"invariant.load", 14);
+
 	ctx->fpmath_md_kind = LLVMGetMDKindIDInContext(ctx->context, "fpmath", 6);
 	args[0] = LLVMConstReal(ctx->f32, 2.5);
 	ctx->fpmath_md_2p5_ulp = LLVMMDNodeInContext(ctx->context, args, 1);
+
+	ctx->uniform_md_kind = LLVMGetMDKindIDInContext(ctx->context,
+							"amdgpu.uniform", 14);
+
+	ctx->empty_md = LLVMMDNodeInContext(ctx->context, NULL, 0);
 }
#if HAVE_LLVM < 0x0400
@@ -583,3 +591,60 @@ ac_build_fs_interp_mov(struct ac_llvm_context *ctx,
 	return ac_emit_llvm_intrinsic(ctx, "llvm.amdgcn.interp.mov",
 				      ctx->f32, args, 4, AC_FUNC_ATTR_READNONE);
 }
+
+LLVMValueRef
+ac_build_gep0(struct ac_llvm_context *ctx,
+	      LLVMValueRef base_ptr,
+	      LLVMValueRef index)
+{
+	LLVMValueRef indices[2] = {
+		LLVMConstInt(ctx->i32, 0, 0),
+		index,
+	};
+	return LLVMBuildGEP(ctx->builder, base_ptr,
+			    indices, 2, "");
+}
+
+void
+ac_build_indexed_store(struct ac_llvm_context *ctx,
+		       LLVMValueRef base_ptr, LLVMValueRef index,
+		       LLVMValueRef value)
+{
+	LLVMBuildStore(ctx->builder, value,
+		       ac_build_gep0(ctx, base_ptr, index));
+}
+
+/**
+ * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad.
+ * It's equivalent to doing a load from &base_ptr[index].
+ *
+ * \param base_ptr  Where the array starts.
+ * \param index     The element index into the array.
+ * \param uniform   Whether base_ptr and index can be assumed to be
+ *                  dynamically uniform.
+ */
+LLVMValueRef
+ac_build_indexed_load(struct ac_llvm_context *ctx,
+		      LLVMValueRef base_ptr, LLVMValueRef index,
+		      bool uniform)
+{
+	LLVMValueRef pointer;
+
+	pointer = ac_build_gep0(ctx, base_ptr, index);
+	if (uniform)
+		LLVMSetMetadata(pointer, ctx->uniform_md_kind, ctx->empty_md);
+	return LLVMBuildLoad(ctx->builder, pointer, "");
+}
+
+/**
+ * Do a load from &base_ptr[index], but also add a flag that it's loading
+ * a constant from a dynamically uniform index.
+ */
+LLVMValueRef
+ac_build_indexed_load_const(struct ac_llvm_context *ctx,
+			    LLVMValueRef base_ptr, LLVMValueRef index)
+{
+	LLVMValueRef result = ac_build_indexed_load(ctx, base_ptr, index, true);
+	LLVMSetMetadata(result, ctx->invariant_load_md_kind, ctx->empty_md);
+	return result;
+}
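
As a usage note, the store and load helpers compose through the same ac_build_gep0 addressing, so a value written via ac_build_indexed_store can be read back via ac_build_indexed_load. A sketch of such a round trip through a local scratch array; the spill_reload name, the alloca-backed array, and the 64-element size are illustrative assumptions, not from this commit:

/* Hypothetical round trip: store a value into a local f32 array slot,
 * then reload it. slot may diverge per lane, so uniform = false and no
 * amdgpu.uniform metadata is attached. */
static LLVMValueRef spill_reload(struct ac_llvm_context *ctx,
                                 LLVMValueRef value, LLVMValueRef slot)
{
	LLVMValueRef array = LLVMBuildAlloca(ctx->builder,
					     LLVMArrayType(ctx->f32, 64), "");
	ac_build_indexed_store(ctx, array, slot, value);
	return ac_build_indexed_load(ctx, array, slot, false);
}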