Diffstat (limited to 'src/amd')
-rw-r--r--  src/amd/common/ac_nir_to_llvm.c    2
-rw-r--r--  src/amd/common/ac_shader_abi.h     4
-rw-r--r--  src/amd/vulkan/radv_nir_to_llvm.c  6
3 files changed, 1 insertion(+), 11 deletions(-)
diff --git a/src/amd/common/ac_nir_to_llvm.c b/src/amd/common/ac_nir_to_llvm.c
index e71c8f1fee9..d4b30e4a330 100644
--- a/src/amd/common/ac_nir_to_llvm.c
+++ b/src/amd/common/ac_nir_to_llvm.c
@@ -2503,7 +2503,7 @@ static LLVMValueRef get_image_buffer_descriptor(struct ac_nir_context *ctx,
bool write, bool atomic)
{
LLVMValueRef rsrc = get_image_descriptor(ctx, instr, AC_DESC_BUFFER, write);
- if (ctx->abi->gfx9_stride_size_workaround_for_atomic && atomic) {
+ if (ctx->ac.chip_class == GFX9 && HAVE_LLVM < 0x900 && atomic) {
LLVMValueRef elem_count = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 2, 0), "");
LLVMValueRef stride = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 1, 0), "");
stride = LLVMBuildLShr(ctx->ac.builder, stride, LLVMConstInt(ctx->ac.i32, 16, 0), "");
diff --git a/src/amd/common/ac_shader_abi.h b/src/amd/common/ac_shader_abi.h
index 5d4479c5242..ef628c1ff10 100644
--- a/src/amd/common/ac_shader_abi.h
+++ b/src/amd/common/ac_shader_abi.h
@@ -207,10 +207,6 @@ struct ac_shader_abi {
bool clamp_shadow_reference;
bool interp_at_sample_force_center;

- /* Whether to workaround GFX9 ignoring the stride for the buffer size if IDXEN=0
- * and LLVM optimizes an indexed load with constant index to IDXEN=0. */
- bool gfx9_stride_size_workaround_for_atomic;
-
/* Whether bounds checks are required */
bool robust_buffer_access;
};
diff --git a/src/amd/vulkan/radv_nir_to_llvm.c b/src/amd/vulkan/radv_nir_to_llvm.c
index c594f051503..d58cc9613f8 100644
--- a/src/amd/vulkan/radv_nir_to_llvm.c
+++ b/src/amd/vulkan/radv_nir_to_llvm.c
@@ -4362,12 +4362,6 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
ctx.abi.clamp_shadow_reference = false;
ctx.abi.robust_buffer_access = options->robust_buffer_access;

- /* Because the new raw/struct atomic intrinsics are buggy with LLVM 8,
- * we fallback to the old intrinsics for atomic buffer image operations
- * and thus we need to apply the indexing workaround...
- */
- ctx.abi.gfx9_stride_size_workaround_for_atomic = ctx.ac.chip_class == GFX9 && HAVE_LLVM < 0x900;
-
bool is_ngg = is_pre_gs_stage(shaders[0]->info.stage) && ctx.options->key.vs_common_out.as_ngg;
if (shader_count >= 2 || is_ngg)
ac_init_exec_full_mask(&ctx.ac);
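
Note (not part of the commit): the hunk in ac_nir_to_llvm.c now gates the GFX9 buffer-descriptor fixup on the chip class and LLVM version directly instead of the deleted gfx9_stride_size_workaround_for_atomic flag. The diff cuts off after the stride shift, so the rest of the fixup is not shown here; the following is a minimal, hedged sketch of what such a descriptor patch can look like. It assumes the ac_llvm_context fields already used in the hunk (ac->builder, ac->i32); the helper name gfx9_fixup_buffer_descriptor and the max(num_records, stride) choice are illustrative assumptions, not the verbatim Mesa code.

/* Hypothetical sketch of the GFX9 stride/num_records workaround, built only
 * from LLVM-C calls.  Dword 2 of the buffer descriptor (V#) holds
 * num_records and dword 1 holds the stride in its upper 16 bits. */
#include <llvm-c/Core.h>
#include "ac_llvm_build.h"   /* struct ac_llvm_context; path assumed as in Mesa */

static LLVMValueRef
gfx9_fixup_buffer_descriptor(struct ac_llvm_context *ac, LLVMValueRef rsrc)
{
	LLVMValueRef elem_count = LLVMBuildExtractElement(
		ac->builder, rsrc, LLVMConstInt(ac->i32, 2, 0), "");
	LLVMValueRef stride = LLVMBuildExtractElement(
		ac->builder, rsrc, LLVMConstInt(ac->i32, 1, 0), "");
	stride = LLVMBuildLShr(ac->builder, stride,
			       LLVMConstInt(ac->i32, 16, 0), "");

	/* When LLVM rewrites an indexed access to IDXEN=0, GFX9 no longer
	 * scales num_records by the stride, so keep the larger of the two
	 * values to preserve a usable bounds check (illustrative choice). */
	LLVMValueRef use_count = LLVMBuildICmp(ac->builder, LLVMIntUGT,
					       elem_count, stride, "");
	LLVMValueRef new_count = LLVMBuildSelect(ac->builder, use_count,
						 elem_count, stride, "");

	return LLVMBuildInsertElement(ac->builder, rsrc, new_count,
				      LLVMConstInt(ac->i32, 2, 0), "");
}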