| author | Samuel Pitoiset <samuel.pitoiset@gmail.com> | 2019-08-01 11:18:43 +0200 |
| --- | --- | --- |
| committer | Samuel Pitoiset <samuel.pitoiset@gmail.com> | 2019-08-23 08:12:34 +0200 |
| commit | 1fd60db4a1fca96ccf9293d0c03158baf7d215a5 (patch) | |
| tree | 9095e21da91d6da09b423a4c29ca9ed0f32609d5 | |
| parent | 3e03a3fc5315b488468b28aa40a7e9416f506520 (diff) | |
ac,radv,radeonsi: remove LLVM 7 support
Now that LLVM 9 is about to be released, we will only support
LLVM 8, 9 and master (10).
Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Reviewed-by: Marek Olšák <marek.olsak@amd.com>
| -rw-r--r-- | meson.build | 2 |
| -rw-r--r-- | src/amd/common/ac_gpu_info.c | 3 |
| -rw-r--r-- | src/amd/common/ac_llvm_build.c | 305 |
| -rw-r--r-- | src/amd/common/ac_llvm_build.h | 10 |
| -rw-r--r-- | src/amd/common/ac_llvm_util.c | 5 |
| -rw-r--r-- | src/amd/common/ac_nir_to_llvm.c | 24 |
| -rw-r--r-- | src/amd/common/ac_shader_abi.h | 1 |
| -rw-r--r-- | src/amd/vulkan/radv_device.c | 2 |
| -rw-r--r-- | src/amd/vulkan/radv_extensions.py | 2 |
| -rw-r--r-- | src/amd/vulkan/radv_nir_to_llvm.c | 1 |
| -rw-r--r-- | src/amd/vulkan/radv_shader.c | 11 |
| -rw-r--r-- | src/gallium/drivers/radeonsi/si_shader.c | 5 |
| -rw-r--r-- | src/gallium/drivers/radeonsi/si_shader_tgsi_mem.c | 7 |
| -rw-r--r-- | src/gallium/drivers/radeonsi/si_state.c | 9 |
14 files changed, 66 insertions, 321 deletions
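
Most hunks in the patch below delete compile-time HAVE_LLVM version guards. As a rough illustration only (the helper names here are hypothetical stand-ins, not the actual Mesa functions), the pattern being removed looks like this in C, where HAVE_LLVM packs the LLVM version as 0xMMmm (0x0800 == LLVM 8.0):

```c
/* Illustrative sketch only -- not code from this patch.  Mesa picks code
 * paths at compile time with HAVE_LLVM; dropping LLVM 7 support means the
 * branches guarded by HAVE_LLVM < 0x0800 disappear and the LLVM 8+ path
 * is called unconditionally.  The emit_* helpers are made up for this sketch. */
#include <stdio.h>

#define HAVE_LLVM 0x0800  /* in Mesa this comes from the build system */

void emit_llvm8_buffer_store(void)
{
	puts("emit raw/struct buffer store (LLVM 8+ intrinsics)");
}

void emit_llvm7_buffer_store(void)
{
	puts("emit legacy llvm.amdgcn.buffer.store (LLVM 7)");
}

void emit_buffer_store(void)
{
#if HAVE_LLVM >= 0x0800
	emit_llvm8_buffer_store();  /* the only path kept by this patch */
#else
	emit_llvm7_buffer_store();  /* deleted along with LLVM 7 support */
#endif
}

int main(void)
{
	emit_buffer_store();
	return 0;
}
```

With the minimum bumped to '>= 8.0.0' in meson.build, every such guard collapses to its first branch, which is why the diff is almost entirely deletions.
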
diff --git a/meson.build b/meson.build index bf61511d292..501957e35e1 100644 --- a/meson.build +++ b/meson.build @@ -1243,7 +1243,7 @@ if with_gallium_opencl endif if with_amd_vk or with_gallium_radeonsi - _llvm_version = '>= 7.0.0' + _llvm_version = '>= 8.0.0' elif with_gallium_swr _llvm_version = '>= 6.0.0' elif with_gallium_opencl or with_gallium_r600 diff --git a/src/amd/common/ac_gpu_info.c b/src/amd/common/ac_gpu_info.c index 9ec7359ed79..b02f1471463 100644 --- a/src/amd/common/ac_gpu_info.c +++ b/src/amd/common/ac_gpu_info.c @@ -487,8 +487,7 @@ bool ac_query_gpu_info(int fd, void *dev_p, } info->has_gds_ordered_append = info->chip_class >= GFX7 && - info->drm_minor >= 29 && - HAVE_LLVM >= 0x0800; + info->drm_minor >= 29; return true; } diff --git a/src/amd/common/ac_llvm_build.c b/src/amd/common/ac_llvm_build.c index 5abae00d8f6..1a891cc0d1f 100644 --- a/src/amd/common/ac_llvm_build.c +++ b/src/amd/common/ac_llvm_build.c @@ -492,7 +492,6 @@ LLVMValueRef ac_get_i1_sgpr_mask(struct ac_llvm_context *ctx, LLVMConstInt(ctx->i32, LLVMIntNE, 0), }; - assert(HAVE_LLVM >= 0x0800); return ac_build_intrinsic(ctx, name, ctx->i64, args, 3, AC_FUNC_ATTR_NOUNWIND | AC_FUNC_ATTR_READNONE | @@ -1151,41 +1150,6 @@ static unsigned get_load_cache_policy(struct ac_llvm_context *ctx, } static void -ac_build_llvm7_buffer_store_common(struct ac_llvm_context *ctx, - LLVMValueRef rsrc, - LLVMValueRef data, - LLVMValueRef vindex, - LLVMValueRef voffset, - unsigned num_channels, - unsigned cache_policy, - bool use_format) -{ - LLVMValueRef args[] = { - data, - LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, ""), - vindex ? vindex : ctx->i32_0, - voffset, - LLVMConstInt(ctx->i1, !!(cache_policy & ac_glc), 0), - LLVMConstInt(ctx->i1, !!(cache_policy & ac_slc), 0) - }; - unsigned func = CLAMP(num_channels, 1, 3) - 1; - - const char *type_names[] = {"f32", "v2f32", "v4f32"}; - char name[256]; - - if (use_format) { - snprintf(name, sizeof(name), "llvm.amdgcn.buffer.store.format.%s", - type_names[func]); - } else { - snprintf(name, sizeof(name), "llvm.amdgcn.buffer.store.%s", - type_names[func]); - } - - ac_build_intrinsic(ctx, name, ctx->voidt, args, ARRAY_SIZE(args), - AC_FUNC_ATTR_INACCESSIBLE_MEM_ONLY); -} - -static void ac_build_llvm8_buffer_store_common(struct ac_llvm_context *ctx, LLVMValueRef rsrc, LLVMValueRef data, @@ -1235,16 +1199,10 @@ ac_build_buffer_store_format(struct ac_llvm_context *ctx, unsigned num_channels, unsigned cache_policy) { - if (HAVE_LLVM >= 0x800) { - ac_build_llvm8_buffer_store_common(ctx, rsrc, data, vindex, - voffset, NULL, num_channels, - ctx->f32, cache_policy, - true, true); - } else { - ac_build_llvm7_buffer_store_common(ctx, rsrc, data, vindex, voffset, - num_channels, cache_policy, - true); - } + ac_build_llvm8_buffer_store_common(ctx, rsrc, data, vindex, + voffset, NULL, num_channels, + ctx->f32, cache_policy, + true, true); } /* TBUFFER_STORE_FORMAT_{X,XY,XYZ,XYZW} <- the suffix is selected by num_channels=1..4. 
@@ -1294,25 +1252,14 @@ ac_build_buffer_store_dword(struct ac_llvm_context *ctx, offset = LLVMBuildAdd(ctx->builder, offset, LLVMConstInt(ctx->i32, inst_offset, 0), ""); - if (HAVE_LLVM >= 0x800) { - ac_build_llvm8_buffer_store_common(ctx, rsrc, - ac_to_float(ctx, vdata), - ctx->i32_0, - voffset, offset, - num_channels, - ctx->f32, - cache_policy, - false, false); - } else { - if (voffset) - offset = LLVMBuildAdd(ctx->builder, offset, voffset, ""); - - ac_build_llvm7_buffer_store_common(ctx, rsrc, - ac_to_float(ctx, vdata), - ctx->i32_0, offset, - num_channels, cache_policy, - false); - } + ac_build_llvm8_buffer_store_common(ctx, rsrc, + ac_to_float(ctx, vdata), + ctx->i32_0, + voffset, offset, + num_channels, + ctx->f32, + cache_policy, + false, false); return; } @@ -1331,42 +1278,6 @@ ac_build_buffer_store_dword(struct ac_llvm_context *ctx, } static LLVMValueRef -ac_build_llvm7_buffer_load_common(struct ac_llvm_context *ctx, - LLVMValueRef rsrc, - LLVMValueRef vindex, - LLVMValueRef voffset, - unsigned num_channels, - unsigned cache_policy, - bool can_speculate, - bool use_format) -{ - LLVMValueRef args[] = { - LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, ""), - vindex ? vindex : ctx->i32_0, - voffset, - LLVMConstInt(ctx->i1, !!(cache_policy & ac_glc), 0), - LLVMConstInt(ctx->i1, !!(cache_policy & ac_slc), 0) - }; - unsigned func = CLAMP(num_channels, 1, 3) - 1; - - LLVMTypeRef types[] = {ctx->f32, ctx->v2f32, ctx->v4f32}; - const char *type_names[] = {"f32", "v2f32", "v4f32"}; - char name[256]; - - if (use_format) { - snprintf(name, sizeof(name), "llvm.amdgcn.buffer.load.format.%s", - type_names[func]); - } else { - snprintf(name, sizeof(name), "llvm.amdgcn.buffer.load.%s", - type_names[func]); - } - - return ac_build_intrinsic(ctx, name, types[func], args, - ARRAY_SIZE(args), - ac_get_load_intr_attribs(can_speculate)); -} - -static LLVMValueRef ac_build_llvm8_buffer_load_common(struct ac_llvm_context *ctx, LLVMValueRef rsrc, LLVMValueRef vindex, @@ -1425,7 +1336,7 @@ ac_build_buffer_load(struct ac_llvm_context *ctx, offset = LLVMBuildAdd(ctx->builder, offset, soffset, ""); if (allow_smem && !(cache_policy & ac_slc) && - (!(cache_policy & ac_glc) || (HAVE_LLVM >= 0x0800 && ctx->chip_class >= GFX8))) { + (!(cache_policy & ac_glc) || ctx->chip_class >= GFX8)) { assert(vindex == NULL); LLVMValueRef result[8]; @@ -1435,19 +1346,15 @@ ac_build_buffer_load(struct ac_llvm_context *ctx, offset = LLVMBuildAdd(ctx->builder, offset, LLVMConstInt(ctx->i32, 4, 0), ""); } - const char *intrname = - HAVE_LLVM >= 0x0800 ? "llvm.amdgcn.s.buffer.load.f32" - : "llvm.SI.load.const.v4i32"; - unsigned num_args = HAVE_LLVM >= 0x0800 ? 3 : 2; LLVMValueRef args[3] = { rsrc, offset, LLVMConstInt(ctx->i32, get_load_cache_policy(ctx, cache_policy), 0), }; - result[i] = ac_build_intrinsic(ctx, intrname, - ctx->f32, args, num_args, - AC_FUNC_ATTR_READNONE | - (HAVE_LLVM < 0x0800 ? 
AC_FUNC_ATTR_LEGACY : 0)); + result[i] = ac_build_intrinsic(ctx, + "llvm.amdgcn.s.buffer.load.f32", + ctx->f32, args, 3, + AC_FUNC_ATTR_READNONE); } if (num_channels == 1) return result[0]; @@ -1457,18 +1364,11 @@ ac_build_buffer_load(struct ac_llvm_context *ctx, return ac_build_gather_values(ctx, result, num_channels); } - if (HAVE_LLVM >= 0x0800) { - return ac_build_llvm8_buffer_load_common(ctx, rsrc, vindex, - offset, ctx->i32_0, - num_channels, ctx->f32, - cache_policy, - can_speculate, false, - false); - } - - return ac_build_llvm7_buffer_load_common(ctx, rsrc, vindex, offset, - num_channels, cache_policy, - can_speculate, false); + return ac_build_llvm8_buffer_load_common(ctx, rsrc, vindex, + offset, ctx->i32_0, + num_channels, ctx->f32, + cache_policy, + can_speculate, false, false); } LLVMValueRef ac_build_buffer_load_format(struct ac_llvm_context *ctx, @@ -1479,44 +1379,9 @@ LLVMValueRef ac_build_buffer_load_format(struct ac_llvm_context *ctx, unsigned cache_policy, bool can_speculate) { - if (HAVE_LLVM >= 0x800) { - return ac_build_llvm8_buffer_load_common(ctx, rsrc, vindex, voffset, ctx->i32_0, - num_channels, ctx->f32, - cache_policy, can_speculate, true, true); - } - return ac_build_llvm7_buffer_load_common(ctx, rsrc, vindex, voffset, - num_channels, cache_policy, - can_speculate, true); -} - -LLVMValueRef ac_build_buffer_load_format_gfx9_safe(struct ac_llvm_context *ctx, - LLVMValueRef rsrc, - LLVMValueRef vindex, - LLVMValueRef voffset, - unsigned num_channels, - unsigned cache_policy, - bool can_speculate) -{ - if (HAVE_LLVM >= 0x800) { - return ac_build_llvm8_buffer_load_common(ctx, rsrc, vindex, voffset, ctx->i32_0, - num_channels, ctx->f32, - cache_policy, can_speculate, true, true); - } - - LLVMValueRef elem_count = LLVMBuildExtractElement(ctx->builder, rsrc, LLVMConstInt(ctx->i32, 2, 0), ""); - LLVMValueRef stride = LLVMBuildExtractElement(ctx->builder, rsrc, ctx->i32_1, ""); - stride = LLVMBuildLShr(ctx->builder, stride, LLVMConstInt(ctx->i32, 16, 0), ""); - - LLVMValueRef new_elem_count = LLVMBuildSelect(ctx->builder, - LLVMBuildICmp(ctx->builder, LLVMIntUGT, elem_count, stride, ""), - elem_count, stride, ""); - - LLVMValueRef new_rsrc = LLVMBuildInsertElement(ctx->builder, rsrc, new_elem_count, - LLVMConstInt(ctx->i32, 2, 0), ""); - - return ac_build_llvm7_buffer_load_common(ctx, new_rsrc, vindex, voffset, - num_channels, cache_policy, - can_speculate, true); + return ac_build_llvm8_buffer_load_common(ctx, rsrc, vindex, voffset, ctx->i32_0, + num_channels, ctx->f32, + cache_policy, can_speculate, true, true); } /// Translate a (dfmt, nfmt) pair into a chip-appropriate combined format @@ -1615,36 +1480,12 @@ ac_build_tbuffer_load(struct ac_llvm_context *ctx, bool can_speculate, bool structurized) /* only matters for LLVM 8+ */ { - if (HAVE_LLVM >= 0x800) { - voffset = LLVMBuildAdd(ctx->builder, voffset, immoffset, ""); - - return ac_build_llvm8_tbuffer_load(ctx, rsrc, vindex, voffset, - soffset, num_channels, - dfmt, nfmt, cache_policy, - can_speculate, structurized); - } + voffset = LLVMBuildAdd(ctx->builder, voffset, immoffset, ""); - LLVMValueRef args[] = { - rsrc, - vindex ? 
vindex : ctx->i32_0, - voffset, - soffset, - immoffset, - LLVMConstInt(ctx->i32, dfmt, false), - LLVMConstInt(ctx->i32, nfmt, false), - LLVMConstInt(ctx->i1, !!(cache_policy & ac_glc), false), - LLVMConstInt(ctx->i1, !!(cache_policy & ac_slc), false), - }; - unsigned func = CLAMP(num_channels, 1, 3) - 1; - LLVMTypeRef types[] = {ctx->i32, ctx->v2i32, ctx->v4i32}; - const char *type_names[] = {"i32", "v2i32", "v4i32"}; - char name[256]; - - snprintf(name, sizeof(name), "llvm.amdgcn.tbuffer.load.%s", - type_names[func]); - - return ac_build_intrinsic(ctx, name, types[func], args, 9, - ac_get_load_intr_attribs(can_speculate)); + return ac_build_llvm8_tbuffer_load(ctx, rsrc, vindex, voffset, + soffset, num_channels, + dfmt, nfmt, cache_policy, + can_speculate, structurized); } LLVMValueRef @@ -1872,20 +1713,13 @@ ac_build_opencoded_load_format(struct ac_llvm_context *ctx, for (unsigned i = 0; i < load_num_channels; ++i) { tmp = LLVMBuildAdd(ctx->builder, soffset, LLVMConstInt(ctx->i32, i << load_log_size, false), ""); - if (HAVE_LLVM >= 0x0800) { - LLVMTypeRef channel_type = load_log_size == 0 ? ctx->i8 : - load_log_size == 1 ? ctx->i16 : ctx->i32; - unsigned num_channels = 1 << (MAX2(load_log_size, 2) - 2); - loads[i] = ac_build_llvm8_buffer_load_common( - ctx, rsrc, vindex, voffset, tmp, - num_channels, channel_type, cache_policy, - can_speculate, false, true); - } else { - tmp = LLVMBuildAdd(ctx->builder, voffset, tmp, ""); - loads[i] = ac_build_llvm7_buffer_load_common( - ctx, rsrc, vindex, tmp, - 1 << (load_log_size - 2), cache_policy, can_speculate, false); - } + LLVMTypeRef channel_type = load_log_size == 0 ? ctx->i8 : + load_log_size == 1 ? ctx->i16 : ctx->i32; + unsigned num_channels = 1 << (MAX2(load_log_size, 2) - 2); + loads[i] = ac_build_llvm8_buffer_load_common( + ctx, rsrc, vindex, voffset, tmp, + num_channels, channel_type, cache_policy, + can_speculate, false, true); if (load_log_size >= 2) loads[i] = ac_to_integer(ctx, loads[i]); } @@ -2108,37 +1942,12 @@ ac_build_tbuffer_store(struct ac_llvm_context *ctx, unsigned cache_policy, bool structurized) /* only matters for LLVM 8+ */ { - if (HAVE_LLVM >= 0x800) { - voffset = LLVMBuildAdd(ctx->builder, - voffset ? voffset : ctx->i32_0, - immoffset, ""); + voffset = LLVMBuildAdd(ctx->builder, voffset ? voffset : ctx->i32_0, + immoffset, ""); - ac_build_llvm8_tbuffer_store(ctx, rsrc, vdata, vindex, voffset, - soffset, num_channels, dfmt, nfmt, - cache_policy, structurized); - } else { - LLVMValueRef params[] = { - vdata, - rsrc, - vindex ? vindex : ctx->i32_0, - voffset ? voffset : ctx->i32_0, - soffset ? soffset : ctx->i32_0, - immoffset, - LLVMConstInt(ctx->i32, dfmt, false), - LLVMConstInt(ctx->i32, nfmt, false), - LLVMConstInt(ctx->i1, !!(cache_policy & ac_glc), false), - LLVMConstInt(ctx->i1, !!(cache_policy & ac_slc), false), - }; - unsigned func = CLAMP(num_channels, 1, 3) - 1; - const char *type_names[] = {"i32", "v2i32", "v4i32"}; - char name[256]; - - snprintf(name, sizeof(name), "llvm.amdgcn.tbuffer.store.%s", - type_names[func]); - - ac_build_intrinsic(ctx, name, ctx->voidt, params, 10, - AC_FUNC_ATTR_INACCESSIBLE_MEM_ONLY); - } + ac_build_llvm8_tbuffer_store(ctx, rsrc, vdata, vindex, voffset, + soffset, num_channels, dfmt, nfmt, + cache_policy, structurized); } void @@ -2879,22 +2688,10 @@ LLVMValueRef ac_build_bfe(struct ac_llvm_context *ctx, LLVMValueRef input, width, }; - LLVMValueRef result = ac_build_intrinsic(ctx, - is_signed ? 
"llvm.amdgcn.sbfe.i32" : - "llvm.amdgcn.ubfe.i32", - ctx->i32, args, 3, - AC_FUNC_ATTR_READNONE); - - if (HAVE_LLVM < 0x0800) { - /* FIXME: LLVM 7+ returns incorrect result when count is 0. - * https://bugs.freedesktop.org/show_bug.cgi?id=107276 - */ - LLVMValueRef zero = ctx->i32_0; - LLVMValueRef icond = LLVMBuildICmp(ctx->builder, LLVMIntEQ, width, zero, ""); - result = LLVMBuildSelect(ctx->builder, icond, zero, result, ""); - } + return ac_build_intrinsic(ctx, is_signed ? "llvm.amdgcn.sbfe.i32" : + "llvm.amdgcn.ubfe.i32", + ctx->i32, args, 3, AC_FUNC_ATTR_READNONE); - return result; } LLVMValueRef ac_build_imad(struct ac_llvm_context *ctx, LLVMValueRef s0, @@ -3869,15 +3666,9 @@ ac_build_readlane(struct ac_llvm_context *ctx, LLVMValueRef src, LLVMValueRef la LLVMValueRef ac_build_writelane(struct ac_llvm_context *ctx, LLVMValueRef src, LLVMValueRef value, LLVMValueRef lane) { - if (HAVE_LLVM >= 0x0800) { - return ac_build_intrinsic(ctx, "llvm.amdgcn.writelane", ctx->i32, - (LLVMValueRef []) {value, lane, src}, 3, - AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT); - } - - LLVMValueRef pred = LLVMBuildICmp(ctx->builder, LLVMIntEQ, lane, - ac_get_thread_id(ctx), ""); - return LLVMBuildSelect(ctx->builder, pred, value, src, ""); + return ac_build_intrinsic(ctx, "llvm.amdgcn.writelane", ctx->i32, + (LLVMValueRef []) {value, lane, src}, 3, + AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT); } LLVMValueRef diff --git a/src/amd/common/ac_llvm_build.h b/src/amd/common/ac_llvm_build.h index 103c3b484dd..a2e4ec6194d 100644 --- a/src/amd/common/ac_llvm_build.h +++ b/src/amd/common/ac_llvm_build.h @@ -324,16 +324,6 @@ LLVMValueRef ac_build_buffer_load_format(struct ac_llvm_context *ctx, unsigned cache_policy, bool can_speculate); -/* load_format that handles the stride & element count better if idxen is - * disabled by LLVM. */ -LLVMValueRef ac_build_buffer_load_format_gfx9_safe(struct ac_llvm_context *ctx, - LLVMValueRef rsrc, - LLVMValueRef vindex, - LLVMValueRef voffset, - unsigned num_channels, - unsigned cache_policy, - bool can_speculate); - LLVMValueRef ac_build_tbuffer_load_short(struct ac_llvm_context *ctx, LLVMValueRef rsrc, diff --git a/src/amd/common/ac_llvm_util.c b/src/amd/common/ac_llvm_util.c index a201f2d1fc5..7793926bf49 100644 --- a/src/amd/common/ac_llvm_util.c +++ b/src/amd/common/ac_llvm_util.c @@ -133,7 +133,7 @@ const char *ac_get_llvm_processor_name(enum radeon_family family) return "gfx906"; case CHIP_RAVEN2: case CHIP_RENOIR: - return HAVE_LLVM >= 0x0800 ? "gfx909" : "gfx902"; + return "gfx909"; case CHIP_ARCTURUS: return "gfx908"; case CHIP_NAVI10: @@ -158,8 +158,7 @@ static LLVMTargetMachineRef ac_create_target_machine(enum radeon_family family, LLVMTargetRef target = ac_get_llvm_target(triple); snprintf(features, sizeof(features), - "+DumpCode,-fp32-denormals,+fp64-denormals%s%s%s%s%s%s%s", - HAVE_LLVM >= 0x0800 ? "" : ",+vgpr-spilling", + "+DumpCode,-fp32-denormals,+fp64-denormals%s%s%s%s%s%s", family >= CHIP_NAVI10 && !(tm_options & AC_TM_WAVE32) ? ",+wavefrontsize64,-wavefrontsize32" : "", tm_options & AC_TM_SISCHED ? 
",+si-scheduler" : "", diff --git a/src/amd/common/ac_nir_to_llvm.c b/src/amd/common/ac_nir_to_llvm.c index 33a95bc71ae..0b06290f58a 100644 --- a/src/amd/common/ac_nir_to_llvm.c +++ b/src/amd/common/ac_nir_to_llvm.c @@ -1308,21 +1308,12 @@ static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx, if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) { unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa); - if (ctx->abi->gfx9_stride_size_workaround) { - return ac_build_buffer_load_format_gfx9_safe(&ctx->ac, - args->resource, - args->coords[0], - ctx->ac.i32_0, - util_last_bit(mask), - 0, true); - } else { - return ac_build_buffer_load_format(&ctx->ac, - args->resource, - args->coords[0], - ctx->ac.i32_0, - util_last_bit(mask), - 0, true); - } + return ac_build_buffer_load_format(&ctx->ac, + args->resource, + args->coords[0], + ctx->ac.i32_0, + util_last_bit(mask), + 0, true); } args->opcode = ac_image_sample; @@ -2477,8 +2468,7 @@ static LLVMValueRef get_image_buffer_descriptor(struct ac_nir_context *ctx, bool write, bool atomic) { LLVMValueRef rsrc = get_image_descriptor(ctx, instr, AC_DESC_BUFFER, write); - if (ctx->abi->gfx9_stride_size_workaround || - (ctx->abi->gfx9_stride_size_workaround_for_atomic && atomic)) { + if (ctx->abi->gfx9_stride_size_workaround_for_atomic && atomic) { LLVMValueRef elem_count = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 2, 0), ""); LLVMValueRef stride = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 1, 0), ""); stride = LLVMBuildLShr(ctx->ac.builder, stride, LLVMConstInt(ctx->ac.i32, 16, 0), ""); diff --git a/src/amd/common/ac_shader_abi.h b/src/amd/common/ac_shader_abi.h index 7dd580bb195..5d4479c5242 100644 --- a/src/amd/common/ac_shader_abi.h +++ b/src/amd/common/ac_shader_abi.h @@ -209,7 +209,6 @@ struct ac_shader_abi { /* Whether to workaround GFX9 ignoring the stride for the buffer size if IDXEN=0 * and LLVM optimizes an indexed load with constant index to IDXEN=0. 
*/ - bool gfx9_stride_size_workaround; bool gfx9_stride_size_workaround_for_atomic; /* Whether bounds checks are required */ diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c index 2d5b006c1cf..39d87b5b575 100644 --- a/src/amd/vulkan/radv_device.c +++ b/src/amd/vulkan/radv_device.c @@ -979,7 +979,7 @@ void radv_GetPhysicalDeviceFeatures2( case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR: { VkPhysicalDeviceFloat16Int8FeaturesKHR *features = (VkPhysicalDeviceFloat16Int8FeaturesKHR*)ext; - features->shaderFloat16 = pdevice->rad_info.chip_class >= GFX8 && HAVE_LLVM >= 0x0800; + features->shaderFloat16 = pdevice->rad_info.chip_class >= GFX8; features->shaderInt8 = true; break; } diff --git a/src/amd/vulkan/radv_extensions.py b/src/amd/vulkan/radv_extensions.py index b28d74f5746..b349ab74d58 100644 --- a/src/amd/vulkan/radv_extensions.py +++ b/src/amd/vulkan/radv_extensions.py @@ -138,7 +138,7 @@ EXTENSIONS = [ Extension('VK_AMD_buffer_marker', 1, True), Extension('VK_AMD_draw_indirect_count', 1, True), Extension('VK_AMD_gcn_shader', 1, True), - Extension('VK_AMD_gpu_shader_half_float', 1, 'device->rad_info.chip_class >= GFX9 && HAVE_LLVM >= 0x0800'), + Extension('VK_AMD_gpu_shader_half_float', 1, 'device->rad_info.chip_class >= GFX9'), Extension('VK_AMD_gpu_shader_int16', 1, 'device->rad_info.chip_class >= GFX9'), Extension('VK_AMD_rasterization_order', 1, 'device->has_out_of_order_rast'), Extension('VK_AMD_shader_ballot', 1, 'device->use_shader_ballot'), diff --git a/src/amd/vulkan/radv_nir_to_llvm.c b/src/amd/vulkan/radv_nir_to_llvm.c index c7c837d16f0..c594f051503 100644 --- a/src/amd/vulkan/radv_nir_to_llvm.c +++ b/src/amd/vulkan/radv_nir_to_llvm.c @@ -4360,7 +4360,6 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm, ctx.abi.load_sampler_desc = radv_get_sampler_desc; ctx.abi.load_resource = radv_load_resource; ctx.abi.clamp_shadow_reference = false; - ctx.abi.gfx9_stride_size_workaround = ctx.ac.chip_class == GFX9 && HAVE_LLVM < 0x800; ctx.abi.robust_buffer_access = options->robust_buffer_access; /* Because the new raw/struct atomic intrinsics are buggy with LLVM 8, diff --git a/src/amd/vulkan/radv_shader.c b/src/amd/vulkan/radv_shader.c index 9397c5d6f40..f7ef20e89b0 100644 --- a/src/amd/vulkan/radv_shader.c +++ b/src/amd/vulkan/radv_shader.c @@ -942,15 +942,8 @@ static void radv_init_llvm_target() * * "mesa" is the prefix for error messages. */ - if (HAVE_LLVM >= 0x0800) { - const char *argv[2] = { "mesa", "-simplifycfg-sink-common=false" }; - LLVMParseCommandLineOptions(2, argv, NULL); - - } else { - const char *argv[3] = { "mesa", "-simplifycfg-sink-common=false", - "-amdgpu-skip-threshold=1" }; - LLVMParseCommandLineOptions(3, argv, NULL); - } + const char *argv[2] = { "mesa", "-simplifycfg-sink-common=false" }; + LLVMParseCommandLineOptions(2, argv, NULL); } static once_flag radv_init_llvm_target_once_flag = ONCE_FLAG_INIT; diff --git a/src/gallium/drivers/radeonsi/si_shader.c b/src/gallium/drivers/radeonsi/si_shader.c index 64d7ec08348..6f1ecaf44ed 100644 --- a/src/gallium/drivers/radeonsi/si_shader.c +++ b/src/gallium/drivers/radeonsi/si_shader.c @@ -5305,11 +5305,6 @@ static bool si_get_external_symbol(void *data, const char *name, uint64_t *value /* Enable scratch coalescing. */ *value = S_008F04_BASE_ADDRESS_HI(*scratch_va >> 32) | S_008F04_SWIZZLE_ENABLE(1); - if (HAVE_LLVM < 0x0800) { - /* Old LLVM created an R_ABS32_HI relocation for - * this symbol. 
*/ - *value <<= 32; - } return true; } diff --git a/src/gallium/drivers/radeonsi/si_shader_tgsi_mem.c b/src/gallium/drivers/radeonsi/si_shader_tgsi_mem.c index f79ed2c57e1..76156817685 100644 --- a/src/gallium/drivers/radeonsi/si_shader_tgsi_mem.c +++ b/src/gallium/drivers/radeonsi/si_shader_tgsi_mem.c @@ -840,8 +840,7 @@ static void atomic_emit( vindex = args.coords[0]; /* for buffers only */ } - if (HAVE_LLVM >= 0x0800 && - inst->Src[0].Register.File != TGSI_FILE_BUFFER && + if (inst->Src[0].Register.File != TGSI_FILE_BUFFER && inst->Memory.Texture == TGSI_TEXTURE_BUFFER) { LLVMValueRef buf_args[7]; unsigned num_args = 0; @@ -866,9 +865,7 @@ static void atomic_emit( return; } - if (inst->Src[0].Register.File == TGSI_FILE_BUFFER || - (HAVE_LLVM < 0x0800 && - inst->Memory.Texture == TGSI_TEXTURE_BUFFER)) { + if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) { LLVMValueRef buf_args[7]; unsigned num_args = 0; diff --git a/src/gallium/drivers/radeonsi/si_state.c b/src/gallium/drivers/radeonsi/si_state.c index 12636dbeab7..05844abe360 100644 --- a/src/gallium/drivers/radeonsi/si_state.c +++ b/src/gallium/drivers/radeonsi/si_state.c @@ -3801,14 +3801,7 @@ si_make_buffer_descriptor(struct si_screen *screen, struct si_resource *buf, * - For VMEM and inst.IDXEN == 0 or STRIDE == 0, it's in byte units. * - For VMEM and inst.IDXEN == 1 and STRIDE != 0, it's in units of STRIDE. */ - if (screen->info.chip_class == GFX9 && HAVE_LLVM < 0x0800) - /* When vindex == 0, LLVM < 8.0 sets IDXEN = 0, thus changing units - * from STRIDE to bytes. This works around it by setting - * NUM_RECORDS to at least the size of one element, so that - * the first element is readable when IDXEN == 0. - */ - num_records = num_records ? MAX2(num_records, stride) : 0; - else if (screen->info.chip_class == GFX8) + if (screen->info.chip_class == GFX8) num_records *= stride; state[4] = 0; |
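
One behavioral consequence is visible in the final si_state.c hunk: with LLVM 7 gone, si_make_buffer_descriptor no longer needs the GFX9 clamp of NUM_RECORDS to at least one element, which only existed because LLVM < 8 emitted IDXEN == 0 for a constant-zero vindex. A minimal C sketch of the surviving logic, assuming simplified types and a made-up helper name:

```c
/* Minimal sketch of the remaining NUM_RECORDS handling after this patch.
 * The function and enum names are illustrative; only the GFX8 branch from
 * the hunk above survives, since the deleted MAX2(num_records, stride)
 * clamp was a workaround for LLVM < 8 setting IDXEN = 0. */
#include <stdint.h>
#include <stdio.h>

enum chip_class { GFX8 = 8, GFX9 = 9 };

uint32_t buffer_num_records(enum chip_class chip,
                            uint32_t num_records, uint32_t stride)
{
	if (chip == GFX8)
		num_records *= stride;  /* GFX8 branch kept verbatim by the patch */
	/* GFX9+: NUM_RECORDS stays in units of STRIDE; no clamp needed now
	 * that the LLVM 7 code path is gone. */
	return num_records;
}

int main(void)
{
	printf("GFX8: %u\n", buffer_num_records(GFX8, 4, 16));  /* 64 */
	printf("GFX9: %u\n", buffer_num_records(GFX9, 4, 16));  /* 4  */
	return 0;
}
```
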