diff options
author | Samuel Pitoiset <[email protected]> | 2018-10-29 16:16:46 +0100 |
---|---|---|
committer | Samuel Pitoiset <[email protected]> | 2019-01-14 17:59:49 +0100 |
commit | af2a85df743ed7b59b03aba96071cef12fce41c3 (patch) | |
tree | f495ae4346b7f881161618a9249bbab30661d6a9 /src/amd | |
parent | 5e4f9ea363a638645670abeffce08ed58c37c369 (diff) |
ac/nir: add get_cache_policy() helper and use it
Signed-off-by: Samuel Pitoiset <[email protected]>
Reviewed-by: Bas Nieuwenhuizen <[email protected]>
Diffstat (limited to 'src/amd')
-rw-r--r-- | src/amd/common/ac_nir_to_llvm.c | 38 |
1 file changed, 26 insertions(+), 12 deletions(-)
diff --git a/src/amd/common/ac_nir_to_llvm.c b/src/amd/common/ac_nir_to_llvm.c index 5023b96f92d..78e803330c0 100644 --- a/src/amd/common/ac_nir_to_llvm.c +++ b/src/amd/common/ac_nir_to_llvm.c @@ -1463,6 +1463,24 @@ static LLVMValueRef extract_vector_range(struct ac_llvm_context *ctx, LLVMValueR } } +static unsigned get_cache_policy(struct ac_nir_context *ctx, + enum gl_access_qualifier access, + bool may_store_unaligned) +{ + unsigned cache_policy = 0; + + /* SI has a TC L1 bug causing corruption of 8bit/16bit stores. All + * store opcodes not aligned to a dword are affected. The only way to + * get unaligned stores is through shader images. + */ + if (((may_store_unaligned && ctx->ac.chip_class == SI) || + access & (ACCESS_COHERENT | ACCESS_VOLATILE))) { + cache_policy |= ac_glc; + } + + return cache_policy; +} + static void visit_store_ssbo(struct ac_nir_context *ctx, nir_intrinsic_instr *instr) { @@ -1471,10 +1489,8 @@ static void visit_store_ssbo(struct ac_nir_context *ctx, int elem_size_bytes = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src_data)) / 8; unsigned writemask = nir_intrinsic_write_mask(instr); enum gl_access_qualifier access = nir_intrinsic_access(instr); - LLVMValueRef glc = ctx->ac.i1false; - - if (access & (ACCESS_VOLATILE | ACCESS_COHERENT)) - glc = ctx->ac.i1true; + unsigned cache_policy = get_cache_policy(ctx, access, false); + LLVMValueRef glc = (cache_policy & ac_glc) ? 
ctx->ac.i1true : ctx->ac.i1false; LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi, get_src(ctx, instr->src[1]), true); @@ -1630,10 +1646,8 @@ static LLVMValueRef visit_load_buffer(struct ac_nir_context *ctx, int elem_size_bytes = instr->dest.ssa.bit_size / 8; int num_components = instr->num_components; enum gl_access_qualifier access = nir_intrinsic_access(instr); - LLVMValueRef glc = ctx->ac.i1false; - - if (access & (ACCESS_VOLATILE | ACCESS_COHERENT)) - glc = ctx->ac.i1true; + unsigned cache_policy = get_cache_policy(ctx, access, false); + LLVMValueRef glc = (cache_policy & ac_glc) ? ctx->ac.i1true : ctx->ac.i1false; LLVMValueRef offset = get_src(ctx, instr->src[1]); LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi, @@ -2371,8 +2385,8 @@ static LLVMValueRef visit_image_load(struct ac_nir_context *ctx, glsl_sampler_type_is_array(type)); args.dmask = 15; args.attributes = AC_FUNC_ATTR_READONLY; - if (var->data.image.access & (ACCESS_VOLATILE | ACCESS_COHERENT)) - args.cache_policy |= ac_glc; + args.cache_policy = + get_cache_policy(ctx, var->data.image.access, false); res = ac_build_image_opcode(&ctx->ac, &args); } @@ -2428,8 +2442,8 @@ static void visit_image_store(struct ac_nir_context *ctx, args.dim = get_ac_image_dim(&ctx->ac, glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type)); args.dmask = 15; - if (force_glc || (var->data.image.access & (ACCESS_VOLATILE | ACCESS_COHERENT))) - args.cache_policy |= ac_glc; + args.cache_policy = + get_cache_policy(ctx, var->data.image.access, true); ac_build_image_opcode(&ctx->ac, &args); } |