author    Roland Scheidegger <[email protected]>    2014-06-18 01:34:49 +0200
committer Roland Scheidegger <[email protected]>    2014-06-18 19:52:57 +0200
commit    56335b44417bc3d49625f9637e2b95457f522ad2 (patch)
tree      a4910f758a54d84d3548b97108114af08c412b3d /src/gallium/auxiliary
parent    7928b946adbcbbb835c0080967bbb538f6bd35dc (diff)
gallivm: fix SCALED -> NORM conversions
Such conversions (most likely rather pointless in practice) were resulting in shifts with negative shift counts and in shifts with counts equal to the bit width. Both have always been undefined in llvm; the generated code was rather horrendous but happened to work. So make sure such shifts are filtered out and replaced with something that works (the generated code is still just as horrendous as before).

This fixes lp_test_format, https://bugs.freedesktop.org/show_bug.cgi?id=73846.

v2: prettify by using build context shift helpers.

Reviewed-by: Jose Fonseca <[email protected]>
Diffstat (limited to 'src/gallium/auxiliary')
-rw-r--r--   src/gallium/auxiliary/gallivm/lp_bld_conv.c | 39
1 file changed, 23 insertions, 16 deletions
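The problem the patch guards against can be shown outside of gallivm. Below is a minimal C sketch (not part of the commit; shl_guarded is a hypothetical helper invented only for illustration): shifting by a negative count or by a count equal to the bit width is undefined, so the shift is only performed when the count is in range and a well-defined value (zero) is substituted otherwise, mirroring the checks the patch adds around lp_build_shl_imm/lp_build_shr_imm.

/* Minimal illustration, not from the commit: shl_guarded is a hypothetical
 * helper. C (like LLVM IR) leaves shifts by a count >= the bit width
 * undefined, so only emit the shift when the count is in range. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t
shl_guarded(uint32_t x, unsigned count, unsigned width)
{
   /* count == width (e.g. 32 for a 32-bit value) would be undefined,
    * so substitute zero instead of performing the shift. */
   if (count < width)
      return x << count;
   return 0;
}

int
main(void)
{
   /* an sscaled -> unorm style conversion can ask for a shift by the
    * full bit width; the guard keeps the result defined. */
   printf("%" PRIu32 "\n", shl_guarded(0x7f, 4, 32));   /* in range: 2032 */
   printf("%" PRIu32 "\n", shl_guarded(0x7f, 32, 32));  /* out of range: 0 */
   return 0;
}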
diff --git a/src/gallium/auxiliary/gallivm/lp_bld_conv.c b/src/gallium/auxiliary/gallivm/lp_bld_conv.c
index d3bf62167b3..14244470c90 100644
--- a/src/gallium/auxiliary/gallivm/lp_bld_conv.c
+++ b/src/gallium/auxiliary/gallivm/lp_bld_conv.c
@@ -792,29 +792,23 @@ lp_build_conv(struct gallivm_state *gallivm,
unsigned dst_shift = lp_const_shift(dst_type);
unsigned src_offset = lp_const_offset(src_type);
unsigned dst_offset = lp_const_offset(dst_type);
+ struct lp_build_context bld;
+ lp_build_context_init(&bld, gallivm, tmp_type);
/* Compensate for different offsets */
- if (dst_offset > src_offset && src_type.width > dst_type.width) {
+ /* sscaled -> unorm and similar would cause negative shift count, skip */
+ if (dst_offset > src_offset && src_type.width > dst_type.width && src_shift > 0) {
for (i = 0; i < num_tmps; ++i) {
LLVMValueRef shifted;
- LLVMValueRef shift = lp_build_const_int_vec(gallivm, tmp_type, src_shift - 1);
- if(src_type.sign)
- shifted = LLVMBuildAShr(builder, tmp[i], shift, "");
- else
- shifted = LLVMBuildLShr(builder, tmp[i], shift, "");
+ shifted = lp_build_shr_imm(&bld, tmp[i], src_shift - 1);
tmp[i] = LLVMBuildSub(builder, tmp[i], shifted, "");
}
}
if(src_shift > dst_shift) {
- LLVMValueRef shift = lp_build_const_int_vec(gallivm, tmp_type,
- src_shift - dst_shift);
for(i = 0; i < num_tmps; ++i)
- if(src_type.sign)
- tmp[i] = LLVMBuildAShr(builder, tmp[i], shift, "");
- else
- tmp[i] = LLVMBuildLShr(builder, tmp[i], shift, "");
+ tmp[i] = lp_build_shr_imm(&bld, tmp[i], src_shift - dst_shift);
}
}
@@ -900,14 +894,27 @@ lp_build_conv(struct gallivm_state *gallivm,
unsigned dst_shift = lp_const_shift(dst_type);
unsigned src_offset = lp_const_offset(src_type);
unsigned dst_offset = lp_const_offset(dst_type);
+ struct lp_build_context bld;
+ lp_build_context_init(&bld, gallivm, tmp_type);
if (src_shift < dst_shift) {
LLVMValueRef pre_shift[LP_MAX_VECTOR_LENGTH];
- LLVMValueRef shift = lp_build_const_int_vec(gallivm, tmp_type, dst_shift - src_shift);
- for (i = 0; i < num_tmps; ++i) {
- pre_shift[i] = tmp[i];
- tmp[i] = LLVMBuildShl(builder, tmp[i], shift, "");
+ if (dst_shift - src_shift < dst_type.width) {
+ for (i = 0; i < num_tmps; ++i) {
+ pre_shift[i] = tmp[i];
+ tmp[i] = lp_build_shl_imm(&bld, tmp[i], dst_shift - src_shift);
+ }
+ }
+ else {
+ /*
+ * This happens for things like sscaled -> unorm conversions. Shift
+ * counts equal to bit width cause undefined results, so hack around it.
+ */
+ for (i = 0; i < num_tmps; ++i) {
+ pre_shift[i] = tmp[i];
+ tmp[i] = lp_build_zero(gallivm, dst_type);
+ }
}
/* Compensate for different offsets */