author     Roland Scheidegger <[email protected]>  2013-01-12 17:20:13 -0800
committer  Roland Scheidegger <[email protected]>  2013-01-18 09:14:52 -0800
commit  f2a87a1f5bcd78f381409345740ed37273453c0d
tree    9cac7e9b786fdbd3d0a6d6acf3db8b6ae893cab6 /src/gallium/drivers
parent  dc6bc3b642dc2dc04f583dd92d97aa1da9b90332
llvmpipe: more fixes for integer color buffers
Cast back the fake floats to ints, and make sure we don't try to do scaling
in format conversion (which only makes sense with normalized values). Also
need to disable blending and alpha test (as per spec) for such buffers.

This makes fbo-blending from the piglit ext_texture_integer tests work for
most formats (some crash, and the luminance and intensity variants have the
GB or GBA channels respectively wrong).

Reviewed-by: Brian Paul <[email protected]>
Reviewed-by: José Fonseca <[email protected]>
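For context, a standalone illustration (not part of the patch) of why scaling only makes sense for normalized channels: widening a unorm channel replicates the bit pattern so the maximum value still maps to 1.0, while a pure-integer channel carries its exact value and must only be widened without scaling. The helper name and values below are illustrative, not taken from llvmpipe:

#include <stdint.h>
#include <stdio.h>

/* Illustrative helper: widening an 8-bit unorm channel to 16 bits
 * replicates the bits, so 0xff (1.0 in unorm8) becomes 0xffff (1.0
 * in unorm16). */
static uint16_t unorm8_to_unorm16(uint8_t v)
{
   return (uint16_t)v * 0x0101;
}

int main(void)
{
   uint8_t unorm = 0xff;   /* 1.0 in unorm8 */
   int8_t  pure  = 100;    /* the integer 100 in an int8 buffer */

   /* Normalized: scale so the maximum keeps representing 1.0. */
   printf("unorm8 0x%02x -> unorm16 0x%04x\n",
          unorm, unorm8_to_unorm16(unorm));

   /* Pure integer: the value itself is the data; no scaling, only
    * (sign-)extension to the wider blend type. */
   printf("int8 %d -> int32 %d\n", (int)pure, (int)(int32_t)pure);
   return 0;
}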
Diffstat (limited to 'src/gallium/drivers')
-rw-r--r--  src/gallium/drivers/llvmpipe/lp_state_fs.c | 42
1 file changed, 39 insertions(+), 3 deletions(-)
diff --git a/src/gallium/drivers/llvmpipe/lp_state_fs.c b/src/gallium/drivers/llvmpipe/lp_state_fs.c
index 83b902de959..cf936d029b5 100644
--- a/src/gallium/drivers/llvmpipe/lp_state_fs.c
+++ b/src/gallium/drivers/llvmpipe/lp_state_fs.c
@@ -1143,7 +1143,10 @@ convert_to_blend_type(struct gallivm_state *gallivm,
"");
/* Scale bits */
- chans[j] = scale_bits(gallivm, src_fmt->channel[j].size, blend_type.width, chans[j], src_type);
+ if (src_type.norm) {
+ chans[j] = scale_bits(gallivm, src_fmt->channel[j].size,
+ blend_type.width, chans[j], src_type);
+ }
/* Insert bits into correct position */
chans[j] = LLVMBuildShl(builder,
@@ -1250,7 +1253,10 @@ convert_from_blend_type(struct gallivm_state *gallivm,
"");
/* Scale down bits */
- chans[j] = scale_bits(gallivm, blend_type.width, src_fmt->channel[j].size, chans[j], src_type);
+ if (src_type.norm) {
+ chans[j] = scale_bits(gallivm, blend_type.width,
+ src_fmt->channel[j].size, chans[j], src_type);
+ }
/* Insert bits */
chans[j] = LLVMBuildShl(builder,
@@ -1438,6 +1444,25 @@ generate_unswizzled_blend(struct gallivm_state *gallivm,
}
}
+ if (util_format_is_pure_integer(out_format)) {
+ /*
+ * In this case fs_type was really ints or uints disguised as floats,
+ * fix that up now.
+ */
+ fs_type.floating = 0;
+ fs_type.sign = dst_type.sign;
+ for (i = 0; i < num_fs; ++i) {
+ for (j = 0; j < dst_channels; ++j) {
+ fs_src[i][j] = LLVMBuildBitCast(builder, fs_src[i][j],
+ lp_build_vec_type(gallivm, fs_type), "");
+ }
+ if (dst_channels == 3 && !has_alpha) {
+ fs_src[i][3] = LLVMBuildBitCast(builder, fs_src[i][3],
+ lp_build_vec_type(gallivm, fs_type), "");
+ }
+ }
+ }
+
/*
* Pixel twiddle from fragment shader order to memory order
@@ -2498,7 +2523,11 @@ make_variant_key(struct llvmpipe_context *lp,
}
}
- key->alpha.enabled = lp->depth_stencil->alpha.enabled;
+ /* alpha test only applies if render buffer 0 is non-integer (or does not exist) */
+ if (!lp->framebuffer.nr_cbufs ||
+ !util_format_is_pure_integer(lp->framebuffer.cbufs[0]->format)) {
+ key->alpha.enabled = lp->depth_stencil->alpha.enabled;
+ }
if(key->alpha.enabled)
key->alpha.func = lp->depth_stencil->alpha.func;
/* alpha.ref_value is passed in jit_context */
@@ -2539,6 +2568,13 @@ make_variant_key(struct llvmpipe_context *lp,
blend_rt->colormask &= util_format_colormask(format_desc);
/*
+ * Disable blend for integer formats.
+ */
+ if (util_format_is_pure_integer(format)) {
+ blend_rt->blend_enable = 0;
+ }
+
+ /*
* Our swizzled render tiles always have an alpha channel, but the linear
* render target format often does not, so force here the dst alpha to be
* one.