author     Marek Olšák <[email protected]>       2013-01-17 19:36:41 +0100
committer  Michel Dänzer <[email protected]>     2013-01-21 15:42:28 +0100
commit     6f6112a2b982462667ba36a6f3ba381558780e8a (patch)
tree       b4273ce12fca87afe1cfe0d375b38fbd1475a265  /src/gallium/drivers/radeonsi/r600_blit.c
parent     bc398f908f8765edee48150dc7e3f24874bb03d9 (diff)
radeonsi: More assorted depth/stencil changes ported from r600g.
[ Squashed port of the following r600g commits: - Michel Dänzer ]

commit 428e37c2da420f7dc14a2ea265f2387270f9bee1
Author: Marek Olšák <[email protected]>
Date:   Tue Oct 2 22:02:54 2012 +0200

    r600g: add in-place DB decompression and texturing with DB tiling

    The decompression is done in-place and only the compressed tiles are
    decompressed. Note: R6xx-R7xx can do that only with Z16 and Z32F.

    The texture unit is programmed to use non-displayable tiling and depth
    ordering of samples, so that it can fetch the texture in the native DB
    format.

    The latest version of the libdrm surface allocator is required for
    stencil texturing to work. The old one didn't create the mipmap tree
    correctly. We need a separate mipmap tree for stencil, because the
    stencil mipmap offsets are not really depth offsets/4.

    There are still some known bugs, but this should save some memory and
    it also improves performance a little bit in Lightsmark (especially
    with low resolutions; tested with Radeon HD 5000).

    The DB->CB copy is still used for transfers.

commit e2f623f1d6da9bc987582ff68d0471061ae44030
Author: Marek Olšák <[email protected]>
Date:   Sat Jul 28 13:55:59 2012 +0200

    r600g: don't decompress depth or stencil if there isn't any

commit 43e226b6efb77db2247741cc2057d9625a2cfa05
Author: Marek Olšák <[email protected]>
Date:   Wed Jul 18 00:32:50 2012 +0200

    r600g: optimize uploading depth textures

    Make it only copy the portion of a depth texture being uploaded and not
    the whole 2D layer. There is also a little code cleanup.

commit b242adbe5cfa165b252064a1ea36f802d8251ef1
Author: Marek Olšák <[email protected]>
Date:   Wed Jul 18 00:17:46 2012 +0200

    r600g: remove needless wrapper r600_texture_depth_flush

commit 611dd529425281d73f1f0ad2000362d4a5525a25
Author: Marek Olšák <[email protected]>
Date:   Wed Jul 18 00:05:14 2012 +0200

    r600g: init_flushed_depth_texture should be able to report errors

commit 80755ff56317446a8c89e611edc1fdf320d6779b
Author: Marek Olšák <[email protected]>
Date:   Sat Jul 14 17:06:27 2012 +0200

    r600g: properly track which textures are depth

    This fixes the issue with have_depth_texture never being set to false.

commit fe1fd675565231b49d3ac53d0b4bec39d8bc6781
Author: Marek Olšák <[email protected]>
Date:   Sun Jul 8 03:10:37 2012 +0200

    r600g: don't flush depth textures set as colorbuffers

    The only case a depth buffer can be set as a color buffer is when
    flushing. That wasn't always the case, but now this code isn't
    required anymore.

commit 5a17d8318ec2c20bf86275044dc8f715105a88e7
Author: Marek Olšák <[email protected]>
Date:   Sun Jul 8 02:14:18 2012 +0200

    r600g: flush depth textures bound to vertex shaders

    This was missing/broken. There are also minor code cleanups.

commit dee58f94af833906863b0ff2955b20f3ab407e63
Author: Marek Olšák <[email protected]>
Date:   Sun Jul 8 01:54:24 2012 +0200

    r600g: do fine-grained depth texture flushing

    - maintain a mask of which mipmap levels are dirty (instead of one big
      flag)
    - only flush what was requested at a given point and not the whole
      resource (most often only one level and one layer has to be flushed)

Signed-off-by: Michel Dänzer <[email protected]>
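As a rough illustration of the fine-grained flushing scheme described in the last squashed commit above, the following minimal standalone C sketch (not the driver code; the struct and function names are invented for the example) shows the idea of keeping one dirty bit per mipmap level and clearing only the levels that were actually decompressed, instead of a single dirty flag for the whole resource:

/*
 * Minimal sketch of per-level dirty tracking: one bit per mipmap level,
 * so only levels that were actually rendered to need DB decompression.
 * Names here are illustrative only, not taken from the driver.
 */
#include <stdio.h>

struct example_depth_tex {
	unsigned last_level;    /* highest mipmap level of the texture */
	unsigned dirty_db_mask; /* bit N set => level N needs decompression */
};

static void example_mark_level_dirty(struct example_depth_tex *t, unsigned level)
{
	t->dirty_db_mask |= 1u << level;
}

static void example_decompress_range(struct example_depth_tex *t,
                                     unsigned first_level, unsigned last_level)
{
	unsigned level;

	for (level = first_level; level <= last_level; level++) {
		/* Skip levels that are already clean, mirroring the
		 * "dirty_db_mask & (1 << level)" test in the patch. */
		if (!(t->dirty_db_mask & (1u << level)))
			continue;

		printf("decompress level %u\n", level);

		/* Clear only the level that was flushed. */
		t->dirty_db_mask &= ~(1u << level);
	}
}

int main(void)
{
	struct example_depth_tex tex = { .last_level = 3, .dirty_db_mask = 0 };

	example_mark_level_dirty(&tex, 0);
	example_mark_level_dirty(&tex, 2);

	/* Only levels 0 and 2 are touched; 1 and 3 are skipped. */
	example_decompress_range(&tex, 0, tex.last_level);
	return 0;
}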
Diffstat (limited to 'src/gallium/drivers/radeonsi/r600_blit.c')
-rw-r--r--  src/gallium/drivers/radeonsi/r600_blit.c  145
1 file changed, 104 insertions, 41 deletions
diff --git a/src/gallium/drivers/radeonsi/r600_blit.c b/src/gallium/drivers/radeonsi/r600_blit.c
index d600962249f..b7aedb11524 100644
--- a/src/gallium/drivers/radeonsi/r600_blit.c
+++ b/src/gallium/drivers/radeonsi/r600_blit.c
@@ -98,39 +98,63 @@ static void r600_blitter_end(struct pipe_context *ctx)
r600_context_queries_resume(rctx);
}
-static unsigned u_num_layers(struct pipe_resource *r, unsigned level)
+static unsigned u_max_layer(struct pipe_resource *r, unsigned level)
{
switch (r->target) {
case PIPE_TEXTURE_CUBE:
- return 6;
+ return 6 - 1;
case PIPE_TEXTURE_3D:
- return u_minify(r->depth0, level);
+ return u_minify(r->depth0, level) - 1;
case PIPE_TEXTURE_1D_ARRAY:
- return r->array_size;
case PIPE_TEXTURE_2D_ARRAY:
- return r->array_size;
+ return r->array_size - 1;
default:
- return 1;
+ return 0;
}
}
void si_blit_uncompress_depth(struct pipe_context *ctx,
struct r600_resource_texture *texture,
- struct r600_resource_texture *staging)
+ struct r600_resource_texture *staging,
+ unsigned first_level, unsigned last_level,
+ unsigned first_layer, unsigned last_layer)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- unsigned layer, level;
+ unsigned layer, level, checked_last_layer, max_layer;
float depth = 1.0f;
+ const struct util_format_description *desc;
+ void *custom_dsa;
struct r600_resource_texture *flushed_depth_texture = staging ?
staging : texture->flushed_depth_texture;
- if (!staging && !texture->dirty_db)
+ if (!staging && !texture->dirty_db_mask)
return;
- for (level = 0; level <= texture->resource.b.b.last_level; level++) {
- unsigned num_layers = u_num_layers(&texture->resource.b.b, level);
+ desc = util_format_description(flushed_depth_texture->resource.b.b.format);
+ switch (util_format_has_depth(desc) | util_format_has_stencil(desc) << 1) {
+ default:
+ assert(!"No depth or stencil to uncompress");
+ case 3:
+ custom_dsa = rctx->custom_dsa_flush_depth_stencil;
+ break;
+ case 2:
+ custom_dsa = rctx->custom_dsa_flush_stencil;
+ break;
+ case 1:
+ custom_dsa = rctx->custom_dsa_flush_depth;
+ break;
+ }
+
+ for (level = first_level; level <= last_level; level++) {
+ if (!staging && !(texture->dirty_db_mask & (1 << level)))
+ continue;
- for (layer = 0; layer < num_layers; layer++) {
+ /* The smaller the mipmap level, the fewer layers there are
+ * as far as 3D textures are concerned. */
+ max_layer = u_max_layer(&texture->resource.b.b, level);
+ checked_last_layer = last_layer < max_layer ? last_layer : max_layer;
+
+ for (layer = first_layer; layer <= checked_last_layer; layer++) {
struct pipe_surface *zsurf, *cbsurf, surf_tmpl;
surf_tmpl.format = texture->real_format;
@@ -145,53 +169,84 @@ void si_blit_uncompress_depth(struct pipe_context *ctx,
(struct pipe_resource*)flushed_depth_texture, &surf_tmpl);
r600_blitter_begin(ctx, R600_DECOMPRESS);
- util_blitter_custom_depth_stencil(rctx->blitter, zsurf, cbsurf, ~0, rctx->custom_dsa_flush, depth);
+ util_blitter_custom_depth_stencil(rctx->blitter, zsurf, cbsurf, ~0, custom_dsa, depth);
r600_blitter_end(ctx);
pipe_surface_reference(&zsurf, NULL);
pipe_surface_reference(&cbsurf, NULL);
}
- }
- if (!staging)
- texture->dirty_db = FALSE;
+ /* The texture will always be dirty if some layers aren't flushed.
+ * I don't think this case can occur though. */
+ if (!staging && first_layer == 0 && last_layer == max_layer) {
+ texture->dirty_db_mask &= ~(1 << level);
+ }
+ }
}
-void si_flush_depth_textures(struct r600_context *rctx)
+static void si_blit_decompress_depth_in_place(struct r600_context *rctx,
+ struct r600_resource_texture *texture,
+ unsigned first_level, unsigned last_level,
+ unsigned first_layer, unsigned last_layer)
{
- unsigned int i;
+ struct pipe_surface *zsurf, surf_tmpl = {{0}};
+ unsigned layer, max_layer, checked_last_layer, level;
- /* FIXME: This handles fragment shader textures only. */
+ surf_tmpl.format = texture->resource.b.b.format;
- for (i = 0; i < rctx->ps_samplers.n_views; ++i) {
- struct si_pipe_sampler_view *view;
- struct r600_resource_texture *tex;
+ for (level = first_level; level <= last_level; level++) {
+ if (!(texture->dirty_db_mask & (1 << level)))
+ continue;
- view = rctx->ps_samplers.views[i];
- if (!view) continue;
+ surf_tmpl.u.tex.level = level;
- tex = (struct r600_resource_texture *)view->base.texture;
- if (!tex->is_depth)
- continue;
+ /* The smaller the mipmap level, the fewer layers there are
+ * as far as 3D textures are concerned. */
+ max_layer = u_max_layer(&texture->resource.b.b, level);
+ checked_last_layer = last_layer < max_layer ? last_layer : max_layer;
- if (tex->is_flushing_texture)
- continue;
+ for (layer = first_layer; layer <= checked_last_layer; layer++) {
+ surf_tmpl.u.tex.first_layer = layer;
+ surf_tmpl.u.tex.last_layer = layer;
+
+ zsurf = rctx->context.create_surface(&rctx->context, &texture->resource.b.b, &surf_tmpl);
- si_blit_uncompress_depth(&rctx->context, tex, NULL);
+ r600_blitter_begin(&rctx->context, R600_DECOMPRESS);
+ util_blitter_custom_depth_stencil(rctx->blitter, zsurf, NULL, ~0,
+ rctx->custom_dsa_flush_inplace,
+ 1.0f);
+ r600_blitter_end(&rctx->context);
+
+ pipe_surface_reference(&zsurf, NULL);
+ }
+
+ /* The texture will always be dirty if some layers aren't flushed.
+ * I don't think this case occurs often though. */
+ if (first_layer == 0 && last_layer == max_layer) {
+ texture->dirty_db_mask &= ~(1 << level);
+ }
}
+}
+
+void si_flush_depth_textures(struct r600_context *rctx,
+ struct r600_textures_info *textures)
+{
+ unsigned i;
- /* also check CB here */
- for (i = 0; i < rctx->framebuffer.nr_cbufs; i++) {
+ for (i = 0; i < textures->n_views; ++i) {
+ struct pipe_sampler_view *view;
struct r600_resource_texture *tex;
- tex = (struct r600_resource_texture *)rctx->framebuffer.cbufs[i]->texture;
- if (!tex->is_depth)
- continue;
+ view = &textures->views[i]->base;
+ if (!view) continue;
- if (tex->is_flushing_texture)
+ tex = (struct r600_resource_texture *)view->texture;
+ if (!tex->is_depth || tex->is_flushing_texture)
continue;
- si_blit_uncompress_depth(&rctx->context, tex, NULL);
+ si_blit_decompress_depth_in_place(rctx, tex,
+ view->u.tex.first_level, view->u.tex.last_level,
+ 0, u_max_layer(&tex->resource.b.b, view->u.tex.first_level));
}
}
@@ -322,8 +377,12 @@ static void r600_resource_copy_region(struct pipe_context *ctx,
return;
}
- if (rsrc->is_depth && !rsrc->is_flushing_texture)
- r600_texture_depth_flush(ctx, src, NULL);
+ /* This must be done before entering u_blitter to avoid recursion. */
+ if (rsrc->is_depth && !rsrc->is_flushing_texture) {
+ si_blit_decompress_depth_in_place(rctx, rsrc,
+ src_level, src_level,
+ src_box->z, src_box->z + src_box->depth - 1);
+ }
restore_orig[0] = restore_orig[1] = FALSE;
@@ -376,8 +435,12 @@ static void si_blit(struct pipe_context *ctx,
return;
}
- if (rsrc->is_depth && !rsrc->is_flushing_texture)
- r600_texture_depth_flush(ctx, info->src.resource, NULL);
+ if (rsrc->is_depth && !rsrc->is_flushing_texture) {
+ si_blit_decompress_depth_in_place(rctx, rsrc,
+ info->src.level, info->src.level,
+ info->src.box.z,
+ info->src.box.z + info->src.box.depth - 1);
+ }
r600_blitter_begin(ctx, R600_BLIT);
util_blitter_blit(rctx->blitter, info);