author    Marek Olšák <[email protected]>  2018-04-08 21:53:25 -0400
committer Marek Olšák <[email protected]>  2018-04-27 17:56:04 -0400
commit    788d66553af418d3a195b58c3debd87a40f8174c (patch)
tree      11ce5a6780896f615ab7291b8514fe0ad96248ca /src/gallium/drivers/radeonsi
parent    6fadfc01c6f1600de89e8cd74f2ba78f503b5e6b (diff)
radeonsi: rename r600_texture::resource to buffer
r600_resource could be renamed to si_buffer.

Reviewed-by: Nicolai Hähnle <[email protected]>
Diffstat (limited to 'src/gallium/drivers/radeonsi')
-rw-r--r--  src/gallium/drivers/radeonsi/cik_sdma.c       |  22
-rw-r--r--  src/gallium/drivers/radeonsi/si_blit.c        |  50
-rw-r--r--  src/gallium/drivers/radeonsi/si_clear.c       |  36
-rw-r--r--  src/gallium/drivers/radeonsi/si_descriptors.c |  24
-rw-r--r--  src/gallium/drivers/radeonsi/si_dma.c         |  16
-rw-r--r--  src/gallium/drivers/radeonsi/si_pipe.h        |   4
-rw-r--r--  src/gallium/drivers/radeonsi/si_state.c       |  70
-rw-r--r--  src/gallium/drivers/radeonsi/si_texture.c     | 168
-rw-r--r--  src/gallium/drivers/radeonsi/si_uvd.c         |  10
9 files changed, 200 insertions, 200 deletions
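
For orientation before reading the full diff: the patch is mechanical. The r600_resource member of struct r600_texture is renamed from resource to buffer (see the si_pipe.h hunk below), and every access site is updated to match. The following sketch only illustrates that before/after pattern with a trimmed-down placeholder struct; the real definitions in si_pipe.h carry many more fields.

#include <stdint.h>

/* Trimmed-down placeholder; the real r600_resource has many more fields. */
struct r600_resource {
        uint64_t gpu_address;
};

struct r600_texture {
        struct r600_resource buffer;   /* before this patch: "struct r600_resource resource;" */
        /* surface, cmask, fmask, dcc_offset, ... omitted */
};

/* Every access site changes the same way, e.g. computing a mip level's GPU address: */
static uint64_t texture_level_address(const struct r600_texture *tex, uint64_t level_offset)
{
        return tex->buffer.gpu_address + level_offset;   /* was: tex->resource.gpu_address + ... */
}
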
diff --git a/src/gallium/drivers/radeonsi/cik_sdma.c b/src/gallium/drivers/radeonsi/cik_sdma.c
index 690e7ff5499..7a4b479b7eb 100644
--- a/src/gallium/drivers/radeonsi/cik_sdma.c
+++ b/src/gallium/drivers/radeonsi/cik_sdma.c
@@ -147,9 +147,9 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
struct r600_texture *rsrc = (struct r600_texture*)src;
struct r600_texture *rdst = (struct r600_texture*)dst;
unsigned bpp = rdst->surface.bpe;
- uint64_t dst_address = rdst->resource.gpu_address +
+ uint64_t dst_address = rdst->buffer.gpu_address +
rdst->surface.u.legacy.level[dst_level].offset;
- uint64_t src_address = rsrc->resource.gpu_address +
+ uint64_t src_address = rsrc->buffer.gpu_address +
rsrc->surface.u.legacy.level[src_level].offset;
unsigned dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
unsigned src_mode = rsrc->surface.u.legacy.level[src_level].mode;
@@ -167,13 +167,13 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
unsigned src_pitch = rsrc->surface.u.legacy.level[src_level].nblk_x;
uint64_t dst_slice_pitch = ((uint64_t)rdst->surface.u.legacy.level[dst_level].slice_size_dw * 4) / bpp;
uint64_t src_slice_pitch = ((uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4) / bpp;
- unsigned dst_width = minify_as_blocks(rdst->resource.b.b.width0,
+ unsigned dst_width = minify_as_blocks(rdst->buffer.b.b.width0,
dst_level, rdst->surface.blk_w);
- unsigned src_width = minify_as_blocks(rsrc->resource.b.b.width0,
+ unsigned src_width = minify_as_blocks(rsrc->buffer.b.b.width0,
src_level, rsrc->surface.blk_w);
- unsigned dst_height = minify_as_blocks(rdst->resource.b.b.height0,
+ unsigned dst_height = minify_as_blocks(rdst->buffer.b.b.height0,
dst_level, rdst->surface.blk_h);
- unsigned src_height = minify_as_blocks(rsrc->resource.b.b.height0,
+ unsigned src_height = minify_as_blocks(rsrc->buffer.b.b.height0,
src_level, rsrc->surface.blk_h);
unsigned srcx = src_box->x / rsrc->surface.blk_w;
unsigned srcy = src_box->y / rsrc->surface.blk_h;
@@ -186,10 +186,10 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
assert(dst_level <= dst->last_level);
assert(rdst->surface.u.legacy.level[dst_level].offset +
dst_slice_pitch * bpp * (dstz + src_box->depth) <=
- rdst->resource.buf->size);
+ rdst->buffer.buf->size);
assert(rsrc->surface.u.legacy.level[src_level].offset +
src_slice_pitch * bpp * (srcz + src_box->depth) <=
- rsrc->resource.buf->size);
+ rsrc->buffer.buf->size);
if (!si_prepare_for_dma_blit(sctx, rdst, dst_level, dstx, dsty,
dstz, rsrc, src_level, src_box))
@@ -232,7 +232,7 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
srcy + copy_height != (1 << 14)))) {
struct radeon_winsys_cs *cs = sctx->dma_cs;
- si_need_dma_space(sctx, 13, &rdst->resource, &rsrc->resource);
+ si_need_dma_space(sctx, 13, &rdst->buffer, &rsrc->buffer);
radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
CIK_SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW, 0) |
@@ -395,7 +395,7 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
struct radeon_winsys_cs *cs = sctx->dma_cs;
uint32_t direction = linear == rdst ? 1u << 31 : 0;
- si_need_dma_space(sctx, 14, &rdst->resource, &rsrc->resource);
+ si_need_dma_space(sctx, 14, &rdst->buffer, &rsrc->buffer);
radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
CIK_SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW, 0) |
@@ -489,7 +489,7 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
dstx + copy_width != (1 << 14)))) {
struct radeon_winsys_cs *cs = sctx->dma_cs;
- si_need_dma_space(sctx, 15, &rdst->resource, &rsrc->resource);
+ si_need_dma_space(sctx, 15, &rdst->buffer, &rsrc->buffer);
radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
CIK_SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW, 0));
diff --git a/src/gallium/drivers/radeonsi/si_blit.c b/src/gallium/drivers/radeonsi/si_blit.c
index bd20a900e69..1cbd26f46e0 100644
--- a/src/gallium/drivers/radeonsi/si_blit.c
+++ b/src/gallium/drivers/radeonsi/si_blit.c
@@ -120,7 +120,7 @@ si_blit_dbcb_copy(struct si_context *sctx,
/* The smaller the mipmap level, the less layers there are
* as far as 3D textures are concerned. */
- max_layer = util_max_layer(&src->resource.b.b, level);
+ max_layer = util_max_layer(&src->buffer.b.b, level);
checked_last_layer = MIN2(last_layer, max_layer);
surf_tmpl.u.tex.level = level;
@@ -128,14 +128,14 @@ si_blit_dbcb_copy(struct si_context *sctx,
for (layer = first_layer; layer <= checked_last_layer; layer++) {
struct pipe_surface *zsurf, *cbsurf;
- surf_tmpl.format = src->resource.b.b.format;
+ surf_tmpl.format = src->buffer.b.b.format;
surf_tmpl.u.tex.first_layer = layer;
surf_tmpl.u.tex.last_layer = layer;
- zsurf = sctx->b.create_surface(&sctx->b, &src->resource.b.b, &surf_tmpl);
+ zsurf = sctx->b.create_surface(&sctx->b, &src->buffer.b.b, &surf_tmpl);
- surf_tmpl.format = dst->resource.b.b.format;
- cbsurf = sctx->b.create_surface(&sctx->b, &dst->resource.b.b, &surf_tmpl);
+ surf_tmpl.format = dst->buffer.b.b.format;
+ cbsurf = sctx->b.create_surface(&sctx->b, &dst->buffer.b.b, &surf_tmpl);
for (sample = first_sample; sample <= last_sample; sample++) {
if (sample != sctx->dbcb_copy_sample) {
@@ -154,7 +154,7 @@ si_blit_dbcb_copy(struct si_context *sctx,
}
if (first_layer == 0 && last_layer >= max_layer &&
- first_sample == 0 && last_sample >= u_max_sample(&src->resource.b.b))
+ first_sample == 0 && last_sample >= u_max_sample(&src->buffer.b.b))
fully_copied_levels |= 1u << level;
}
@@ -178,7 +178,7 @@ void si_blit_decompress_depth(struct pipe_context *ctx,
assert(staging != NULL && "use si_blit_decompress_zs_in_place instead");
- desc = util_format_description(staging->resource.b.b.format);
+ desc = util_format_description(staging->buffer.b.b.format);
if (util_format_has_depth(desc))
planes |= PIPE_MASK_Z;
@@ -212,7 +212,7 @@ si_blit_decompress_zs_planes_in_place(struct si_context *sctx,
sctx->db_flush_depth_inplace = true;
si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
- surf_tmpl.format = texture->resource.b.b.format;
+ surf_tmpl.format = texture->buffer.b.b.format;
sctx->decompression_enabled = true;
@@ -223,14 +223,14 @@ si_blit_decompress_zs_planes_in_place(struct si_context *sctx,
/* The smaller the mipmap level, the less layers there are
* as far as 3D textures are concerned. */
- max_layer = util_max_layer(&texture->resource.b.b, level);
+ max_layer = util_max_layer(&texture->buffer.b.b, level);
checked_last_layer = MIN2(last_layer, max_layer);
for (layer = first_layer; layer <= checked_last_layer; layer++) {
surf_tmpl.u.tex.first_layer = layer;
surf_tmpl.u.tex.last_layer = layer;
- zsurf = sctx->b.create_surface(&sctx->b, &texture->resource.b.b, &surf_tmpl);
+ zsurf = sctx->b.create_surface(&sctx->b, &texture->buffer.b.b, &surf_tmpl);
si_blitter_begin(sctx, SI_DECOMPRESS);
util_blitter_custom_depth_stencil(sctx->blitter, zsurf, NULL, ~0,
@@ -341,14 +341,14 @@ si_decompress_depth(struct si_context *sctx,
*/
if (copy_planes &&
(tex->flushed_depth_texture ||
- si_init_flushed_depth_texture(&sctx->b, &tex->resource.b.b, NULL))) {
+ si_init_flushed_depth_texture(&sctx->b, &tex->buffer.b.b, NULL))) {
struct r600_texture *dst = tex->flushed_depth_texture;
unsigned fully_copied_levels;
unsigned levels = 0;
assert(tex->flushed_depth_texture);
- if (util_format_is_depth_and_stencil(dst->resource.b.b.format))
+ if (util_format_is_depth_and_stencil(dst->buffer.b.b.format))
copy_planes = PIPE_MASK_Z | PIPE_MASK_S;
if (copy_planes & PIPE_MASK_Z) {
@@ -363,7 +363,7 @@ si_decompress_depth(struct si_context *sctx,
fully_copied_levels = si_blit_dbcb_copy(
sctx, tex, dst, copy_planes, levels,
first_layer, last_layer,
- 0, u_max_sample(&tex->resource.b.b));
+ 0, u_max_sample(&tex->buffer.b.b));
if (copy_planes & PIPE_MASK_Z)
tex->dirty_level_mask &= ~fully_copied_levels;
@@ -398,15 +398,15 @@ si_decompress_depth(struct si_context *sctx,
/* Only in-place decompression needs to flush DB caches, or
* when we don't decompress but TC-compatible planes are dirty.
*/
- si_make_DB_shader_coherent(sctx, tex->resource.b.b.nr_samples,
+ si_make_DB_shader_coherent(sctx, tex->buffer.b.b.nr_samples,
inplace_planes & PIPE_MASK_S,
tc_compat_htile);
}
/* set_framebuffer_state takes care of coherency for single-sample.
* The DB->CB copy uses CB for the final writes.
*/
- if (copy_planes && tex->resource.b.b.nr_samples > 1)
- si_make_CB_shader_coherent(sctx, tex->resource.b.b.nr_samples,
+ if (copy_planes && tex->buffer.b.b.nr_samples > 1)
+ si_make_CB_shader_coherent(sctx, tex->buffer.b.b.nr_samples,
false);
}
@@ -434,7 +434,7 @@ si_decompress_sampler_depth_textures(struct si_context *sctx,
si_decompress_depth(sctx, tex,
sview->is_stencil_sampler ? PIPE_MASK_S : PIPE_MASK_Z,
view->u.tex.first_level, view->u.tex.last_level,
- 0, util_max_layer(&tex->resource.b.b, view->u.tex.first_level));
+ 0, util_max_layer(&tex->buffer.b.b, view->u.tex.first_level));
}
}
@@ -483,17 +483,17 @@ static void si_blit_decompress_color(struct si_context *sctx,
/* The smaller the mipmap level, the less layers there are
* as far as 3D textures are concerned. */
- max_layer = util_max_layer(&rtex->resource.b.b, level);
+ max_layer = util_max_layer(&rtex->buffer.b.b, level);
checked_last_layer = MIN2(last_layer, max_layer);
for (layer = first_layer; layer <= checked_last_layer; layer++) {
struct pipe_surface *cbsurf, surf_tmpl;
- surf_tmpl.format = rtex->resource.b.b.format;
+ surf_tmpl.format = rtex->buffer.b.b.format;
surf_tmpl.u.tex.level = level;
surf_tmpl.u.tex.first_layer = layer;
surf_tmpl.u.tex.last_layer = layer;
- cbsurf = sctx->b.create_surface(&sctx->b, &rtex->resource.b.b, &surf_tmpl);
+ cbsurf = sctx->b.create_surface(&sctx->b, &rtex->buffer.b.b, &surf_tmpl);
/* Required before and after FMASK and DCC_DECOMPRESS. */
if (custom_blend == sctx->custom_blend_fmask_decompress ||
@@ -519,7 +519,7 @@ static void si_blit_decompress_color(struct si_context *sctx,
}
sctx->decompression_enabled = false;
- si_make_CB_shader_coherent(sctx, rtex->resource.b.b.nr_samples,
+ si_make_CB_shader_coherent(sctx, rtex->buffer.b.b.nr_samples,
vi_dcc_enabled(rtex, first_level));
}
@@ -532,7 +532,7 @@ si_decompress_color_texture(struct si_context *sctx, struct r600_texture *tex,
return;
si_blit_decompress_color(sctx, tex, first_level, last_level, 0,
- util_max_layer(&tex->resource.b.b, first_level),
+ util_max_layer(&tex->buffer.b.b, first_level),
false);
}
@@ -750,7 +750,7 @@ static void si_decompress_resident_textures(struct si_context *sctx)
si_decompress_depth(sctx, tex,
sview->is_stencil_sampler ? PIPE_MASK_S : PIPE_MASK_Z,
view->u.tex.first_level, view->u.tex.last_level,
- 0, util_max_layer(&tex->resource.b.b, view->u.tex.first_level));
+ 0, util_max_layer(&tex->buffer.b.b, view->u.tex.first_level));
}
}
@@ -1328,8 +1328,8 @@ void si_decompress_dcc(struct si_context *sctx, struct r600_texture *rtex)
if (!rtex->dcc_offset)
return;
- si_blit_decompress_color(sctx, rtex, 0, rtex->resource.b.b.last_level,
- 0, util_max_layer(&rtex->resource.b.b, 0),
+ si_blit_decompress_color(sctx, rtex, 0, rtex->buffer.b.b.last_level,
+ 0, util_max_layer(&rtex->buffer.b.b, 0),
true);
}
diff --git a/src/gallium/drivers/radeonsi/si_clear.c b/src/gallium/drivers/radeonsi/si_clear.c
index f01b95c9325..0de51488f59 100644
--- a/src/gallium/drivers/radeonsi/si_clear.c
+++ b/src/gallium/drivers/radeonsi/si_clear.c
@@ -234,19 +234,19 @@ void vi_dcc_clear_level(struct si_context *sctx,
dcc_buffer = &rtex->dcc_separate_buffer->b.b;
dcc_offset = 0;
} else {
- dcc_buffer = &rtex->resource.b.b;
+ dcc_buffer = &rtex->buffer.b.b;
dcc_offset = rtex->dcc_offset;
}
if (sctx->chip_class >= GFX9) {
/* Mipmap level clears aren't implemented. */
- assert(rtex->resource.b.b.last_level == 0);
+ assert(rtex->buffer.b.b.last_level == 0);
/* 4x and 8x MSAA needs a sophisticated compute shader for
* the clear. See AMDVLK. */
- assert(rtex->resource.b.b.nr_samples <= 2);
+ assert(rtex->buffer.b.b.nr_samples <= 2);
clear_size = rtex->surface.dcc_size;
} else {
- unsigned num_layers = util_num_layers(&rtex->resource.b.b, level);
+ unsigned num_layers = util_num_layers(&rtex->buffer.b.b, level);
/* If this is 0, fast clear isn't possible. (can occur with MSAA) */
assert(rtex->surface.u.legacy.level[level].dcc_fast_clear_size);
@@ -254,7 +254,7 @@ void vi_dcc_clear_level(struct si_context *sctx,
* dcc_fast_clear_size bytes for each layer. A compute shader
* would be more efficient than separate per-layer clear operations.
*/
- assert(rtex->resource.b.b.nr_samples <= 2 || num_layers == 1);
+ assert(rtex->buffer.b.b.nr_samples <= 2 || num_layers == 1);
dcc_offset += rtex->surface.u.legacy.level[level].dcc_offset;
clear_size = rtex->surface.u.legacy.level[level].dcc_fast_clear_size *
@@ -272,14 +272,14 @@ void vi_dcc_clear_level(struct si_context *sctx,
static void si_set_optimal_micro_tile_mode(struct si_screen *sscreen,
struct r600_texture *rtex)
{
- if (rtex->resource.b.is_shared ||
- rtex->resource.b.b.nr_samples <= 1 ||
+ if (rtex->buffer.b.is_shared ||
+ rtex->buffer.b.b.nr_samples <= 1 ||
rtex->surface.micro_tile_mode == rtex->last_msaa_resolve_target_micro_mode)
return;
assert(sscreen->info.chip_class >= GFX9 ||
rtex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);
- assert(rtex->resource.b.b.last_level == 0);
+ assert(rtex->buffer.b.b.last_level == 0);
if (sscreen->info.chip_class >= GFX9) {
/* 4K or larger tiles only. 0 is linear. 1-3 are 256B tiles. */
@@ -411,12 +411,12 @@ static void si_do_fast_color_clear(struct si_context *sctx,
* organized in a 2D plane).
*/
if (sctx->chip_class >= GFX9 &&
- tex->resource.b.b.last_level > 0)
+ tex->buffer.b.b.last_level > 0)
continue;
/* the clear is allowed if all layers are bound */
if (fb->cbufs[i]->u.tex.first_layer != 0 ||
- fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->resource.b.b, 0)) {
+ fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->buffer.b.b, 0)) {
continue;
}
@@ -429,8 +429,8 @@ static void si_do_fast_color_clear(struct si_context *sctx,
* because there is no way to communicate the clear color among
* all clients
*/
- if (tex->resource.b.is_shared &&
- !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
+ if (tex->buffer.b.is_shared &&
+ !(tex->buffer.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
continue;
/* fast color clear with 1D tiling doesn't work on old kernels and CIK */
@@ -466,9 +466,9 @@ static void si_do_fast_color_clear(struct si_context *sctx,
*
* This helps on both dGPUs and APUs, even small APUs like Mullins.
*/
- bool too_small = tex->resource.b.b.nr_samples <= 1 &&
- tex->resource.b.b.width0 *
- tex->resource.b.b.height0 <= 512 * 512;
+ bool too_small = tex->buffer.b.b.nr_samples <= 1 &&
+ tex->buffer.b.b.width0 *
+ tex->buffer.b.b.height0 <= 512 * 512;
/* Try to clear DCC first, otherwise try CMASK. */
if (vi_dcc_enabled(tex, 0)) {
@@ -483,7 +483,7 @@ static void si_do_fast_color_clear(struct si_context *sctx,
!tex->surface.u.legacy.level[level].dcc_fast_clear_size)
continue;
- if (!vi_get_fast_clear_parameters(tex->resource.b.b.format,
+ if (!vi_get_fast_clear_parameters(tex->buffer.b.b.format,
fb->cbufs[i]->format,
color, &reset_value,
&eliminate_needed))
@@ -493,7 +493,7 @@ static void si_do_fast_color_clear(struct si_context *sctx,
continue;
/* DCC fast clear with MSAA should clear CMASK to 0xC. */
- if (tex->resource.b.b.nr_samples >= 2 && tex->cmask.size) {
+ if (tex->buffer.b.b.nr_samples >= 2 && tex->cmask.size) {
/* TODO: This doesn't work with MSAA. */
if (eliminate_needed)
continue;
@@ -585,7 +585,7 @@ static void si_clear(struct pipe_context *ctx, unsigned buffers,
if (zstex &&
si_htile_enabled(zstex, zsbuf->u.tex.level) &&
zsbuf->u.tex.first_layer == 0 &&
- zsbuf->u.tex.last_layer == util_max_layer(&zstex->resource.b.b, 0)) {
+ zsbuf->u.tex.last_layer == util_max_layer(&zstex->buffer.b.b, 0)) {
/* TC-compatible HTILE only supports depth clears to 0 or 1. */
if (buffers & PIPE_CLEAR_DEPTH &&
(!zstex->tc_compatible_htile ||
diff --git a/src/gallium/drivers/radeonsi/si_descriptors.c b/src/gallium/drivers/radeonsi/si_descriptors.c
index 6771b62a9fb..9f2433a5b72 100644
--- a/src/gallium/drivers/radeonsi/si_descriptors.c
+++ b/src/gallium/drivers/radeonsi/si_descriptors.c
@@ -259,7 +259,7 @@ static void si_sampler_view_add_buffer(struct si_context *sctx,
struct r600_texture *tex = (struct r600_texture*)resource;
if (tex->is_depth && !si_can_sample_zs(tex, is_stencil_sampler))
- resource = &tex->flushed_depth_texture->resource.b.b;
+ resource = &tex->flushed_depth_texture->buffer.b.b;
}
rres = r600_resource(resource);
@@ -330,7 +330,7 @@ void si_set_mutable_tex_desc_fields(struct si_screen *sscreen,
is_stencil = false;
}
- va = tex->resource.gpu_address;
+ va = tex->buffer.gpu_address;
if (sscreen->info.chip_class >= GFX9) {
/* Only stencil_offset needs to be added here. */
@@ -358,7 +358,7 @@ void si_set_mutable_tex_desc_fields(struct si_screen *sscreen,
state[7] = 0;
if (vi_dcc_enabled(tex, first_level)) {
- meta_va = (!tex->dcc_separate_buffer ? tex->resource.gpu_address : 0) +
+ meta_va = (!tex->dcc_separate_buffer ? tex->buffer.gpu_address : 0) +
tex->dcc_offset;
if (sscreen->info.chip_class == VI) {
@@ -368,7 +368,7 @@ void si_set_mutable_tex_desc_fields(struct si_screen *sscreen,
meta_va |= (uint32_t)tex->surface.tile_swizzle << 8;
} else if (vi_tc_compat_htile_enabled(tex, first_level)) {
- meta_va = tex->resource.gpu_address + tex->htile_offset;
+ meta_va = tex->buffer.gpu_address + tex->htile_offset;
}
if (meta_va) {
@@ -437,7 +437,7 @@ static void si_set_sampler_view_desc(struct si_context *sctx,
{
struct pipe_sampler_view *view = &sview->base;
struct r600_texture *rtex = (struct r600_texture *)view->texture;
- bool is_buffer = rtex->resource.b.b.target == PIPE_BUFFER;
+ bool is_buffer = rtex->buffer.b.b.target == PIPE_BUFFER;
if (unlikely(!is_buffer && sview->dcc_incompatible)) {
if (vi_dcc_enabled(rtex, view->u.tex.first_level))
@@ -451,7 +451,7 @@ static void si_set_sampler_view_desc(struct si_context *sctx,
memcpy(desc, sview->state, 8*4);
if (is_buffer) {
- si_set_buf_desc_address(&rtex->resource,
+ si_set_buf_desc_address(&rtex->buffer,
sview->base.u.buf.offset,
desc + 4);
} else {
@@ -517,8 +517,8 @@ static void si_set_sampler_view(struct si_context *sctx,
si_set_sampler_view_desc(sctx, rview,
samplers->sampler_states[slot], desc);
- if (rtex->resource.b.b.target == PIPE_BUFFER) {
- rtex->resource.bind_history |= PIPE_BIND_SAMPLER_VIEW;
+ if (rtex->buffer.b.b.target == PIPE_BUFFER) {
+ rtex->buffer.bind_history |= PIPE_BIND_SAMPLER_VIEW;
samplers->needs_depth_decompress_mask &= ~(1u << slot);
samplers->needs_color_decompress_mask &= ~(1u << slot);
} else {
@@ -906,9 +906,9 @@ void si_update_ps_colorbuf0_slot(struct si_context *sctx)
*/
si_texture_disable_dcc(sctx, tex);
- if (tex->resource.b.b.nr_samples <= 1 && tex->cmask_buffer) {
+ if (tex->buffer.b.b.nr_samples <= 1 && tex->cmask_buffer) {
/* Disable CMASK. */
- assert(tex->cmask_buffer != &tex->resource);
+ assert(tex->cmask_buffer != &tex->buffer);
si_eliminate_fast_color_clear(sctx, tex);
si_texture_discard_cmask(sctx->screen, tex);
}
@@ -925,9 +925,9 @@ void si_update_ps_colorbuf0_slot(struct si_context *sctx)
memset(desc, 0, 16 * 4);
si_set_shader_image_desc(sctx, &view, true, desc, desc + 8);
- pipe_resource_reference(&buffers->buffers[slot], &tex->resource.b.b);
+ pipe_resource_reference(&buffers->buffers[slot], &tex->buffer.b.b);
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- &tex->resource, RADEON_USAGE_READ,
+ &tex->buffer, RADEON_USAGE_READ,
RADEON_PRIO_SHADER_RW_IMAGE);
buffers->enabled_mask |= 1u << slot;
} else {
diff --git a/src/gallium/drivers/radeonsi/si_dma.c b/src/gallium/drivers/radeonsi/si_dma.c
index 909c301d9f8..7bdee525be1 100644
--- a/src/gallium/drivers/radeonsi/si_dma.c
+++ b/src/gallium/drivers/radeonsi/si_dma.c
@@ -163,7 +163,7 @@ static void si_dma_copy_tile(struct si_context *ctx,
tiled_y = detile ? src_y : dst_y;
tiled_z = detile ? src_z : dst_z;
- assert(!util_format_is_depth_and_stencil(rtiled->resource.b.b.format));
+ assert(!util_format_is_depth_and_stencil(rtiled->buffer.b.b.format));
array_mode = G_009910_ARRAY_MODE(tile_mode);
slice_tile_max = (rtiled->surface.u.legacy.level[tiled_lvl].nblk_x *
@@ -184,14 +184,14 @@ static void si_dma_copy_tile(struct si_context *ctx,
/* Non-depth modes don't have TILE_SPLIT set. */
tile_split = util_logbase2(rtiled->surface.u.legacy.tile_split >> 6);
nbanks = G_009910_NUM_BANKS(tile_mode);
- base += rtiled->resource.gpu_address;
- addr += rlinear->resource.gpu_address;
+ base += rtiled->buffer.gpu_address;
+ addr += rlinear->buffer.gpu_address;
pipe_config = G_009910_PIPE_CONFIG(tile_mode);
mt = G_009910_MICRO_TILE_MODE(tile_mode);
size = copy_height * pitch;
ncopy = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
- si_need_dma_space(ctx, ncopy * 9, &rdst->resource, &rsrc->resource);
+ si_need_dma_space(ctx, ncopy * 9, &rdst->buffer, &rsrc->buffer);
for (i = 0; i < ncopy; i++) {
cheight = copy_height;
@@ -271,16 +271,16 @@ static void si_dma_copy(struct pipe_context *ctx,
bpp = rdst->surface.bpe;
dst_pitch = rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.bpe;
src_pitch = rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.bpe;
- src_w = u_minify(rsrc->resource.b.b.width0, src_level);
- dst_w = u_minify(rdst->resource.b.b.width0, dst_level);
+ src_w = u_minify(rsrc->buffer.b.b.width0, src_level);
+ dst_w = u_minify(rdst->buffer.b.b.width0, dst_level);
dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
src_mode = rsrc->surface.u.legacy.level[src_level].mode;
if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w ||
src_box->width != src_w ||
- src_box->height != u_minify(rsrc->resource.b.b.height0, src_level) ||
- src_box->height != u_minify(rdst->resource.b.b.height0, dst_level) ||
+ src_box->height != u_minify(rsrc->buffer.b.b.height0, src_level) ||
+ src_box->height != u_minify(rdst->buffer.b.b.height0, dst_level) ||
rsrc->surface.u.legacy.level[src_level].nblk_y !=
rdst->surface.u.legacy.level[dst_level].nblk_y) {
/* FIXME si can do partial blit */
diff --git a/src/gallium/drivers/radeonsi/si_pipe.h b/src/gallium/drivers/radeonsi/si_pipe.h
index 6da1d73d26d..823509524d4 100644
--- a/src/gallium/drivers/radeonsi/si_pipe.h
+++ b/src/gallium/drivers/radeonsi/si_pipe.h
@@ -250,7 +250,7 @@ struct r600_cmask_info {
};
struct r600_texture {
- struct r600_resource resource;
+ struct r600_resource buffer;
struct radeon_surf surface;
uint64_t size;
@@ -1296,7 +1296,7 @@ r600_resource_reference(struct r600_resource **ptr, struct r600_resource *res)
static inline void
r600_texture_reference(struct r600_texture **ptr, struct r600_texture *res)
{
- pipe_resource_reference((struct pipe_resource **)ptr, &res->resource.b.b);
+ pipe_resource_reference((struct pipe_resource **)ptr, &res->buffer.b.b);
}
static inline bool
diff --git a/src/gallium/drivers/radeonsi/si_state.c b/src/gallium/drivers/radeonsi/si_state.c
index 41c614ab7e9..0c4e6d08855 100644
--- a/src/gallium/drivers/radeonsi/si_state.c
+++ b/src/gallium/drivers/radeonsi/si_state.c
@@ -1893,7 +1893,7 @@ static unsigned si_tex_compare(unsigned compare)
static unsigned si_tex_dim(struct si_screen *sscreen, struct r600_texture *rtex,
unsigned view_target, unsigned nr_samples)
{
- unsigned res_target = rtex->resource.b.b.target;
+ unsigned res_target = rtex->buffer.b.b.target;
if (view_target == PIPE_TEXTURE_CUBE ||
view_target == PIPE_TEXTURE_CUBE_ARRAY)
@@ -2408,8 +2408,8 @@ static void si_initialize_color_surface(struct si_context *sctx,
color_attrib = S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == PIPE_SWIZZLE_1 ||
util_format_is_intensity(surf->base.format));
- if (rtex->resource.b.b.nr_samples > 1) {
- unsigned log_samples = util_logbase2(rtex->resource.b.b.nr_samples);
+ if (rtex->buffer.b.b.nr_samples > 1) {
+ unsigned log_samples = util_logbase2(rtex->buffer.b.b.nr_samples);
color_attrib |= S_028C74_NUM_SAMPLES(log_samples) |
S_028C74_NUM_FRAGMENTS(log_samples);
@@ -2436,7 +2436,7 @@ static void si_initialize_color_surface(struct si_context *sctx,
if (!sctx->screen->info.has_dedicated_vram)
min_compressed_block_size = V_028C78_MIN_BLOCK_SIZE_64B;
- if (rtex->resource.b.b.nr_samples > 1) {
+ if (rtex->buffer.b.b.nr_samples > 1) {
if (rtex->surface.bpe == 1)
max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
else if (rtex->surface.bpe == 2)
@@ -2458,14 +2458,14 @@ static void si_initialize_color_surface(struct si_context *sctx,
S_028C6C_SLICE_MAX(surf->base.u.tex.last_layer);
if (sctx->chip_class >= GFX9) {
- unsigned mip0_depth = util_max_layer(&rtex->resource.b.b, 0);
+ unsigned mip0_depth = util_max_layer(&rtex->buffer.b.b, 0);
color_view |= S_028C6C_MIP_LEVEL(surf->base.u.tex.level);
color_attrib |= S_028C74_MIP0_DEPTH(mip0_depth) |
S_028C74_RESOURCE_TYPE(rtex->surface.u.gfx9.resource_type);
surf->cb_color_attrib2 = S_028C68_MIP0_WIDTH(surf->width0 - 1) |
S_028C68_MIP0_HEIGHT(surf->height0 - 1) |
- S_028C68_MAX_MIP(rtex->resource.b.b.last_level);
+ S_028C68_MAX_MIP(rtex->buffer.b.b.last_level);
}
surf->cb_color_view = color_view;
@@ -2492,7 +2492,7 @@ static void si_init_depth_surface(struct si_context *sctx,
assert(format != V_028040_Z_INVALID);
if (format == V_028040_Z_INVALID)
- PRINT_ERR("Invalid DB format: %d, disabling DB.\n", rtex->resource.b.b.format);
+ PRINT_ERR("Invalid DB format: %d, disabling DB.\n", rtex->buffer.b.b.format);
surf->db_depth_view = S_028008_SLICE_START(surf->base.u.tex.first_layer) |
S_028008_SLICE_MAX(surf->base.u.tex.last_layer);
@@ -2501,20 +2501,20 @@ static void si_init_depth_surface(struct si_context *sctx,
if (sctx->chip_class >= GFX9) {
assert(rtex->surface.u.gfx9.surf_offset == 0);
- surf->db_depth_base = rtex->resource.gpu_address >> 8;
- surf->db_stencil_base = (rtex->resource.gpu_address +
+ surf->db_depth_base = rtex->buffer.gpu_address >> 8;
+ surf->db_stencil_base = (rtex->buffer.gpu_address +
rtex->surface.u.gfx9.stencil_offset) >> 8;
z_info = S_028038_FORMAT(format) |
- S_028038_NUM_SAMPLES(util_logbase2(rtex->resource.b.b.nr_samples)) |
+ S_028038_NUM_SAMPLES(util_logbase2(rtex->buffer.b.b.nr_samples)) |
S_028038_SW_MODE(rtex->surface.u.gfx9.surf.swizzle_mode) |
- S_028038_MAXMIP(rtex->resource.b.b.last_level);
+ S_028038_MAXMIP(rtex->buffer.b.b.last_level);
s_info = S_02803C_FORMAT(stencil_format) |
S_02803C_SW_MODE(rtex->surface.u.gfx9.stencil.swizzle_mode);
surf->db_z_info2 = S_028068_EPITCH(rtex->surface.u.gfx9.surf.epitch);
surf->db_stencil_info2 = S_02806C_EPITCH(rtex->surface.u.gfx9.stencil.epitch);
surf->db_depth_view |= S_028008_MIPID(level);
- surf->db_depth_size = S_02801C_X_MAX(rtex->resource.b.b.width0 - 1) |
- S_02801C_Y_MAX(rtex->resource.b.b.height0 - 1);
+ surf->db_depth_size = S_02801C_X_MAX(rtex->buffer.b.b.width0 - 1) |
+ S_02801C_Y_MAX(rtex->buffer.b.b.height0 - 1);
if (si_htile_enabled(rtex, level)) {
z_info |= S_028038_TILE_SURFACE_ENABLE(1) |
@@ -2524,7 +2524,7 @@ static void si_init_depth_surface(struct si_context *sctx,
unsigned max_zplanes = 4;
if (rtex->db_render_format == PIPE_FORMAT_Z16_UNORM &&
- rtex->resource.b.b.nr_samples > 1)
+ rtex->buffer.b.b.nr_samples > 1)
max_zplanes = 2;
z_info |= S_028038_DECOMPRESS_ON_N_ZPLANES(max_zplanes + 1) |
@@ -2536,13 +2536,13 @@ static void si_init_depth_surface(struct si_context *sctx,
/* Stencil buffer workaround ported from the SI-CI-VI code.
* See that for explanation.
*/
- s_info |= S_02803C_ALLOW_EXPCLEAR(rtex->resource.b.b.nr_samples <= 1);
+ s_info |= S_02803C_ALLOW_EXPCLEAR(rtex->buffer.b.b.nr_samples <= 1);
} else {
/* Use all HTILE for depth if there's no stencil. */
s_info |= S_02803C_TILE_STENCIL_DISABLE(1);
}
- surf->db_htile_data_base = (rtex->resource.gpu_address +
+ surf->db_htile_data_base = (rtex->buffer.gpu_address +
rtex->htile_offset) >> 8;
surf->db_htile_surface = S_028ABC_FULL_CACHE(1) |
S_028ABC_PIPE_ALIGNED(rtex->surface.u.gfx9.htile.pipe_aligned) |
@@ -2554,13 +2554,13 @@ static void si_init_depth_surface(struct si_context *sctx,
assert(levelinfo->nblk_x % 8 == 0 && levelinfo->nblk_y % 8 == 0);
- surf->db_depth_base = (rtex->resource.gpu_address +
+ surf->db_depth_base = (rtex->buffer.gpu_address +
rtex->surface.u.legacy.level[level].offset) >> 8;
- surf->db_stencil_base = (rtex->resource.gpu_address +
+ surf->db_stencil_base = (rtex->buffer.gpu_address +
rtex->surface.u.legacy.stencil_level[level].offset) >> 8;
z_info = S_028040_FORMAT(format) |
- S_028040_NUM_SAMPLES(util_logbase2(rtex->resource.b.b.nr_samples));
+ S_028040_NUM_SAMPLES(util_logbase2(rtex->buffer.b.b.nr_samples));
s_info = S_028044_FORMAT(stencil_format);
surf->db_depth_info = S_02803C_ADDR5_SWIZZLE_MASK(!rtex->tc_compatible_htile);
@@ -2610,7 +2610,7 @@ static void si_init_depth_surface(struct si_context *sctx,
* Check piglit's arb_texture_multisample-stencil-clear
* test if you want to try changing this.
*/
- if (rtex->resource.b.b.nr_samples <= 1)
+ if (rtex->buffer.b.b.nr_samples <= 1)
s_info |= S_028044_ALLOW_EXPCLEAR(1);
} else if (!rtex->tc_compatible_htile) {
/* Use all of the htile_buffer for depth if there's no stencil.
@@ -2620,16 +2620,16 @@ static void si_init_depth_surface(struct si_context *sctx,
s_info |= S_028044_TILE_STENCIL_DISABLE(1);
}
- surf->db_htile_data_base = (rtex->resource.gpu_address +
+ surf->db_htile_data_base = (rtex->buffer.gpu_address +
rtex->htile_offset) >> 8;
surf->db_htile_surface = S_028ABC_FULL_CACHE(1);
if (rtex->tc_compatible_htile) {
surf->db_htile_surface |= S_028ABC_TC_COMPATIBLE(1);
- if (rtex->resource.b.b.nr_samples <= 1)
+ if (rtex->buffer.b.b.nr_samples <= 1)
z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(5);
- else if (rtex->resource.b.b.nr_samples <= 4)
+ else if (rtex->buffer.b.b.nr_samples <= 4)
z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(3);
else
z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(2);
@@ -2959,12 +2959,12 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
tex = (struct r600_texture *)cb->base.texture;
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- &tex->resource, RADEON_USAGE_READWRITE,
- tex->resource.b.b.nr_samples > 1 ?
+ &tex->buffer, RADEON_USAGE_READWRITE,
+ tex->buffer.b.b.nr_samples > 1 ?
RADEON_PRIO_COLOR_BUFFER_MSAA :
RADEON_PRIO_COLOR_BUFFER);
- if (tex->cmask_buffer && tex->cmask_buffer != &tex->resource) {
+ if (tex->cmask_buffer && tex->cmask_buffer != &tex->buffer) {
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
tex->cmask_buffer, RADEON_USAGE_READWRITE,
RADEON_PRIO_CMASK);
@@ -2977,7 +2977,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
RADEON_PRIO_DCC);
/* Compute mutable surface parameters. */
- cb_color_base = tex->resource.gpu_address >> 8;
+ cb_color_base = tex->buffer.gpu_address >> 8;
cb_color_fmask = 0;
cb_color_cmask = tex->cmask.base_address_reg;
cb_dcc_base = 0;
@@ -2988,7 +2988,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
cb_color_info &= C_028C70_FAST_CLEAR;
if (tex->fmask.size) {
- cb_color_fmask = (tex->resource.gpu_address + tex->fmask.offset) >> 8;
+ cb_color_fmask = (tex->buffer.gpu_address + tex->fmask.offset) >> 8;
cb_color_fmask |= tex->fmask.tile_swizzle;
}
@@ -3002,7 +3002,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
if (!is_msaa_resolve_dst)
cb_color_info |= S_028C70_DCC_ENABLE(1);
- cb_dcc_base = ((!tex->dcc_separate_buffer ? tex->resource.gpu_address : 0) +
+ cb_dcc_base = ((!tex->dcc_separate_buffer ? tex->buffer.gpu_address : 0) +
tex->dcc_offset) >> 8;
cb_dcc_base |= tex->surface.tile_swizzle;
}
@@ -3117,7 +3117,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
struct r600_texture *rtex = (struct r600_texture*)zb->base.texture;
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- &rtex->resource, RADEON_USAGE_READWRITE,
+ &rtex->buffer, RADEON_USAGE_READWRITE,
zb->base.texture->nr_samples > 1 ?
RADEON_PRIO_DEPTH_BUFFER_MSAA :
RADEON_PRIO_DEPTH_BUFFER);
@@ -3535,7 +3535,7 @@ si_make_texture_descriptor(struct si_screen *screen,
uint32_t *state,
uint32_t *fmask_state)
{
- struct pipe_resource *res = &tex->resource.b.b;
+ struct pipe_resource *res = &tex->buffer.b.b;
const struct util_format_description *desc;
unsigned char swizzle[4];
int first_non_void;
@@ -3714,7 +3714,7 @@ si_make_texture_descriptor(struct si_screen *screen,
state[4] |= S_008F20_BC_SWIZZLE(bc_swizzle);
state[5] |= S_008F24_MAX_MIP(res->nr_samples > 1 ?
util_logbase2(res->nr_samples) :
- tex->resource.b.b.last_level);
+ tex->buffer.b.b.last_level);
} else {
state[3] |= S_008F1C_POW2_PAD(res->last_level > 0);
state[4] |= S_008F20_DEPTH(depth - 1);
@@ -3739,7 +3739,7 @@ si_make_texture_descriptor(struct si_screen *screen,
if (tex->fmask.size) {
uint32_t data_format, num_format;
- va = tex->resource.gpu_address + tex->fmask.offset;
+ va = tex->buffer.gpu_address + tex->fmask.offset;
if (screen->info.chip_class >= GFX9) {
data_format = V_008F14_IMG_DATA_FORMAT_FMASK;
@@ -3907,8 +3907,8 @@ si_create_sampler_view_custom(struct pipe_context *ctx,
/* Override format for the case where the flushed texture
* contains only Z or only S.
*/
- if (tmp->flushed_depth_texture->resource.b.b.format != tmp->resource.b.b.format)
- pipe_format = tmp->flushed_depth_texture->resource.b.b.format;
+ if (tmp->flushed_depth_texture->buffer.b.b.format != tmp->buffer.b.b.format)
+ pipe_format = tmp->flushed_depth_texture->buffer.b.b.format;
tmp = tmp->flushed_depth_texture;
}
diff --git a/src/gallium/drivers/radeonsi/si_texture.c b/src/gallium/drivers/radeonsi/si_texture.c
index 34cb052db35..17f87a3039b 100644
--- a/src/gallium/drivers/radeonsi/si_texture.c
+++ b/src/gallium/drivers/radeonsi/si_texture.c
@@ -58,8 +58,8 @@ bool si_prepare_for_dma_blit(struct si_context *sctx,
return false;
/* MSAA: Blits don't exist in the real world. */
- if (rsrc->resource.b.b.nr_samples > 1 ||
- rdst->resource.b.b.nr_samples > 1)
+ if (rsrc->buffer.b.b.nr_samples > 1 ||
+ rdst->buffer.b.b.nr_samples > 1)
return false;
/* Depth-stencil surfaces:
@@ -85,7 +85,7 @@ bool si_prepare_for_dma_blit(struct si_context *sctx,
if (rdst->cmask.size && rdst->dirty_level_mask & (1 << dst_level)) {
/* The CMASK clear is only enabled for the first level. */
assert(dst_level == 0);
- if (!util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
+ if (!util_texrange_covers_whole_level(&rdst->buffer.b.b, dst_level,
dstx, dsty, dstz, src_box->width,
src_box->height, src_box->depth))
return false;
@@ -95,7 +95,7 @@ bool si_prepare_for_dma_blit(struct si_context *sctx,
/* All requirements are met. Prepare textures for SDMA. */
if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level))
- sctx->b.flush_resource(&sctx->b, &rsrc->resource.b.b);
+ sctx->b.flush_resource(&sctx->b, &rsrc->buffer.b.b);
assert(!(rsrc->dirty_level_mask & (1 << src_level)));
assert(!(rdst->dirty_level_mask & (1 << dst_level)));
@@ -401,7 +401,7 @@ void si_eliminate_fast_color_clear(struct si_context *sctx,
mtx_lock(&sscreen->aux_context_lock);
unsigned n = sctx->num_decompress_calls;
- ctx->flush_resource(ctx, &rtex->resource.b.b);
+ ctx->flush_resource(ctx, &rtex->buffer.b.b);
/* Flush only if any fast clear elimination took place. */
if (n != sctx->num_decompress_calls)
@@ -417,16 +417,16 @@ void si_texture_discard_cmask(struct si_screen *sscreen,
if (!rtex->cmask.size)
return;
- assert(rtex->resource.b.b.nr_samples <= 1);
+ assert(rtex->buffer.b.b.nr_samples <= 1);
/* Disable CMASK. */
memset(&rtex->cmask, 0, sizeof(rtex->cmask));
- rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;
+ rtex->cmask.base_address_reg = rtex->buffer.gpu_address >> 8;
rtex->dirty_level_mask = 0;
rtex->cb_color_info &= ~S_028C70_FAST_CLEAR(1);
- if (rtex->cmask_buffer != &rtex->resource)
+ if (rtex->cmask_buffer != &rtex->buffer)
r600_resource_reference(&rtex->cmask_buffer, NULL);
/* Notify all contexts about the change. */
@@ -438,8 +438,8 @@ static bool si_can_disable_dcc(struct r600_texture *rtex)
{
/* We can't disable DCC if it can be written by another process. */
return rtex->dcc_offset &&
- (!rtex->resource.b.is_shared ||
- !(rtex->resource.external_usage & PIPE_HANDLE_USAGE_WRITE));
+ (!rtex->buffer.b.is_shared ||
+ !(rtex->buffer.external_usage & PIPE_HANDLE_USAGE_WRITE));
}
static bool si_texture_discard_dcc(struct si_screen *sscreen,
@@ -507,12 +507,12 @@ static void si_reallocate_texture_inplace(struct si_context *sctx,
{
struct pipe_screen *screen = sctx->b.screen;
struct r600_texture *new_tex;
- struct pipe_resource templ = rtex->resource.b.b;
+ struct pipe_resource templ = rtex->buffer.b.b;
unsigned i;
templ.bind |= new_bind_flag;
- if (rtex->resource.b.is_shared)
+ if (rtex->buffer.b.is_shared)
return;
if (new_bind_flag == PIPE_BIND_LINEAR) {
@@ -538,8 +538,8 @@ static void si_reallocate_texture_inplace(struct si_context *sctx,
u_minify(templ.width0, i), u_minify(templ.height0, i),
util_num_layers(&templ, i), &box);
- sctx->dma_copy(&sctx->b, &new_tex->resource.b.b, i, 0, 0, 0,
- &rtex->resource.b.b, i, &box);
+ sctx->dma_copy(&sctx->b, &new_tex->buffer.b.b, i, 0, 0, 0,
+ &rtex->buffer.b.b, i, &box);
}
}
@@ -549,15 +549,15 @@ static void si_reallocate_texture_inplace(struct si_context *sctx,
}
/* Replace the structure fields of rtex. */
- rtex->resource.b.b.bind = templ.bind;
- pb_reference(&rtex->resource.buf, new_tex->resource.buf);
- rtex->resource.gpu_address = new_tex->resource.gpu_address;
- rtex->resource.vram_usage = new_tex->resource.vram_usage;
- rtex->resource.gart_usage = new_tex->resource.gart_usage;
- rtex->resource.bo_size = new_tex->resource.bo_size;
- rtex->resource.bo_alignment = new_tex->resource.bo_alignment;
- rtex->resource.domains = new_tex->resource.domains;
- rtex->resource.flags = new_tex->resource.flags;
+ rtex->buffer.b.b.bind = templ.bind;
+ pb_reference(&rtex->buffer.buf, new_tex->buffer.buf);
+ rtex->buffer.gpu_address = new_tex->buffer.gpu_address;
+ rtex->buffer.vram_usage = new_tex->buffer.vram_usage;
+ rtex->buffer.gart_usage = new_tex->buffer.gart_usage;
+ rtex->buffer.bo_size = new_tex->buffer.bo_size;
+ rtex->buffer.bo_alignment = new_tex->buffer.bo_alignment;
+ rtex->buffer.domains = new_tex->buffer.domains;
+ rtex->buffer.flags = new_tex->buffer.flags;
rtex->size = new_tex->size;
rtex->db_render_format = new_tex->db_render_format;
rtex->db_compatible = new_tex->db_compatible;
@@ -597,7 +597,7 @@ static void si_query_opaque_metadata(struct si_screen *sscreen,
struct r600_texture *rtex,
struct radeon_bo_metadata *md)
{
- struct pipe_resource *res = &rtex->resource.b.b;
+ struct pipe_resource *res = &rtex->buffer.b.b;
static const unsigned char swizzle[] = {
PIPE_SWIZZLE_X,
PIPE_SWIZZLE_Y,
@@ -711,7 +711,7 @@ static boolean si_texture_get_handle(struct pipe_screen* screen,
/* Move a suballocated texture into a non-suballocated allocation. */
if (sscreen->ws->buffer_is_suballocated(res->buf) ||
rtex->surface.tile_swizzle ||
- (rtex->resource.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
+ (rtex->buffer.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
sscreen->info.has_local_buffers &&
whandle->type != DRM_API_HANDLE_TYPE_KMS)) {
assert(!res->b.is_shared);
@@ -774,7 +774,7 @@ static boolean si_texture_get_handle(struct pipe_screen* screen,
/* Move a suballocated buffer into a non-suballocated allocation. */
if (sscreen->ws->buffer_is_suballocated(res->buf) ||
/* A DMABUF export always fails if the BO is local. */
- (rtex->resource.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
+ (rtex->buffer.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
sscreen->info.has_local_buffers)) {
assert(!res->b.is_shared);
@@ -830,11 +830,11 @@ static void si_texture_destroy(struct pipe_screen *screen,
struct pipe_resource *ptex)
{
struct r600_texture *rtex = (struct r600_texture*)ptex;
- struct r600_resource *resource = &rtex->resource;
+ struct r600_resource *resource = &rtex->buffer;
r600_texture_reference(&rtex->flushed_depth_texture, NULL);
- if (rtex->cmask_buffer != &rtex->resource) {
+ if (rtex->cmask_buffer != &rtex->buffer) {
r600_resource_reference(&rtex->cmask_buffer, NULL);
}
pb_reference(&resource->buf, NULL);
@@ -852,7 +852,7 @@ void si_texture_get_fmask_info(struct si_screen *sscreen,
struct r600_fmask_info *out)
{
/* FMASK is allocated like an ordinary texture. */
- struct pipe_resource templ = rtex->resource.b.b;
+ struct pipe_resource templ = rtex->buffer.b.b;
struct radeon_surf fmask = {};
unsigned flags, bpe;
@@ -905,7 +905,7 @@ static void si_texture_allocate_fmask(struct si_screen *sscreen,
struct r600_texture *rtex)
{
si_texture_get_fmask_info(sscreen, rtex,
- rtex->resource.b.b.nr_samples, &rtex->fmask);
+ rtex->buffer.b.b.nr_samples, &rtex->fmask);
rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment);
rtex->size = rtex->fmask.offset + rtex->fmask.size;
@@ -949,8 +949,8 @@ void si_texture_get_cmask_info(struct si_screen *sscreen,
unsigned base_align = num_pipes * pipe_interleave_bytes;
- unsigned width = align(rtex->resource.b.b.width0, cl_width*8);
- unsigned height = align(rtex->resource.b.b.height0, cl_height*8);
+ unsigned width = align(rtex->buffer.b.b.width0, cl_width*8);
+ unsigned height = align(rtex->buffer.b.b.height0, cl_height*8);
unsigned slice_elements = (width * height) / (8*8);
/* Each element of CMASK is a nibble. */
@@ -961,7 +961,7 @@ void si_texture_get_cmask_info(struct si_screen *sscreen,
out->slice_tile_max -= 1;
out->alignment = MAX2(256, base_align);
- out->size = util_num_layers(&rtex->resource.b.b, 0) *
+ out->size = util_num_layers(&rtex->buffer.b.b, 0) *
align(slice_bytes, base_align);
}
@@ -1029,8 +1029,8 @@ static void si_texture_get_htile_size(struct si_screen *sscreen,
return;
}
- width = align(rtex->resource.b.b.width0, cl_width * 8);
- height = align(rtex->resource.b.b.height0, cl_height * 8);
+ width = align(rtex->buffer.b.b.width0, cl_width * 8);
+ height = align(rtex->buffer.b.b.height0, cl_height * 8);
slice_elements = (width * height) / (8 * 8);
slice_bytes = slice_elements * 4;
@@ -1040,7 +1040,7 @@ static void si_texture_get_htile_size(struct si_screen *sscreen,
rtex->surface.htile_alignment = base_align;
rtex->surface.htile_size =
- util_num_layers(&rtex->resource.b.b, 0) *
+ util_num_layers(&rtex->buffer.b.b, 0) *
align(slice_bytes, base_align);
}
@@ -1066,12 +1066,12 @@ void si_print_texture_info(struct si_screen *sscreen,
u_log_printf(log, " Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
"blk_h=%u, array_size=%u, last_level=%u, "
"bpe=%u, nsamples=%u, flags=0x%x, %s\n",
- rtex->resource.b.b.width0, rtex->resource.b.b.height0,
- rtex->resource.b.b.depth0, rtex->surface.blk_w,
+ rtex->buffer.b.b.width0, rtex->buffer.b.b.height0,
+ rtex->buffer.b.b.depth0, rtex->surface.blk_w,
rtex->surface.blk_h,
- rtex->resource.b.b.array_size, rtex->resource.b.b.last_level,
- rtex->surface.bpe, rtex->resource.b.b.nr_samples,
- rtex->surface.flags, util_format_short_name(rtex->resource.b.b.format));
+ rtex->buffer.b.b.array_size, rtex->buffer.b.b.last_level,
+ rtex->surface.bpe, rtex->buffer.b.b.nr_samples,
+ rtex->surface.flags, util_format_short_name(rtex->buffer.b.b.format));
if (sscreen->info.chip_class >= GFX9) {
u_log_printf(log, " Surf: size=%"PRIu64", slice_size=%"PRIu64", "
@@ -1162,7 +1162,7 @@ void si_print_texture_info(struct si_screen *sscreen,
u_log_printf(log, " DCC: offset=%"PRIu64", size=%u, alignment=%u\n",
rtex->dcc_offset, rtex->surface.dcc_size,
rtex->surface.dcc_alignment);
- for (i = 0; i <= rtex->resource.b.b.last_level; i++)
+ for (i = 0; i <= rtex->buffer.b.b.last_level; i++)
u_log_printf(log, " DCCLevel[%i]: enabled=%u, offset=%u, "
"fast_clear_size=%u\n",
i, i < rtex->surface.num_dcc_levels,
@@ -1170,15 +1170,15 @@ void si_print_texture_info(struct si_screen *sscreen,
rtex->surface.u.legacy.level[i].dcc_fast_clear_size);
}
- for (i = 0; i <= rtex->resource.b.b.last_level; i++)
+ for (i = 0; i <= rtex->buffer.b.b.last_level; i++)
u_log_printf(log, " Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
"npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
"mode=%u, tiling_index = %u\n",
i, rtex->surface.u.legacy.level[i].offset,
(uint64_t)rtex->surface.u.legacy.level[i].slice_size_dw * 4,
- u_minify(rtex->resource.b.b.width0, i),
- u_minify(rtex->resource.b.b.height0, i),
- u_minify(rtex->resource.b.b.depth0, i),
+ u_minify(rtex->buffer.b.b.width0, i),
+ u_minify(rtex->buffer.b.b.height0, i),
+ u_minify(rtex->buffer.b.b.depth0, i),
rtex->surface.u.legacy.level[i].nblk_x,
rtex->surface.u.legacy.level[i].nblk_y,
rtex->surface.u.legacy.level[i].mode,
@@ -1187,16 +1187,16 @@ void si_print_texture_info(struct si_screen *sscreen,
if (rtex->surface.has_stencil) {
u_log_printf(log, " StencilLayout: tilesplit=%u\n",
rtex->surface.u.legacy.stencil_tile_split);
- for (i = 0; i <= rtex->resource.b.b.last_level; i++) {
+ for (i = 0; i <= rtex->buffer.b.b.last_level; i++) {
u_log_printf(log, " StencilLevel[%i]: offset=%"PRIu64", "
"slice_size=%"PRIu64", npix_x=%u, "
"npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
"mode=%u, tiling_index = %u\n",
i, rtex->surface.u.legacy.stencil_level[i].offset,
(uint64_t)rtex->surface.u.legacy.stencil_level[i].slice_size_dw * 4,
- u_minify(rtex->resource.b.b.width0, i),
- u_minify(rtex->resource.b.b.height0, i),
- u_minify(rtex->resource.b.b.depth0, i),
+ u_minify(rtex->buffer.b.b.width0, i),
+ u_minify(rtex->buffer.b.b.height0, i),
+ u_minify(rtex->buffer.b.b.depth0, i),
rtex->surface.u.legacy.stencil_level[i].nblk_x,
rtex->surface.u.legacy.stencil_level[i].nblk_y,
rtex->surface.u.legacy.stencil_level[i].mode,
@@ -1220,7 +1220,7 @@ si_texture_create_object(struct pipe_screen *screen,
if (!rtex)
return NULL;
- resource = &rtex->resource;
+ resource = &rtex->buffer;
resource->b.b = *base;
resource->b.b.next = NULL;
resource->b.vtbl = &si_texture_vtbl;
@@ -1228,7 +1228,7 @@ si_texture_create_object(struct pipe_screen *screen,
resource->b.b.screen = screen;
/* don't include stencil-only formats which we don't support for rendering */
- rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));
+ rtex->is_depth = util_format_has_depth(util_format_description(rtex->buffer.b.b.format));
rtex->surface = *surface;
rtex->size = rtex->surface.surf_size;
@@ -1284,7 +1284,7 @@ si_texture_create_object(struct pipe_screen *screen,
!(sscreen->debug_flags & DBG(NO_FMASK))) {
si_texture_allocate_fmask(sscreen, rtex);
si_texture_allocate_cmask(sscreen, rtex);
- rtex->cmask_buffer = &rtex->resource;
+ rtex->cmask_buffer = &rtex->buffer;
if (!rtex->fmask.size || !rtex->cmask.size) {
FREE(rtex);
@@ -1338,7 +1338,7 @@ si_texture_create_object(struct pipe_screen *screen,
if (sscreen->info.chip_class >= GFX9 || rtex->tc_compatible_htile)
clear_value = 0x0000030F;
- si_screen_clear_buffer(sscreen, &rtex->resource.b.b,
+ si_screen_clear_buffer(sscreen, &rtex->buffer.b.b,
rtex->htile_offset,
rtex->surface.htile_size,
clear_value);
@@ -1346,7 +1346,7 @@ si_texture_create_object(struct pipe_screen *screen,
/* Initialize DCC only if the texture is not being imported. */
if (!buf && rtex->dcc_offset) {
- si_screen_clear_buffer(sscreen, &rtex->resource.b.b,
+ si_screen_clear_buffer(sscreen, &rtex->buffer.b.b,
rtex->dcc_offset,
rtex->surface.dcc_size,
0xFFFFFFFF);
@@ -1354,12 +1354,12 @@ si_texture_create_object(struct pipe_screen *screen,
/* Initialize the CMASK base register value. */
rtex->cmask.base_address_reg =
- (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;
+ (rtex->buffer.gpu_address + rtex->cmask.offset) >> 8;
if (sscreen->debug_flags & DBG(VM)) {
fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
- rtex->resource.gpu_address,
- rtex->resource.gpu_address + rtex->resource.buf->size,
+ rtex->buffer.gpu_address,
+ rtex->buffer.gpu_address + rtex->buffer.buf->size,
base->width0, base->height0, util_num_layers(base, 0), base->last_level+1,
base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
}
@@ -1517,13 +1517,13 @@ static struct pipe_resource *si_texture_from_handle(struct pipe_screen *screen,
if (!rtex)
return NULL;
- rtex->resource.b.is_shared = true;
- rtex->resource.external_usage = usage;
+ rtex->buffer.b.is_shared = true;
+ rtex->buffer.external_usage = usage;
si_apply_opaque_metadata(sscreen, rtex, &metadata);
assert(rtex->surface.tile_swizzle == 0);
- return &rtex->resource.b.b;
+ return &rtex->buffer.b.b;
}
bool si_init_flushed_depth_texture(struct pipe_context *ctx,
@@ -1626,10 +1626,10 @@ static bool si_can_invalidate_texture(struct si_screen *sscreen,
unsigned transfer_usage,
const struct pipe_box *box)
{
- return !rtex->resource.b.is_shared &&
+ return !rtex->buffer.b.is_shared &&
!(transfer_usage & PIPE_TRANSFER_READ) &&
- rtex->resource.b.b.last_level == 0 &&
- util_texrange_covers_whole_level(&rtex->resource.b.b, 0,
+ rtex->buffer.b.b.last_level == 0 &&
+ util_texrange_covers_whole_level(&rtex->buffer.b.b, 0,
box->x, box->y, box->z,
box->width, box->height,
box->depth);
@@ -1645,11 +1645,11 @@ static void si_texture_invalidate_storage(struct si_context *sctx,
assert(rtex->surface.is_linear);
/* Reallocate the buffer in the same pipe_resource. */
- si_alloc_resource(sscreen, &rtex->resource);
+ si_alloc_resource(sscreen, &rtex->buffer);
/* Initialize the CMASK base address (needed even without CMASK). */
rtex->cmask.base_address_reg =
- (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;
+ (rtex->buffer.gpu_address + rtex->cmask.offset) >> 8;
p_atomic_inc(&sscreen->dirty_tex_counter);
@@ -1706,12 +1706,12 @@ static void *si_texture_transfer_map(struct pipe_context *ctx,
use_staging_texture = true;
else if (usage & PIPE_TRANSFER_READ)
use_staging_texture =
- rtex->resource.domains & RADEON_DOMAIN_VRAM ||
- rtex->resource.flags & RADEON_FLAG_GTT_WC;
+ rtex->buffer.domains & RADEON_DOMAIN_VRAM ||
+ rtex->buffer.flags & RADEON_FLAG_GTT_WC;
/* Write & linear only: */
- else if (si_rings_is_buffer_referenced(sctx, rtex->resource.buf,
+ else if (si_rings_is_buffer_referenced(sctx, rtex->buffer.buf,
RADEON_USAGE_READWRITE) ||
- !sctx->ws->buffer_wait(rtex->resource.buf, 0,
+ !sctx->ws->buffer_wait(rtex->buffer.buf, 0,
RADEON_USAGE_READWRITE)) {
/* It's busy. */
if (si_can_invalidate_texture(sctx->screen, rtex,
@@ -1733,7 +1733,7 @@ static void *si_texture_transfer_map(struct pipe_context *ctx,
if (rtex->is_depth) {
struct r600_texture *staging_depth;
- if (rtex->resource.b.b.nr_samples > 1) {
+ if (rtex->buffer.b.b.nr_samples > 1) {
/* MSAA depth buffers need to be converted to single sample buffers.
*
* Mapping MSAA depth buffers can occur if ReadPixels is called
@@ -1789,7 +1789,7 @@ static void *si_texture_transfer_map(struct pipe_context *ctx,
&trans->b.b.layer_stride);
}
- trans->staging = &staging_depth->resource;
+ trans->staging = &staging_depth->buffer;
buf = trans->staging;
} else if (use_staging_texture) {
struct pipe_resource resource;
@@ -1806,7 +1806,7 @@ static void *si_texture_transfer_map(struct pipe_context *ctx,
PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
goto fail_trans;
}
- trans->staging = &staging->resource;
+ trans->staging = &staging->buffer;
/* Just get the strides. */
si_texture_get_offset(sctx->screen, staging, 0, NULL,
@@ -1824,7 +1824,7 @@ static void *si_texture_transfer_map(struct pipe_context *ctx,
offset = si_texture_get_offset(sctx->screen, rtex, level, box,
&trans->b.b.stride,
&trans->b.b.layer_stride);
- buf = &rtex->resource;
+ buf = &rtex->buffer;
}
if (!(map = si_buffer_map_sync_with_rings(sctx, buf, usage)))
@@ -1849,7 +1849,7 @@ static void si_texture_transfer_unmap(struct pipe_context *ctx,
struct r600_texture *rtex = (struct r600_texture*)texture;
if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
- if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
+ if (rtex->is_depth && rtex->buffer.b.b.nr_samples <= 1) {
ctx->resource_copy_region(ctx, texture, transfer->level,
transfer->box.x, transfer->box.y, transfer->box.z,
&rtransfer->staging->b.b, transfer->level,
@@ -2147,7 +2147,7 @@ static unsigned vi_get_context_dcc_stats_index(struct si_context *sctx,
/* Remove zombie textures (textures kept alive by this array only). */
for (i = 0; i < ARRAY_SIZE(sctx->dcc_stats); i++)
if (sctx->dcc_stats[i].tex &&
- sctx->dcc_stats[i].tex->resource.b.b.reference.count == 1)
+ sctx->dcc_stats[i].tex->buffer.b.b.reference.count == 1)
vi_dcc_clean_up_context_slot(sctx, i);
/* Find the texture. */
@@ -2242,10 +2242,10 @@ void vi_separate_dcc_try_enable(struct si_context *sctx,
/* The intent is to use this with shared displayable back buffers,
* but it's not strictly limited only to them.
*/
- if (!tex->resource.b.is_shared ||
- !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) ||
- tex->resource.b.b.target != PIPE_TEXTURE_2D ||
- tex->resource.b.b.last_level > 0 ||
+ if (!tex->buffer.b.is_shared ||
+ !(tex->buffer.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) ||
+ tex->buffer.b.b.target != PIPE_TEXTURE_2D ||
+ tex->buffer.b.b.last_level > 0 ||
!tex->surface.dcc_size)
return;
@@ -2317,7 +2317,7 @@ void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
/* Compute the approximate number of fullscreen draws. */
tex->ps_draw_ratio =
result.pipeline_statistics.ps_invocations /
- (tex->resource.b.b.width0 * tex->resource.b.b.height0);
+ (tex->buffer.b.b.width0 * tex->buffer.b.b.height0);
sctx->last_tex_ps_draw_ratio = tex->ps_draw_ratio;
disable = tex->dcc_separate_buffer &&
@@ -2454,12 +2454,12 @@ si_texture_from_memobj(struct pipe_screen *screen,
*/
pb_reference(&buf, memobj->buf);
- rtex->resource.b.is_shared = true;
- rtex->resource.external_usage = PIPE_HANDLE_USAGE_READ_WRITE;
+ rtex->buffer.b.is_shared = true;
+ rtex->buffer.external_usage = PIPE_HANDLE_USAGE_READ_WRITE;
si_apply_opaque_metadata(sscreen, rtex, &metadata);
- return &rtex->resource.b.b;
+ return &rtex->buffer.b.b;
}
static bool si_check_resource_capability(struct pipe_screen *screen,
diff --git a/src/gallium/drivers/radeonsi/si_uvd.c b/src/gallium/drivers/radeonsi/si_uvd.c
index 4165725b0e9..ee8ed58b401 100644
--- a/src/gallium/drivers/radeonsi/si_uvd.c
+++ b/src/gallium/drivers/radeonsi/si_uvd.c
@@ -84,7 +84,7 @@ struct pipe_video_buffer *si_video_buffer_create(struct pipe_context *pipe,
continue;
surfaces[i] = & resources[i]->surface;
- pbs[i] = &resources[i]->resource.buf;
+ pbs[i] = &resources[i]->buffer.buf;
}
si_vid_join_surfaces(ctx, pbs, surfaces);
@@ -94,8 +94,8 @@ struct pipe_video_buffer *si_video_buffer_create(struct pipe_context *pipe,
continue;
/* reset the address */
- resources[i]->resource.gpu_address = ctx->ws->buffer_get_virtual_address(
- resources[i]->resource.buf);
+ resources[i]->buffer.gpu_address = ctx->ws->buffer_get_virtual_address(
+ resources[i]->buffer.buf);
}
vidtemplate.height *= array_size;
@@ -122,7 +122,7 @@ static struct pb_buffer* si_uvd_set_dtb(struct ruvd_msg *msg, struct vl_video_bu
si_uvd_set_dt_surfaces(msg, &luma->surface, (chroma) ? &chroma->surface : NULL, type);
- return luma->resource.buf;
+ return luma->buffer.buf;
}
/* get the radeon resources for VCE */
@@ -133,7 +133,7 @@ static void si_vce_get_buffer(struct pipe_resource *resource,
struct r600_texture *res = (struct r600_texture *)resource;
if (handle)
- *handle = res->resource.buf;
+ *handle = res->buffer.buf;
if (surface)
*surface = &res->surface;