Diffstat (limited to 'src/gallium/drivers/r600/r600_state_common.c')
-rw-r--r--  src/gallium/drivers/r600/r600_state_common.c  87
1 file changed, 33 insertions(+), 54 deletions(-)
diff --git a/src/gallium/drivers/r600/r600_state_common.c b/src/gallium/drivers/r600/r600_state_common.c
index ee6fd2667b3..99ec5e7f7dc 100644
--- a/src/gallium/drivers/r600/r600_state_common.c
+++ b/src/gallium/drivers/r600/r600_state_common.c
@@ -523,20 +523,6 @@ static void r600_delete_vertex_elements(struct pipe_context *ctx, void *state)
FREE(shader);
}
-static void r600_set_index_buffer(struct pipe_context *ctx,
- const struct pipe_index_buffer *ib)
-{
- struct r600_context *rctx = (struct r600_context *)ctx;
-
- if (ib) {
- pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
- memcpy(&rctx->index_buffer, ib, sizeof(*ib));
- r600_context_add_resource_size(ctx, ib->buffer);
- } else {
- pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
- }
-}
-
void r600_vertex_buffers_dirty(struct r600_context *rctx)
{
if (rctx->vertex_buffer_state.dirty_mask) {
@@ -1702,14 +1688,16 @@ static inline void r600_emit_rasterizer_prim_state(struct r600_context *rctx)
static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- struct pipe_index_buffer ib = {};
+ struct pipe_resource *indexbuf = info->has_user_indices ? NULL : info->index.resource;
struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
bool render_cond_bit = rctx->b.render_cond && !rctx->b.render_cond_force_off;
+ bool has_user_indices = info->has_user_indices;
uint64_t mask;
- unsigned num_patches, dirty_tex_counter;
+ unsigned num_patches, dirty_tex_counter, index_offset = 0;
+ unsigned index_size = info->index_size;
int index_bias;
- if (!info->indirect && !info->count && (info->indexed || !info->count_from_stream_output)) {
+ if (!info->indirect && !info->count && (index_size || !info->count_from_stream_output)) {
return;
}
@@ -1747,18 +1735,11 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
: (rctx->tes_shader)? rctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE]
: info->mode;
- if (info->indexed) {
- /* Initialize the index buffer struct. */
- pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
- ib.user_buffer = rctx->index_buffer.user_buffer;
- ib.index_size = rctx->index_buffer.index_size;
- ib.offset = rctx->index_buffer.offset;
- if (!info->indirect) {
- ib.offset += info->start * ib.index_size;
- }
+ if (index_size) {
+ index_offset += info->start * index_size;
/* Translate 8-bit indices to 16-bit. */
- if (unlikely(ib.index_size == 1)) {
+ if (unlikely(index_size == 1)) {
struct pipe_resource *out_buffer = NULL;
unsigned out_offset;
void *ptr;
@@ -1775,7 +1756,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
PIPE_TRANSFER_READ);
if (data) {
data += info->indirect->offset / sizeof(unsigned);
- start = data[2] * ib.index_size;
+ start = data[2] * index_size;
count = data[0];
}
else {
@@ -1786,19 +1767,16 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
u_upload_alloc(ctx->stream_uploader, start, count * 2,
256, &out_offset, &out_buffer, &ptr);
- if (unlikely(!ptr)) {
- pipe_resource_reference(&ib.buffer, NULL);
+ if (unlikely(!ptr))
return;
- }
util_shorten_ubyte_elts_to_userptr(
- &rctx->b.b, &ib, 0, 0, ib.offset + start, count, ptr);
+ &rctx->b.b, info, 0, 0, index_offset, count, ptr);
- pipe_resource_reference(&ib.buffer, NULL);
- ib.user_buffer = NULL;
- ib.buffer = out_buffer;
- ib.offset = out_offset;
- ib.index_size = 2;
+ indexbuf = out_buffer;
+ index_offset = out_offset;
+ index_size = 2;
+ has_user_indices = false;
}
/* Upload the index buffer.
@@ -1806,13 +1784,14 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
* and the indices are emitted via PKT3_DRAW_INDEX_IMMD.
* Indirect draws never use immediate indices.
* Note: Instanced rendering in combination with immediate indices hangs. */
- if (ib.user_buffer && (R600_BIG_ENDIAN || info->indirect ||
+ if (has_user_indices && (R600_BIG_ENDIAN || info->indirect ||
info->instance_count > 1 ||
- info->count*ib.index_size > 20)) {
+ info->count*index_size > 20)) {
+ indexbuf = NULL;
u_upload_data(ctx->stream_uploader, 0,
- info->count * ib.index_size, 256,
- ib.user_buffer, &ib.offset, &ib.buffer);
- ib.user_buffer = NULL;
+ info->count * index_size, 256,
+ info->index.user, &index_offset, &indexbuf);
+ has_user_indices = false;
}
index_bias = info->index_bias;
} else {
@@ -1840,7 +1819,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
evergreen_setup_tess_constants(rctx, info, &num_patches);
/* Emit states. */
- r600_need_cs_space(rctx, ib.user_buffer ? 5 : 0, TRUE);
+ r600_need_cs_space(rctx, has_user_indices ? 5 : 0, TRUE);
r600_flush_emit(rctx);
mask = rctx->dirty_atoms;
@@ -1937,21 +1916,21 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
RADEON_PRIO_DRAW_INDIRECT));
}
- if (info->indexed) {
+ if (index_size) {
radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
- radeon_emit(cs, ib.index_size == 4 ?
+ radeon_emit(cs, index_size == 4 ?
(VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
(VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0)));
- if (ib.user_buffer) {
- unsigned size_bytes = info->count*ib.index_size;
+ if (has_user_indices) {
+ unsigned size_bytes = info->count*index_size;
unsigned size_dw = align(size_bytes, 4) / 4;
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, render_cond_bit));
radeon_emit(cs, info->count);
radeon_emit(cs, V_0287F0_DI_SRC_SEL_IMMEDIATE);
- radeon_emit_array(cs, ib.user_buffer, size_dw);
+ radeon_emit_array(cs, info->index.user, size_dw);
} else {
- uint64_t va = r600_resource(ib.buffer)->gpu_address + ib.offset;
+ uint64_t va = r600_resource(indexbuf)->gpu_address + index_offset;
if (likely(!info->indirect)) {
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX, 3, render_cond_bit));
@@ -1961,12 +1940,12 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
- (struct r600_resource*)ib.buffer,
+ (struct r600_resource*)indexbuf,
RADEON_USAGE_READ,
RADEON_PRIO_INDEX_BUFFER));
}
else {
- uint32_t max_size = (ib.buffer->width0 - ib.offset) / ib.index_size;
+ uint32_t max_size = (indexbuf->width0 - index_offset) / index_size;
radeon_emit(cs, PKT3(EG_PKT3_INDEX_BASE, 1, 0));
radeon_emit(cs, va);
@@ -1974,7 +1953,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
- (struct r600_resource*)ib.buffer,
+ (struct r600_resource*)indexbuf,
RADEON_USAGE_READ,
RADEON_PRIO_INDEX_BUFFER));
@@ -2064,7 +2043,8 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
rctx->framebuffer.do_update_surf_dirtiness = false;
}
- pipe_resource_reference(&ib.buffer, NULL);
+ if (index_size && indexbuf != info->index.resource)
+ pipe_resource_reference(&indexbuf, NULL);
rctx->b.num_draw_calls++;
}
@@ -2971,7 +2951,6 @@ void r600_init_common_state_functions(struct r600_context *rctx)
rctx->b.b.set_sample_mask = r600_set_sample_mask;
rctx->b.b.set_stencil_ref = r600_set_pipe_stencil_ref;
rctx->b.b.set_vertex_buffers = r600_set_vertex_buffers;
- rctx->b.b.set_index_buffer = r600_set_index_buffer;
rctx->b.b.set_sampler_views = r600_set_sampler_views;
rctx->b.b.sampler_view_destroy = r600_sampler_view_destroy;
rctx->b.b.texture_barrier = r600_texture_barrier;
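
For context, the patch above makes r600_draw_vbo read all index-buffer state from pipe_draw_info at draw time (index_size, has_user_indices, index.resource / index.user) instead of a pipe_index_buffer cached by the removed set_index_buffer hook. The standalone sketch below models that draw-time decision flow with simplified stand-in types; the fake_* structs, draw_vbo_sketch(), and the printed decisions are illustrative assumptions for this page, not Mesa code.

/* Minimal, self-contained sketch (NOT Mesa code): models how r600_draw_vbo
 * now derives its index-buffer state per draw call from the draw info
 * instead of a cached pipe_index_buffer.  All types are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

struct fake_resource { const char *name; };      /* stand-in for pipe_resource */

struct fake_draw_info {                          /* stand-in for pipe_draw_info */
	unsigned index_size;                     /* 0 = non-indexed draw */
	bool has_user_indices;                   /* indices live in CPU memory? */
	unsigned start, count, instance_count;
	bool indirect;
	union {
		struct fake_resource *resource;  /* GPU index buffer */
		const void *user;                /* CPU index array */
	} index;
};

static void draw_vbo_sketch(const struct fake_draw_info *info)
{
	/* Mirror of the patch: resolve buffer/offset/size from the draw info. */
	struct fake_resource *indexbuf =
		info->has_user_indices ? NULL : info->index.resource;
	bool has_user_indices = info->has_user_indices;
	unsigned index_size = info->index_size;
	unsigned index_offset = 0;

	if (!index_size) {
		puts("non-indexed draw");
		return;
	}
	index_offset += info->start * index_size;

	/* (The real driver first widens 1-byte indices to 2 bytes; omitted here.) */

	/* Small user-index draws can go out as immediate data; indirect,
	 * instanced, or large user-index draws get uploaded to a buffer first
	 * (the R600_BIG_ENDIAN condition is omitted in this sketch). */
	bool upload = has_user_indices &&
		      (info->indirect || info->instance_count > 1 ||
		       info->count * index_size > 20);

	if (upload)
		printf("index_size=%u offset=%u: upload %u user bytes, draw from new buffer\n",
		       index_size, index_offset, info->count * index_size);
	else if (has_user_indices)
		printf("index_size=%u offset=%u: emit %u indices as immediate data\n",
		       index_size, index_offset, info->count);
	else
		printf("index_size=%u offset=%u: draw from GPU buffer '%s'\n",
		       index_size, index_offset, indexbuf->name);
}

int main(void)
{
	struct fake_resource buf = { "ibo" };
	struct fake_draw_info gpu_draw = { .index_size = 2, .start = 4,
					   .count = 300, .instance_count = 1,
					   .index.resource = &buf };
	struct fake_draw_info user_draw = { .index_size = 2, .count = 6,
					    .instance_count = 1,
					    .has_user_indices = true,
					    .index.user = (unsigned short[]){ 0, 1, 2, 2, 1, 3 } };
	draw_vbo_sketch(&gpu_draw);
	draw_vbo_sketch(&user_draw);
	return 0;
}

Compile with any C99 compiler (e.g. cc -std=c99 sketch.c) to see which path each example draw would take; the cleanup rule at the end of the patch follows the same idea: only a buffer created during the draw (8-bit translation or user-index upload) is unreferenced, never the caller's info->index.resource.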