author     Marek Olšák <[email protected]>    2017-09-13 02:26:26 +0200
committer  Marek Olšák <[email protected]>    2017-09-26 04:21:14 +0200
commit     06bfb2d28f7adca7edc6be9c210a7a3583023652 (patch)
tree       a969d20fe1d2a75eb03facabe4c5fb83948178f0 /src/gallium/drivers/radeon
parent     e1623da8185ee5d167cd331fb645e6a83961285a (diff)
r600: fork and import gallium/radeon
This marks the end of code sharing between r600 and radeonsi.
It's getting difficult to work on radeonsi without breaking r600.
A lot of functions had to be renamed to prevent linker conflicts.
There are also minor cleanups.
Acked-by: Dave Airlie <[email protected]>
Reviewed-by: Nicolai Hähnle <[email protected]>
Diffstat (limited to 'src/gallium/drivers/radeon')
22 files changed, 646 insertions, 879 deletions
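For readers skimming the diff below, the renaming pattern the commit message refers to looks roughly like this. This is an illustrative sketch, not code taken from the patch; the identifiers (si_buffer_create, eg_max_dist_2x) simply mirror names visible in the diff, and the "_old" variant is hypothetical.

/* rename_sketch.c — minimal illustration of the linker-conflict fix.
 *
 * After the fork, r600 and radeonsi each carry their own copy of the old
 * gallium/radeon helpers.  If both copies kept exporting the same
 * r600_-prefixed symbols and were linked into one driver library, the link
 * would fail with duplicate-symbol errors.  The patch therefore renames
 * radeonsi's copies (r600_buffer_create -> si_buffer_create,
 * cayman_init_msaa -> si_init_msaa, ...) and gives file-local data
 * internal linkage. */

/* Before: exported and declared extern in the shared header, even though
 * only one file used it (hypothetical "_old" name for contrast). */
const unsigned eg_max_dist_2x_old = 4;

/* After: internal linkage — the symbol no longer appears in the object's
 * symbol table, so an identically named symbol in r600 cannot collide. */
static const unsigned eg_max_dist_2x = 4;

/* After: si_-prefixed name for the forked helper; the prototype just
 * mirrors the shape shown in the diff. */
struct pipe_screen;
struct pipe_resource;
struct pipe_resource *si_buffer_create(struct pipe_screen *screen,
                                       const struct pipe_resource *templ,
                                       unsigned alignment);

int main(void)
{
    /* Reference the static constant so the sketch compiles cleanly. */
    return (int)eg_max_dist_2x - 4;
}

Two flavors of the same fix appear throughout the diff: symbols used by only one file become static (dropping them from the export table entirely), while helpers still shared across radeonsi files keep external linkage but move to the si_ namespace.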
diff --git a/src/gallium/drivers/radeon/cayman_msaa.c b/src/gallium/drivers/radeon/cayman_msaa.c index 33f1040185a..4649d2cb8ae 100644 --- a/src/gallium/drivers/radeon/cayman_msaa.c +++ b/src/gallium/drivers/radeon/cayman_msaa.c @@ -28,22 +28,22 @@ /* 2xMSAA * There are two locations (4, 4), (-4, -4). */ -const uint32_t eg_sample_locs_2x[4] = { +static const uint32_t eg_sample_locs_2x[4] = { FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4), FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4), FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4), FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4), }; -const unsigned eg_max_dist_2x = 4; +static const unsigned eg_max_dist_2x = 4; /* 4xMSAA * There are 4 locations: (-2, 6), (6, -2), (-6, 2), (2, 6). */ -const uint32_t eg_sample_locs_4x[4] = { +static const uint32_t eg_sample_locs_4x[4] = { FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6), FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6), FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6), FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6), }; -const unsigned eg_max_dist_4x = 6; +static const unsigned eg_max_dist_4x = 6; /* Cayman 8xMSAA */ static const uint32_t cm_sample_locs_8x[] = { @@ -78,7 +78,7 @@ static const uint32_t cm_sample_locs_16x[] = { }; static const unsigned cm_max_dist_16x = 8; -void cayman_get_sample_position(struct pipe_context *ctx, unsigned sample_count, +void si_get_sample_position(struct pipe_context *ctx, unsigned sample_count, unsigned sample_index, float *out_value) { int offset, index; @@ -123,24 +123,24 @@ void cayman_get_sample_position(struct pipe_context *ctx, unsigned sample_count, } } -void cayman_init_msaa(struct pipe_context *ctx) +void si_init_msaa(struct pipe_context *ctx) { struct r600_common_context *rctx = (struct r600_common_context*)ctx; int i; - cayman_get_sample_position(ctx, 1, 0, rctx->sample_locations_1x[0]); + si_get_sample_position(ctx, 1, 0, rctx->sample_locations_1x[0]); for (i = 0; i < 2; i++) - cayman_get_sample_position(ctx, 2, i, rctx->sample_locations_2x[i]); + si_get_sample_position(ctx, 2, i, rctx->sample_locations_2x[i]); for (i = 0; i < 4; i++) - cayman_get_sample_position(ctx, 4, i, rctx->sample_locations_4x[i]); + si_get_sample_position(ctx, 4, i, rctx->sample_locations_4x[i]); for (i = 0; i < 8; i++) - cayman_get_sample_position(ctx, 8, i, rctx->sample_locations_8x[i]); + si_get_sample_position(ctx, 8, i, rctx->sample_locations_8x[i]); for (i = 0; i < 16; i++) - cayman_get_sample_position(ctx, 16, i, rctx->sample_locations_16x[i]); + si_get_sample_position(ctx, 16, i, rctx->sample_locations_16x[i]); } -void cayman_emit_msaa_sample_locs(struct radeon_winsys_cs *cs, int nr_samples) +void si_common_emit_msaa_sample_locs(struct radeon_winsys_cs *cs, int nr_samples) { switch (nr_samples) { default: @@ -201,9 +201,9 @@ void cayman_emit_msaa_sample_locs(struct radeon_winsys_cs *cs, int nr_samples) } } -void cayman_emit_msaa_config(struct radeon_winsys_cs *cs, int nr_samples, - int ps_iter_samples, int overrast_samples, - unsigned sc_mode_cntl_1) +void si_common_emit_msaa_config(struct radeon_winsys_cs *cs, int nr_samples, + int ps_iter_samples, int overrast_samples, + unsigned sc_mode_cntl_1) { int setup_samples = nr_samples > 1 ? nr_samples : overrast_samples > 1 ? 
overrast_samples : 0; diff --git a/src/gallium/drivers/radeon/r600_buffer_common.c b/src/gallium/drivers/radeon/r600_buffer_common.c index 7515f7d615b..706c7485c35 100644 --- a/src/gallium/drivers/radeon/r600_buffer_common.c +++ b/src/gallium/drivers/radeon/r600_buffer_common.c @@ -30,9 +30,9 @@ #include <inttypes.h> #include <stdio.h> -bool r600_rings_is_buffer_referenced(struct r600_common_context *ctx, - struct pb_buffer *buf, - enum radeon_bo_usage usage) +bool si_rings_is_buffer_referenced(struct r600_common_context *ctx, + struct pb_buffer *buf, + enum radeon_bo_usage usage) { if (ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, buf, usage)) { return true; @@ -44,9 +44,9 @@ bool r600_rings_is_buffer_referenced(struct r600_common_context *ctx, return false; } -void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx, - struct r600_resource *resource, - unsigned usage) +void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx, + struct r600_resource *resource, + unsigned usage) { enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE; bool busy = false; @@ -101,9 +101,9 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx, return ctx->ws->buffer_map(resource->buf, NULL, usage); } -void r600_init_resource_fields(struct r600_common_screen *rscreen, - struct r600_resource *res, - uint64_t size, unsigned alignment) +void si_init_resource_fields(struct r600_common_screen *rscreen, + struct r600_resource *res, + uint64_t size, unsigned alignment) { struct r600_texture *rtex = (struct r600_texture*)res; @@ -205,8 +205,8 @@ void r600_init_resource_fields(struct r600_common_screen *rscreen, res->gart_usage = size; } -bool r600_alloc_resource(struct r600_common_screen *rscreen, - struct r600_resource *res) +bool si_alloc_resource(struct r600_common_screen *rscreen, + struct r600_resource *res) { struct pb_buffer *old_buf, *new_buf; @@ -274,7 +274,7 @@ r600_invalidate_buffer(struct r600_common_context *rctx, return false; /* Check if mapping this buffer would cause waiting for the GPU. */ - if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) || + if (si_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) || !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) { rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b); } else { @@ -285,7 +285,7 @@ r600_invalidate_buffer(struct r600_common_context *rctx, } /* Replace the storage of dst with src. */ -void r600_replace_buffer_storage(struct pipe_context *ctx, +void si_replace_buffer_storage(struct pipe_context *ctx, struct pipe_resource *dst, struct pipe_resource *src) { @@ -308,8 +308,8 @@ void r600_replace_buffer_storage(struct pipe_context *ctx, rctx->rebind_buffer(ctx, dst, old_gpu_address); } -void r600_invalidate_resource(struct pipe_context *ctx, - struct pipe_resource *resource) +void si_invalidate_resource(struct pipe_context *ctx, + struct pipe_resource *resource) { struct r600_common_context *rctx = (struct r600_common_context*)ctx; struct r600_resource *rbuffer = r600_resource(resource); @@ -429,7 +429,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx, /* Check if mapping this buffer would cause waiting for the GPU. 
*/ if (rbuffer->flags & RADEON_FLAG_SPARSE || - r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) || + si_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) || !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) { /* Do a wait-free write-only transfer using a temporary buffer. */ unsigned offset; @@ -472,7 +472,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx, box->x % R600_MAP_BUFFER_ALIGNMENT, 0, 0, resource, 0, box); - data = r600_buffer_map_sync_with_rings(rctx, staging, + data = si_buffer_map_sync_with_rings(rctx, staging, usage & ~PIPE_TRANSFER_UNSYNCHRONIZED); if (!data) { r600_resource_reference(&staging, NULL); @@ -487,7 +487,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx, } } - data = r600_buffer_map_sync_with_rings(rctx, rbuffer, usage); + data = si_buffer_map_sync_with_rings(rctx, rbuffer, usage); if (!data) { return NULL; } @@ -557,10 +557,10 @@ static void r600_buffer_transfer_unmap(struct pipe_context *ctx, slab_free(&rctx->pool_transfers, transfer); } -void r600_buffer_subdata(struct pipe_context *ctx, - struct pipe_resource *buffer, - unsigned usage, unsigned offset, - unsigned size, const void *data) +void si_buffer_subdata(struct pipe_context *ctx, + struct pipe_resource *buffer, + unsigned usage, unsigned offset, + unsigned size, const void *data) { struct pipe_transfer *transfer = NULL; struct pipe_box box; @@ -611,30 +611,30 @@ r600_alloc_buffer_struct(struct pipe_screen *screen, return rbuffer; } -struct pipe_resource *r600_buffer_create(struct pipe_screen *screen, - const struct pipe_resource *templ, - unsigned alignment) +struct pipe_resource *si_buffer_create(struct pipe_screen *screen, + const struct pipe_resource *templ, + unsigned alignment) { struct r600_common_screen *rscreen = (struct r600_common_screen*)screen; struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ); - r600_init_resource_fields(rscreen, rbuffer, templ->width0, alignment); + si_init_resource_fields(rscreen, rbuffer, templ->width0, alignment); if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE) rbuffer->flags |= RADEON_FLAG_SPARSE; - if (!r600_alloc_resource(rscreen, rbuffer)) { + if (!si_alloc_resource(rscreen, rbuffer)) { FREE(rbuffer); return NULL; } return &rbuffer->b.b; } -struct pipe_resource *r600_aligned_buffer_create(struct pipe_screen *screen, - unsigned flags, - unsigned usage, - unsigned size, - unsigned alignment) +struct pipe_resource *si_aligned_buffer_create(struct pipe_screen *screen, + unsigned flags, + unsigned usage, + unsigned size, + unsigned alignment) { struct pipe_resource buffer; @@ -648,13 +648,13 @@ struct pipe_resource *r600_aligned_buffer_create(struct pipe_screen *screen, buffer.height0 = 1; buffer.depth0 = 1; buffer.array_size = 1; - return r600_buffer_create(screen, &buffer, alignment); + return si_buffer_create(screen, &buffer, alignment); } struct pipe_resource * -r600_buffer_from_user_memory(struct pipe_screen *screen, - const struct pipe_resource *templ, - void *user_memory) +si_buffer_from_user_memory(struct pipe_screen *screen, + const struct pipe_resource *templ, + void *user_memory) { struct r600_common_screen *rscreen = (struct r600_common_screen*)screen; struct radeon_winsys *ws = rscreen->ws; diff --git a/src/gallium/drivers/radeon/r600_gpu_load.c b/src/gallium/drivers/radeon/r600_gpu_load.c index d35be4f327a..625370b8ea4 100644 --- a/src/gallium/drivers/radeon/r600_gpu_load.c +++ b/src/gallium/drivers/radeon/r600_gpu_load.c @@ -162,7 +162,7 
@@ r600_gpu_load_thread(void *param) return 0; } -void r600_gpu_load_kill_thread(struct r600_common_screen *rscreen) +void si_gpu_load_kill_thread(struct r600_common_screen *rscreen) { if (!rscreen->gpu_load_thread) return; @@ -269,14 +269,14 @@ static unsigned busy_index_from_type(struct r600_common_screen *rscreen, } } -uint64_t r600_begin_counter(struct r600_common_screen *rscreen, unsigned type) +uint64_t si_begin_counter(struct r600_common_screen *rscreen, unsigned type) { unsigned busy_index = busy_index_from_type(rscreen, type); return r600_read_mmio_counter(rscreen, busy_index); } -unsigned r600_end_counter(struct r600_common_screen *rscreen, unsigned type, - uint64_t begin) +unsigned si_end_counter(struct r600_common_screen *rscreen, unsigned type, + uint64_t begin) { unsigned busy_index = busy_index_from_type(rscreen, type); return r600_end_mmio_counter(rscreen, begin, busy_index); diff --git a/src/gallium/drivers/radeon/r600_perfcounter.c b/src/gallium/drivers/radeon/r600_perfcounter.c index 48f609bcb41..13fd1e99e59 100644 --- a/src/gallium/drivers/radeon/r600_perfcounter.c +++ b/src/gallium/drivers/radeon/r600_perfcounter.c @@ -112,7 +112,7 @@ static void r600_pc_query_destroy(struct r600_common_screen *rscreen, FREE(query->counters); - r600_query_hw_destroy(rscreen, rquery); + si_query_hw_destroy(rscreen, rquery); } static bool r600_pc_query_prepare_buffer(struct r600_common_screen *screen, @@ -217,9 +217,9 @@ static void r600_pc_query_add_result(struct r600_common_screen *rscreen, static struct r600_query_ops batch_query_ops = { .destroy = r600_pc_query_destroy, - .begin = r600_query_hw_begin, - .end = r600_query_hw_end, - .get_result = r600_query_hw_get_result + .begin = si_query_hw_begin, + .end = si_query_hw_end, + .get_result = si_query_hw_get_result }; static struct r600_query_hw_ops batch_query_hw_ops = { @@ -297,9 +297,9 @@ static struct r600_pc_group *get_group_state(struct r600_common_screen *screen, return group; } -struct pipe_query *r600_create_batch_query(struct pipe_context *ctx, - unsigned num_queries, - unsigned *query_types) +struct pipe_query *si_create_batch_query(struct pipe_context *ctx, + unsigned num_queries, + unsigned *query_types) { struct r600_common_screen *screen = (struct r600_common_screen *)ctx->screen; @@ -417,7 +417,7 @@ struct pipe_query *r600_create_batch_query(struct pipe_context *ctx, counter->qwords *= block->num_instances; } - if (!r600_query_hw_init(screen, &query->b)) + if (!si_query_hw_init(screen, &query->b)) goto error; return (struct pipe_query *)query; @@ -511,9 +511,9 @@ static bool r600_init_block_names(struct r600_common_screen *screen, return true; } -int r600_get_perfcounter_info(struct r600_common_screen *screen, - unsigned index, - struct pipe_driver_query_info *info) +int si_get_perfcounter_info(struct r600_common_screen *screen, + unsigned index, + struct pipe_driver_query_info *info) { struct r600_perfcounters *pc = screen->perfcounters; struct r600_perfcounter_block *block; @@ -553,9 +553,9 @@ int r600_get_perfcounter_info(struct r600_common_screen *screen, return 1; } -int r600_get_perfcounter_group_info(struct r600_common_screen *screen, - unsigned index, - struct pipe_driver_query_group_info *info) +int si_get_perfcounter_group_info(struct r600_common_screen *screen, + unsigned index, + struct pipe_driver_query_group_info *info) { struct r600_perfcounters *pc = screen->perfcounters; struct r600_perfcounter_block *block; @@ -580,13 +580,13 @@ int r600_get_perfcounter_group_info(struct r600_common_screen *screen, 
return 1; } -void r600_perfcounters_destroy(struct r600_common_screen *rscreen) +void si_perfcounters_destroy(struct r600_common_screen *rscreen) { if (rscreen->perfcounters) rscreen->perfcounters->cleanup(rscreen); } -bool r600_perfcounters_init(struct r600_perfcounters *pc, +bool si_perfcounters_init(struct r600_perfcounters *pc, unsigned num_blocks) { pc->blocks = CALLOC(num_blocks, sizeof(struct r600_perfcounter_block)); @@ -599,11 +599,11 @@ bool r600_perfcounters_init(struct r600_perfcounters *pc, return true; } -void r600_perfcounters_add_block(struct r600_common_screen *rscreen, - struct r600_perfcounters *pc, - const char *name, unsigned flags, - unsigned counters, unsigned selectors, - unsigned instances, void *data) +void si_perfcounters_add_block(struct r600_common_screen *rscreen, + struct r600_perfcounters *pc, + const char *name, unsigned flags, + unsigned counters, unsigned selectors, + unsigned instances, void *data) { struct r600_perfcounter_block *block = &pc->blocks[pc->num_blocks]; @@ -636,7 +636,7 @@ void r600_perfcounters_add_block(struct r600_common_screen *rscreen, pc->num_groups += block->num_groups; } -void r600_perfcounters_do_destroy(struct r600_perfcounters *pc) +void si_perfcounters_do_destroy(struct r600_perfcounters *pc) { unsigned i; diff --git a/src/gallium/drivers/radeon/r600_pipe_common.c b/src/gallium/drivers/radeon/r600_pipe_common.c index 59fcb63fb7a..62bd5f6a98f 100644 --- a/src/gallium/drivers/radeon/r600_pipe_common.c +++ b/src/gallium/drivers/radeon/r600_pipe_common.c @@ -39,17 +39,8 @@ #include <inttypes.h> #include <sys/utsname.h> -#ifndef HAVE_LLVM -#define HAVE_LLVM 0 -#endif - -#if HAVE_LLVM #include <llvm-c/TargetMachine.h> -#endif -#ifndef MESA_LLVM_VERSION_PATCH -#define MESA_LLVM_VERSION_PATCH 0 -#endif struct r600_multi_fence { struct pipe_reference reference; @@ -66,12 +57,12 @@ struct r600_multi_fence { /* * shader binary helpers. */ -void radeon_shader_binary_init(struct ac_shader_binary *b) +void si_radeon_shader_binary_init(struct ac_shader_binary *b) { memset(b, 0, sizeof(*b)); } -void radeon_shader_binary_clean(struct ac_shader_binary *b) +void si_radeon_shader_binary_clean(struct ac_shader_binary *b) { if (!b) return; @@ -99,11 +90,11 @@ void radeon_shader_binary_clean(struct ac_shader_binary *b) * \param old_value Previous fence value (for a bug workaround) * \param new_value Fence value to write for this event. 
*/ -void r600_gfx_write_event_eop(struct r600_common_context *ctx, - unsigned event, unsigned event_flags, - unsigned data_sel, - struct r600_resource *buf, uint64_t va, - uint32_t new_fence, unsigned query_type) +void si_gfx_write_event_eop(struct r600_common_context *ctx, + unsigned event, unsigned event_flags, + unsigned data_sel, + struct r600_resource *buf, uint64_t va, + uint32_t new_fence, unsigned query_type) { struct radeon_winsys_cs *cs = ctx->gfx.cs; unsigned op = EVENT_TYPE(event) | @@ -183,7 +174,7 @@ void r600_gfx_write_event_eop(struct r600_common_context *ctx, RADEON_PRIO_QUERY); } -unsigned r600_gfx_write_fence_dwords(struct r600_common_screen *screen) +unsigned si_gfx_write_fence_dwords(struct r600_common_screen *screen) { unsigned dwords = 6; @@ -197,8 +188,8 @@ unsigned r600_gfx_write_fence_dwords(struct r600_common_screen *screen) return dwords; } -void r600_gfx_wait_fence(struct r600_common_context *ctx, - uint64_t va, uint32_t ref, uint32_t mask) +void si_gfx_wait_fence(struct r600_common_context *ctx, + uint64_t va, uint32_t ref, uint32_t mask) { struct radeon_winsys_cs *cs = ctx->gfx.cs; @@ -211,11 +202,11 @@ void r600_gfx_wait_fence(struct r600_common_context *ctx, radeon_emit(cs, 4); /* poll interval */ } -void r600_draw_rectangle(struct blitter_context *blitter, - int x1, int y1, int x2, int y2, - float depth, unsigned num_instances, - enum blitter_attrib_type type, - const union blitter_attrib *attrib) +void si_draw_rectangle(struct blitter_context *blitter, + int x1, int y1, int x2, int y2, + float depth, unsigned num_instances, + enum blitter_attrib_type type, + const union blitter_attrib *attrib) { struct r600_common_context *rctx = (struct r600_common_context*)util_blitter_get_pipe(blitter); @@ -309,8 +300,8 @@ static void r600_dma_emit_wait_idle(struct r600_common_context *rctx) } } -void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw, - struct r600_resource *dst, struct r600_resource *src) +void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw, + struct r600_resource *dst, struct r600_resource *src) { uint64_t vram = ctx->dma.cs->used_vram; uint64_t gtt = ctx->dma.cs->used_gart; @@ -387,29 +378,29 @@ static void r600_memory_barrier(struct pipe_context *ctx, unsigned flags) { } -void r600_preflush_suspend_features(struct r600_common_context *ctx) +void si_preflush_suspend_features(struct r600_common_context *ctx) { /* suspend queries */ if (!LIST_IS_EMPTY(&ctx->active_queries)) - r600_suspend_queries(ctx); + si_suspend_queries(ctx); ctx->streamout.suspended = false; if (ctx->streamout.begin_emitted) { - r600_emit_streamout_end(ctx); + si_emit_streamout_end(ctx); ctx->streamout.suspended = true; } } -void r600_postflush_resume_features(struct r600_common_context *ctx) +void si_postflush_resume_features(struct r600_common_context *ctx) { if (ctx->streamout.suspended) { ctx->streamout.append_bitmask = ctx->streamout.enabled_mask; - r600_streamout_buffers_dirty(ctx); + si_streamout_buffers_dirty(ctx); } /* resume queries */ if (!LIST_IS_EMPTY(&ctx->active_queries)) - r600_resume_queries(ctx); + si_resume_queries(ctx); } static void r600_add_fence_dependency(struct r600_common_context *rctx, @@ -542,7 +533,7 @@ static void r600_flush_dma_ring(void *ctx, unsigned flags, } if (check_vm) - radeon_save_cs(rctx->ws, cs, &saved, true); + si_save_cs(rctx->ws, cs, &saved, true); rctx->ws->cs_flush(cs, flags, &rctx->last_sdma_fence); if (fence) @@ -555,7 +546,7 @@ static void r600_flush_dma_ring(void *ctx, unsigned flags, 
rctx->ws->fence_wait(rctx->ws, rctx->last_sdma_fence, 800*1000*1000); rctx->check_vm_faults(rctx, &saved, RING_DMA); - radeon_clear_saved_cs(&saved); + si_clear_saved_cs(&saved); } } @@ -563,8 +554,8 @@ static void r600_flush_dma_ring(void *ctx, unsigned flags, * Store a linearized copy of all chunks of \p cs together with the buffer * list in \p saved. */ -void radeon_save_cs(struct radeon_winsys *ws, struct radeon_winsys_cs *cs, - struct radeon_saved_cs *saved, bool get_buffer_list) +void si_save_cs(struct radeon_winsys *ws, struct radeon_winsys_cs *cs, + struct radeon_saved_cs *saved, bool get_buffer_list) { uint32_t *buf; unsigned i; @@ -602,7 +593,7 @@ oom: memset(saved, 0, sizeof(*saved)); } -void radeon_clear_saved_cs(struct radeon_saved_cs *saved) +void si_clear_saved_cs(struct radeon_saved_cs *saved) { FREE(saved->ib); FREE(saved->bo_list); @@ -646,7 +637,7 @@ static void r600_set_device_reset_callback(struct pipe_context *ctx, sizeof(rctx->device_reset_callback)); } -bool r600_check_device_reset(struct r600_common_context *rctx) +bool si_check_device_reset(struct r600_common_context *rctx) { enum pipe_reset_status status; @@ -708,9 +699,9 @@ static bool r600_resource_commit(struct pipe_context *pctx, return ctx->ws->buffer_commit(res->buf, box->x, box->width, commit); } -bool r600_common_context_init(struct r600_common_context *rctx, - struct r600_common_screen *rscreen, - unsigned context_flags) +bool si_common_context_init(struct r600_common_context *rctx, + struct r600_common_screen *rscreen, + unsigned context_flags) { slab_create_child(&rctx->pool_transfers, &rscreen->pool_transfers); slab_create_child(&rctx->pool_transfers_unsync, &rscreen->pool_transfers); @@ -720,7 +711,7 @@ bool r600_common_context_init(struct r600_common_context *rctx, rctx->family = rscreen->family; rctx->chip_class = rscreen->chip_class; - rctx->b.invalidate_resource = r600_invalidate_resource; + rctx->b.invalidate_resource = si_invalidate_resource; rctx->b.resource_commit = r600_resource_commit; rctx->b.transfer_map = u_transfer_map_vtbl; rctx->b.transfer_flush_region = u_transfer_flush_region_vtbl; @@ -731,15 +722,7 @@ bool r600_common_context_init(struct r600_common_context *rctx, rctx->b.set_debug_callback = r600_set_debug_callback; rctx->b.fence_server_sync = r600_fence_server_sync; rctx->dma_clear_buffer = r600_dma_clear_buffer_fallback; - - /* evergreen_compute.c has a special codepath for global buffers. - * Everything else can use the direct path. 
- */ - if ((rscreen->chip_class == EVERGREEN || rscreen->chip_class == CAYMAN) && - (context_flags & PIPE_CONTEXT_COMPUTE_ONLY)) - rctx->b.buffer_subdata = u_default_buffer_subdata; - else - rctx->b.buffer_subdata = r600_buffer_subdata; + rctx->b.buffer_subdata = si_buffer_subdata; if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 43) { rctx->b.get_device_reset_status = r600_get_reset_status; @@ -750,11 +733,11 @@ bool r600_common_context_init(struct r600_common_context *rctx, rctx->b.set_device_reset_callback = r600_set_device_reset_callback; - r600_init_context_texture_functions(rctx); - r600_init_viewport_functions(rctx); - r600_streamout_init(rctx); - r600_query_init(rctx); - cayman_init_msaa(&rctx->b); + si_init_context_texture_functions(rctx); + si_init_viewport_functions(rctx); + si_streamout_init(rctx); + si_init_query_functions(rctx); + si_init_msaa(&rctx->b); if (rctx->chip_class == CIK || rctx->chip_class == VI || @@ -796,7 +779,7 @@ bool r600_common_context_init(struct r600_common_context *rctx, return true; } -void r600_common_context_cleanup(struct r600_common_context *rctx) +void si_common_context_cleanup(struct r600_common_context *rctx) { unsigned i,j; @@ -976,19 +959,14 @@ static void r600_disk_cache_create(struct r600_common_screen *rscreen) &mesa_timestamp)) { char *timestamp_str; int res = -1; - if (rscreen->chip_class < SI) { - res = asprintf(×tamp_str, "%u",mesa_timestamp); - } -#if HAVE_LLVM - else { - uint32_t llvm_timestamp; - if (disk_cache_get_function_timestamp(LLVMInitializeAMDGPUTargetInfo, - &llvm_timestamp)) { - res = asprintf(×tamp_str, "%u_%u", - mesa_timestamp, llvm_timestamp); - } + uint32_t llvm_timestamp; + + if (disk_cache_get_function_timestamp(LLVMInitializeAMDGPUTargetInfo, + &llvm_timestamp)) { + res = asprintf(×tamp_str, "%u_%u", + mesa_timestamp, llvm_timestamp); } -#endif + if (res != -1) { /* These flags affect shader compilation. */ uint64_t shader_debug_flags = @@ -1074,7 +1052,7 @@ static int r600_get_video_param(struct pipe_screen *screen, } } -const char *r600_get_llvm_processor_name(enum radeon_family family) +const char *si_get_llvm_processor_name(enum radeon_family family) { switch (family) { case CHIP_R600: @@ -1161,10 +1139,7 @@ static unsigned get_max_threads_per_block(struct r600_common_screen *screen, /* Up to 40 waves per thread-group on GCN < gfx9. Expose a nice * round number. */ - if (screen->chip_class >= SI) - return 2048; - - return 256; + return 2048; } static int r600_get_compute_param(struct pipe_screen *screen, @@ -1193,7 +1168,7 @@ static int r600_get_compute_param(struct pipe_screen *screen, * GPUs, so we need to use the name of a similar GPU. 
*/ default: - gpu = r600_get_llvm_processor_name(rscreen->family); + gpu = si_get_llvm_processor_name(rscreen->family); break; } if (ret) { @@ -1237,9 +1212,7 @@ static int r600_get_compute_param(struct pipe_screen *screen, case PIPE_COMPUTE_CAP_ADDRESS_BITS: if (ret) { uint32_t *address_bits = ret; - address_bits[0] = 32; - if (rscreen->chip_class >= SI) - address_bits[0] = 64; + address_bits[0] = 64; } return 1 * sizeof(uint32_t); @@ -1319,8 +1292,7 @@ static int r600_get_compute_param(struct pipe_screen *screen, case PIPE_COMPUTE_CAP_MAX_VARIABLE_THREADS_PER_BLOCK: if (ret) { uint64_t *max_variable_threads_per_block = ret; - if (rscreen->chip_class >= SI && - ir_type == PIPE_SHADER_IR_TGSI) + if (ir_type == PIPE_SHADER_IR_TGSI) *max_variable_threads_per_block = SI_MAX_VARIABLE_THREADS_PER_BLOCK; else *max_variable_threads_per_block = 0; @@ -1444,18 +1416,18 @@ static void r600_query_memory_info(struct pipe_screen *screen, info->nr_device_memory_evictions = info->device_memory_evicted / 64; } -struct pipe_resource *r600_resource_create_common(struct pipe_screen *screen, - const struct pipe_resource *templ) +struct pipe_resource *si_resource_create_common(struct pipe_screen *screen, + const struct pipe_resource *templ) { if (templ->target == PIPE_BUFFER) { - return r600_buffer_create(screen, templ, 256); + return si_buffer_create(screen, templ, 256); } else { - return r600_texture_create(screen, templ); + return si_texture_create(screen, templ); } } -bool r600_common_screen_init(struct r600_common_screen *rscreen, - struct radeon_winsys *ws) +bool si_common_screen_init(struct r600_common_screen *rscreen, + struct radeon_winsys *ws) { char family_name[32] = {}, llvm_string[32] = {}, kernel_version[128] = {}; struct utsname uname_data; @@ -1496,19 +1468,19 @@ bool r600_common_screen_init(struct r600_common_screen *rscreen, rscreen->b.fence_finish = r600_fence_finish; rscreen->b.fence_reference = r600_fence_reference; rscreen->b.resource_destroy = u_resource_destroy_vtbl; - rscreen->b.resource_from_user_memory = r600_buffer_from_user_memory; + rscreen->b.resource_from_user_memory = si_buffer_from_user_memory; rscreen->b.query_memory_info = r600_query_memory_info; if (rscreen->info.has_hw_decode) { - rscreen->b.get_video_param = rvid_get_video_param; - rscreen->b.is_video_format_supported = rvid_is_format_supported; + rscreen->b.get_video_param = si_vid_get_video_param; + rscreen->b.is_video_format_supported = si_vid_is_format_supported; } else { rscreen->b.get_video_param = r600_get_video_param; rscreen->b.is_video_format_supported = vl_video_buffer_is_format_supported; } - r600_init_screen_texture_functions(rscreen); - r600_init_screen_query_functions(rscreen); + si_init_screen_texture_functions(rscreen); + si_init_screen_query_functions(rscreen); rscreen->family = rscreen->info.family; rscreen->chip_class = rscreen->info.chip_class; @@ -1587,10 +1559,10 @@ bool r600_common_screen_init(struct r600_common_screen *rscreen, return true; } -void r600_destroy_common_screen(struct r600_common_screen *rscreen) +void si_destroy_common_screen(struct r600_common_screen *rscreen) { - r600_perfcounters_destroy(rscreen); - r600_gpu_load_kill_thread(rscreen); + si_perfcounters_destroy(rscreen); + si_gpu_load_kill_thread(rscreen); mtx_destroy(&rscreen->gpu_load_mutex); mtx_destroy(&rscreen->aux_context_lock); @@ -1603,20 +1575,20 @@ void r600_destroy_common_screen(struct r600_common_screen *rscreen) FREE(rscreen); } -bool r600_can_dump_shader(struct r600_common_screen *rscreen, - unsigned processor) +bool 
si_can_dump_shader(struct r600_common_screen *rscreen, + unsigned processor) { return rscreen->debug_flags & (1 << processor); } -bool r600_extra_shader_checks(struct r600_common_screen *rscreen, unsigned processor) +bool si_extra_shader_checks(struct r600_common_screen *rscreen, unsigned processor) { return (rscreen->debug_flags & DBG_CHECK_IR) || - r600_can_dump_shader(rscreen, processor); + si_can_dump_shader(rscreen, processor); } -void r600_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_resource *dst, - uint64_t offset, uint64_t size, unsigned value) +void si_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_resource *dst, + uint64_t offset, uint64_t size, unsigned value) { struct r600_common_context *rctx = (struct r600_common_context*)rscreen->aux_context; diff --git a/src/gallium/drivers/radeon/r600_pipe_common.h b/src/gallium/drivers/radeon/r600_pipe_common.h index bd0dc76ec2b..1259257eead 100644 --- a/src/gallium/drivers/radeon/r600_pipe_common.h +++ b/src/gallium/drivers/radeon/r600_pipe_common.h @@ -141,8 +141,8 @@ struct r600_perfcounters; struct tgsi_shader_info; struct r600_qbo_state; -void radeon_shader_binary_init(struct ac_shader_binary *b); -void radeon_shader_binary_clean(struct ac_shader_binary *b); +void si_radeon_shader_binary_init(struct ac_shader_binary *b); +void si_radeon_shader_binary_clean(struct ac_shader_binary *b); /* Only 32-bit buffer allocations are supported, gallium doesn't support more * at the moment. @@ -723,130 +723,125 @@ struct r600_common_context { }; /* r600_buffer_common.c */ -bool r600_rings_is_buffer_referenced(struct r600_common_context *ctx, - struct pb_buffer *buf, - enum radeon_bo_usage usage); -void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx, - struct r600_resource *resource, - unsigned usage); -void r600_buffer_subdata(struct pipe_context *ctx, - struct pipe_resource *buffer, - unsigned usage, unsigned offset, - unsigned size, const void *data); -void r600_init_resource_fields(struct r600_common_screen *rscreen, - struct r600_resource *res, - uint64_t size, unsigned alignment); -bool r600_alloc_resource(struct r600_common_screen *rscreen, - struct r600_resource *res); -struct pipe_resource *r600_buffer_create(struct pipe_screen *screen, - const struct pipe_resource *templ, - unsigned alignment); -struct pipe_resource * r600_aligned_buffer_create(struct pipe_screen *screen, - unsigned flags, - unsigned usage, - unsigned size, - unsigned alignment); +bool si_rings_is_buffer_referenced(struct r600_common_context *ctx, + struct pb_buffer *buf, + enum radeon_bo_usage usage); +void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx, + struct r600_resource *resource, + unsigned usage); +void si_buffer_subdata(struct pipe_context *ctx, + struct pipe_resource *buffer, + unsigned usage, unsigned offset, + unsigned size, const void *data); +void si_init_resource_fields(struct r600_common_screen *rscreen, + struct r600_resource *res, + uint64_t size, unsigned alignment); +bool si_alloc_resource(struct r600_common_screen *rscreen, + struct r600_resource *res); +struct pipe_resource *si_buffer_create(struct pipe_screen *screen, + const struct pipe_resource *templ, + unsigned alignment); +struct pipe_resource *si_aligned_buffer_create(struct pipe_screen *screen, + unsigned flags, + unsigned usage, + unsigned size, + unsigned alignment); struct pipe_resource * -r600_buffer_from_user_memory(struct pipe_screen *screen, - const struct pipe_resource *templ, - void *user_memory); -void 
-r600_invalidate_resource(struct pipe_context *ctx, - struct pipe_resource *resource); -void r600_replace_buffer_storage(struct pipe_context *ctx, - struct pipe_resource *dst, - struct pipe_resource *src); +si_buffer_from_user_memory(struct pipe_screen *screen, + const struct pipe_resource *templ, + void *user_memory); +void si_invalidate_resource(struct pipe_context *ctx, + struct pipe_resource *resource); +void si_replace_buffer_storage(struct pipe_context *ctx, + struct pipe_resource *dst, + struct pipe_resource *src); /* r600_common_pipe.c */ -void r600_gfx_write_event_eop(struct r600_common_context *ctx, - unsigned event, unsigned event_flags, - unsigned data_sel, - struct r600_resource *buf, uint64_t va, - uint32_t new_fence, unsigned query_type); -unsigned r600_gfx_write_fence_dwords(struct r600_common_screen *screen); -void r600_gfx_wait_fence(struct r600_common_context *ctx, - uint64_t va, uint32_t ref, uint32_t mask); -void r600_draw_rectangle(struct blitter_context *blitter, - int x1, int y1, int x2, int y2, - float depth, unsigned num_instances, - enum blitter_attrib_type type, - const union blitter_attrib *attrib); -bool r600_common_screen_init(struct r600_common_screen *rscreen, - struct radeon_winsys *ws); -void r600_destroy_common_screen(struct r600_common_screen *rscreen); -void r600_preflush_suspend_features(struct r600_common_context *ctx); -void r600_postflush_resume_features(struct r600_common_context *ctx); -bool r600_common_context_init(struct r600_common_context *rctx, - struct r600_common_screen *rscreen, - unsigned context_flags); -void r600_common_context_cleanup(struct r600_common_context *rctx); -bool r600_can_dump_shader(struct r600_common_screen *rscreen, - unsigned processor); -bool r600_extra_shader_checks(struct r600_common_screen *rscreen, - unsigned processor); -void r600_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_resource *dst, - uint64_t offset, uint64_t size, unsigned value); -struct pipe_resource *r600_resource_create_common(struct pipe_screen *screen, - const struct pipe_resource *templ); -const char *r600_get_llvm_processor_name(enum radeon_family family); -void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw, - struct r600_resource *dst, struct r600_resource *src); -void radeon_save_cs(struct radeon_winsys *ws, struct radeon_winsys_cs *cs, - struct radeon_saved_cs *saved, bool get_buffer_list); -void radeon_clear_saved_cs(struct radeon_saved_cs *saved); -bool r600_check_device_reset(struct r600_common_context *rctx); +void si_gfx_write_event_eop(struct r600_common_context *ctx, + unsigned event, unsigned event_flags, + unsigned data_sel, + struct r600_resource *buf, uint64_t va, + uint32_t new_fence, unsigned query_type); +unsigned si_gfx_write_fence_dwords(struct r600_common_screen *screen); +void si_gfx_wait_fence(struct r600_common_context *ctx, + uint64_t va, uint32_t ref, uint32_t mask); +void si_draw_rectangle(struct blitter_context *blitter, + int x1, int y1, int x2, int y2, + float depth, unsigned num_instances, + enum blitter_attrib_type type, + const union blitter_attrib *attrib); +bool si_common_screen_init(struct r600_common_screen *rscreen, + struct radeon_winsys *ws); +void si_destroy_common_screen(struct r600_common_screen *rscreen); +void si_preflush_suspend_features(struct r600_common_context *ctx); +void si_postflush_resume_features(struct r600_common_context *ctx); +bool si_common_context_init(struct r600_common_context *rctx, + struct r600_common_screen *rscreen, + unsigned 
context_flags); +void si_common_context_cleanup(struct r600_common_context *rctx); +bool si_can_dump_shader(struct r600_common_screen *rscreen, + unsigned processor); +bool si_extra_shader_checks(struct r600_common_screen *rscreen, + unsigned processor); +void si_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_resource *dst, + uint64_t offset, uint64_t size, unsigned value); +struct pipe_resource *si_resource_create_common(struct pipe_screen *screen, + const struct pipe_resource *templ); +const char *si_get_llvm_processor_name(enum radeon_family family); +void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw, + struct r600_resource *dst, struct r600_resource *src); +void si_save_cs(struct radeon_winsys *ws, struct radeon_winsys_cs *cs, + struct radeon_saved_cs *saved, bool get_buffer_list); +void si_clear_saved_cs(struct radeon_saved_cs *saved); +bool si_check_device_reset(struct r600_common_context *rctx); /* r600_gpu_load.c */ -void r600_gpu_load_kill_thread(struct r600_common_screen *rscreen); -uint64_t r600_begin_counter(struct r600_common_screen *rscreen, unsigned type); -unsigned r600_end_counter(struct r600_common_screen *rscreen, unsigned type, - uint64_t begin); +void si_gpu_load_kill_thread(struct r600_common_screen *rscreen); +uint64_t si_begin_counter(struct r600_common_screen *rscreen, unsigned type); +unsigned si_end_counter(struct r600_common_screen *rscreen, unsigned type, + uint64_t begin); /* r600_perfcounters.c */ -void r600_perfcounters_destroy(struct r600_common_screen *rscreen); +void si_perfcounters_destroy(struct r600_common_screen *rscreen); /* r600_query.c */ -void r600_init_screen_query_functions(struct r600_common_screen *rscreen); -void r600_query_init(struct r600_common_context *rctx); -void r600_suspend_queries(struct r600_common_context *ctx); -void r600_resume_queries(struct r600_common_context *ctx); -void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen); +void si_init_screen_query_functions(struct r600_common_screen *rscreen); +void si_init_query_functions(struct r600_common_context *rctx); +void si_suspend_queries(struct r600_common_context *ctx); +void si_resume_queries(struct r600_common_context *ctx); /* r600_streamout.c */ -void r600_streamout_buffers_dirty(struct r600_common_context *rctx); -void r600_set_streamout_targets(struct pipe_context *ctx, - unsigned num_targets, - struct pipe_stream_output_target **targets, - const unsigned *offset); -void r600_emit_streamout_end(struct r600_common_context *rctx); -void r600_update_prims_generated_query_state(struct r600_common_context *rctx, - unsigned type, int diff); -void r600_streamout_init(struct r600_common_context *rctx); +void si_streamout_buffers_dirty(struct r600_common_context *rctx); +void si_common_set_streamout_targets(struct pipe_context *ctx, + unsigned num_targets, + struct pipe_stream_output_target **targets, + const unsigned *offset); +void si_emit_streamout_end(struct r600_common_context *rctx); +void si_update_prims_generated_query_state(struct r600_common_context *rctx, + unsigned type, int diff); +void si_streamout_init(struct r600_common_context *rctx); /* r600_test_dma.c */ -void r600_test_dma(struct r600_common_screen *rscreen); +void si_test_dma(struct r600_common_screen *rscreen); /* r600_texture.c */ -bool r600_prepare_for_dma_blit(struct r600_common_context *rctx, - struct r600_texture *rdst, - unsigned dst_level, unsigned dstx, - unsigned dsty, unsigned dstz, - struct r600_texture *rsrc, - unsigned src_level, - const 
struct pipe_box *src_box); -void r600_texture_get_fmask_info(struct r600_common_screen *rscreen, - struct r600_texture *rtex, - unsigned nr_samples, - struct r600_fmask_info *out); -void r600_texture_get_cmask_info(struct r600_common_screen *rscreen, - struct r600_texture *rtex, - struct r600_cmask_info *out); -bool r600_init_flushed_depth_texture(struct pipe_context *ctx, - struct pipe_resource *texture, - struct r600_texture **staging); -void r600_print_texture_info(struct r600_common_screen *rscreen, - struct r600_texture *rtex, struct u_log_context *log); -struct pipe_resource *r600_texture_create(struct pipe_screen *screen, +bool si_prepare_for_dma_blit(struct r600_common_context *rctx, + struct r600_texture *rdst, + unsigned dst_level, unsigned dstx, + unsigned dsty, unsigned dstz, + struct r600_texture *rsrc, + unsigned src_level, + const struct pipe_box *src_box); +void si_texture_get_fmask_info(struct r600_common_screen *rscreen, + struct r600_texture *rtex, + unsigned nr_samples, + struct r600_fmask_info *out); +bool si_init_flushed_depth_texture(struct pipe_context *ctx, + struct pipe_resource *texture, + struct r600_texture **staging); +void si_print_texture_info(struct r600_common_screen *rscreen, + struct r600_texture *rtex, struct u_log_context *log); +struct pipe_resource *si_texture_create(struct pipe_screen *screen, const struct pipe_resource *templ); bool vi_dcc_formats_compatible(enum pipe_format format1, enum pipe_format format2); @@ -857,12 +852,12 @@ void vi_disable_dcc_if_incompatible_format(struct r600_common_context *rctx, struct pipe_resource *tex, unsigned level, enum pipe_format view_format); -struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe, - struct pipe_resource *texture, - const struct pipe_surface *templ, - unsigned width0, unsigned height0, - unsigned width, unsigned height); -unsigned r600_translate_colorswap(enum pipe_format format, bool do_endian_swap); +struct pipe_surface *si_create_surface_custom(struct pipe_context *pipe, + struct pipe_resource *texture, + const struct pipe_surface *templ, + unsigned width0, unsigned height0, + unsigned width, unsigned height); +unsigned si_translate_colorswap(enum pipe_format format, bool do_endian_swap); void vi_separate_dcc_start_query(struct pipe_context *ctx, struct r600_texture *tex); void vi_separate_dcc_stop_query(struct pipe_context *ctx, @@ -872,37 +867,33 @@ void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx, void vi_dcc_clear_level(struct r600_common_context *rctx, struct r600_texture *rtex, unsigned level, unsigned clear_value); -void evergreen_do_fast_color_clear(struct r600_common_context *rctx, - struct pipe_framebuffer_state *fb, - struct r600_atom *fb_state, - unsigned *buffers, ubyte *dirty_cbufs, - const union pipe_color_union *color); -bool r600_texture_disable_dcc(struct r600_common_context *rctx, - struct r600_texture *rtex); -void r600_init_screen_texture_functions(struct r600_common_screen *rscreen); -void r600_init_context_texture_functions(struct r600_common_context *rctx); +void si_do_fast_color_clear(struct r600_common_context *rctx, + struct pipe_framebuffer_state *fb, + struct r600_atom *fb_state, + unsigned *buffers, ubyte *dirty_cbufs, + const union pipe_color_union *color); +bool si_texture_disable_dcc(struct r600_common_context *rctx, + struct r600_texture *rtex); +void si_init_screen_texture_functions(struct r600_common_screen *rscreen); +void si_init_context_texture_functions(struct r600_common_context *rctx); /* r600_viewport.c */ 
-void evergreen_apply_scissor_bug_workaround(struct r600_common_context *rctx, - struct pipe_scissor_state *scissor); -void r600_viewport_set_rast_deps(struct r600_common_context *rctx, - bool scissor_enable, bool clip_halfz); -void r600_update_vs_writes_viewport_index(struct r600_common_context *rctx, - struct tgsi_shader_info *info); -void r600_init_viewport_functions(struct r600_common_context *rctx); +void si_apply_scissor_bug_workaround(struct r600_common_context *rctx, + struct pipe_scissor_state *scissor); +void si_viewport_set_rast_deps(struct r600_common_context *rctx, + bool scissor_enable, bool clip_halfz); +void si_update_vs_writes_viewport_index(struct r600_common_context *rctx, + struct tgsi_shader_info *info); +void si_init_viewport_functions(struct r600_common_context *rctx); /* cayman_msaa.c */ -extern const uint32_t eg_sample_locs_2x[4]; -extern const unsigned eg_max_dist_2x; -extern const uint32_t eg_sample_locs_4x[4]; -extern const unsigned eg_max_dist_4x; -void cayman_get_sample_position(struct pipe_context *ctx, unsigned sample_count, - unsigned sample_index, float *out_value); -void cayman_init_msaa(struct pipe_context *ctx); -void cayman_emit_msaa_sample_locs(struct radeon_winsys_cs *cs, int nr_samples); -void cayman_emit_msaa_config(struct radeon_winsys_cs *cs, int nr_samples, - int ps_iter_samples, int overrast_samples, - unsigned sc_mode_cntl_1); +void si_get_sample_position(struct pipe_context *ctx, unsigned sample_count, + unsigned sample_index, float *out_value); +void si_init_msaa(struct pipe_context *ctx); +void si_common_emit_msaa_sample_locs(struct radeon_winsys_cs *cs, int nr_samples); +void si_common_emit_msaa_config(struct radeon_winsys_cs *cs, int nr_samples, + int ps_iter_samples, int overrast_samples, + unsigned sc_mode_cntl_1); /* Inline helpers. 
*/ diff --git a/src/gallium/drivers/radeon/r600_query.c b/src/gallium/drivers/radeon/r600_query.c index 66bf4d88258..9d850e97429 100644 --- a/src/gallium/drivers/radeon/r600_query.c +++ b/src/gallium/drivers/radeon/r600_query.c @@ -219,7 +219,7 @@ static bool r600_query_sw_begin(struct r600_common_context *rctx, case R600_QUERY_GPU_SURF_SYNC_BUSY: case R600_QUERY_GPU_CP_DMA_BUSY: case R600_QUERY_GPU_SCRATCH_RAM_BUSY: - query->begin_result = r600_begin_counter(rctx->screen, + query->begin_result = si_begin_counter(rctx->screen, query->b.type); break; case R600_QUERY_NUM_COMPILATIONS: @@ -375,7 +375,7 @@ static bool r600_query_sw_end(struct r600_common_context *rctx, case R600_QUERY_GPU_SURF_SYNC_BUSY: case R600_QUERY_GPU_CP_DMA_BUSY: case R600_QUERY_GPU_SCRATCH_RAM_BUSY: - query->end_result = r600_end_counter(rctx->screen, + query->end_result = si_end_counter(rctx->screen, query->b.type, query->begin_result); query->begin_result = 0; @@ -494,8 +494,8 @@ static struct pipe_query *r600_query_sw_create(unsigned query_type) return (struct pipe_query *)query; } -void r600_query_hw_destroy(struct r600_common_screen *rscreen, - struct r600_query *rquery) +void si_query_hw_destroy(struct r600_common_screen *rscreen, + struct r600_query *rquery) { struct r600_query_hw *query = (struct r600_query_hw *)rquery; struct r600_query_buffer *prev = query->buffer.previous; @@ -583,10 +583,10 @@ static void r600_query_hw_get_result_resource(struct r600_common_context *rctx, unsigned offset); static struct r600_query_ops query_hw_ops = { - .destroy = r600_query_hw_destroy, - .begin = r600_query_hw_begin, - .end = r600_query_hw_end, - .get_result = r600_query_hw_get_result, + .destroy = si_query_hw_destroy, + .begin = si_query_hw_begin, + .end = si_query_hw_end, + .get_result = si_query_hw_get_result, .get_result_resource = r600_query_hw_get_result_resource, }; @@ -612,8 +612,8 @@ static struct r600_query_hw_ops query_hw_default_hw_ops = { .add_result = r600_query_hw_add_result, }; -bool r600_query_hw_init(struct r600_common_screen *rscreen, - struct r600_query_hw *query) +bool si_query_hw_init(struct r600_common_screen *rscreen, + struct r600_query_hw *query) { query->buffer.buf = r600_new_query_buffer(rscreen, query); if (!query->buffer.buf) @@ -641,16 +641,16 @@ static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscree query->result_size = 16 * rscreen->info.num_render_backends; query->result_size += 16; /* for the fence + alignment */ query->num_cs_dw_begin = 6; - query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen); + query->num_cs_dw_end = 6 + si_gfx_write_fence_dwords(rscreen); break; case PIPE_QUERY_TIME_ELAPSED: query->result_size = 24; query->num_cs_dw_begin = 8; - query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen); + query->num_cs_dw_end = 8 + si_gfx_write_fence_dwords(rscreen); break; case PIPE_QUERY_TIMESTAMP: query->result_size = 16; - query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen); + query->num_cs_dw_end = 8 + si_gfx_write_fence_dwords(rscreen); query->flags = R600_QUERY_HW_FLAG_NO_START; break; case PIPE_QUERY_PRIMITIVES_EMITTED: @@ -670,11 +670,11 @@ static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscree query->num_cs_dw_end = 6 * R600_MAX_STREAMS; break; case PIPE_QUERY_PIPELINE_STATISTICS: - /* 11 values on EG, 8 on R600. */ - query->result_size = (rscreen->chip_class >= EVERGREEN ? 11 : 8) * 16; + /* 11 values on GCN. 
*/ + query->result_size = 11 * 16; query->result_size += 8; /* for the fence + alignment */ query->num_cs_dw_begin = 6; - query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen); + query->num_cs_dw_end = 6 + si_gfx_write_fence_dwords(rscreen); break; default: assert(0); @@ -682,7 +682,7 @@ static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscree return NULL; } - if (!r600_query_hw_init(rscreen, query)) { + if (!si_query_hw_init(rscreen, query)) { FREE(query); return NULL; } @@ -782,7 +782,7 @@ static void r600_query_hw_do_emit_start(struct r600_common_context *ctx, /* Write the timestamp after the last draw is done. * (bottom-of-pipe) */ - r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, + si_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0, EOP_DATA_SEL_TIMESTAMP, NULL, va, 0, query->b.type); } @@ -809,7 +809,7 @@ static void r600_query_hw_emit_start(struct r600_common_context *ctx, return; // previous buffer allocation failure r600_update_occlusion_query_state(ctx, query->b.type, 1); - r600_update_prims_generated_query_state(ctx, query->b.type, 1); + si_update_prims_generated_query_state(ctx, query->b.type, 1); ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end, true); @@ -869,7 +869,7 @@ static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx, va += 8; /* fall through */ case PIPE_QUERY_TIMESTAMP: - r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, + si_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0, EOP_DATA_SEL_TIMESTAMP, NULL, va, 0, query->b.type); fence_va = va + 8; @@ -893,7 +893,7 @@ static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx, RADEON_PRIO_QUERY); if (fence_va) - r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0, + si_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0, EOP_DATA_SEL_VALUE_32BIT, query->buffer.buf, fence_va, 0x80000000, query->b.type); @@ -923,7 +923,7 @@ static void r600_query_hw_emit_stop(struct r600_common_context *ctx, ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end; r600_update_occlusion_query_state(ctx, query->b.type, -1); - r600_update_prims_generated_query_state(ctx, query->b.type, -1); + si_update_prims_generated_query_state(ctx, query->b.type, -1); } static void emit_set_predicate(struct r600_common_context *ctx, @@ -1057,8 +1057,8 @@ static boolean r600_begin_query(struct pipe_context *ctx, return rquery->ops->begin(rctx, rquery); } -void r600_query_hw_reset_buffers(struct r600_common_context *rctx, - struct r600_query_hw *query) +void si_query_hw_reset_buffers(struct r600_common_context *rctx, + struct r600_query_hw *query) { struct r600_query_buffer *prev = query->buffer.previous; @@ -1074,7 +1074,7 @@ void r600_query_hw_reset_buffers(struct r600_common_context *rctx, query->buffer.previous = NULL; /* Obtain a new buffer if the current one can't be mapped without a stall. 
*/ - if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) || + if (si_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) || !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) { r600_resource_reference(&query->buffer.buf, NULL); query->buffer.buf = r600_new_query_buffer(rctx->screen, query); @@ -1084,8 +1084,8 @@ void r600_query_hw_reset_buffers(struct r600_common_context *rctx, } } -bool r600_query_hw_begin(struct r600_common_context *rctx, - struct r600_query *rquery) +bool si_query_hw_begin(struct r600_common_context *rctx, + struct r600_query *rquery) { struct r600_query_hw *query = (struct r600_query_hw *)rquery; @@ -1095,7 +1095,7 @@ bool r600_query_hw_begin(struct r600_common_context *rctx, } if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES)) - r600_query_hw_reset_buffers(rctx, query); + si_query_hw_reset_buffers(rctx, query); r600_resource_reference(&query->workaround_buf, NULL); @@ -1115,13 +1115,13 @@ static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query) return rquery->ops->end(rctx, rquery); } -bool r600_query_hw_end(struct r600_common_context *rctx, - struct r600_query *rquery) +bool si_query_hw_end(struct r600_common_context *rctx, + struct r600_query *rquery) { struct r600_query_hw *query = (struct r600_query_hw *)rquery; if (query->flags & R600_QUERY_HW_FLAG_NO_START) - r600_query_hw_reset_buffers(rctx, query); + si_query_hw_reset_buffers(rctx, query); r600_query_hw_emit_stop(rctx, query); @@ -1287,47 +1287,28 @@ static void r600_query_hw_add_result(struct r600_common_screen *rscreen, } break; case PIPE_QUERY_PIPELINE_STATISTICS: - if (rscreen->chip_class >= EVERGREEN) { - result->pipeline_statistics.ps_invocations += - r600_query_read_result(buffer, 0, 22, false); - result->pipeline_statistics.c_primitives += - r600_query_read_result(buffer, 2, 24, false); - result->pipeline_statistics.c_invocations += - r600_query_read_result(buffer, 4, 26, false); - result->pipeline_statistics.vs_invocations += - r600_query_read_result(buffer, 6, 28, false); - result->pipeline_statistics.gs_invocations += - r600_query_read_result(buffer, 8, 30, false); - result->pipeline_statistics.gs_primitives += - r600_query_read_result(buffer, 10, 32, false); - result->pipeline_statistics.ia_primitives += - r600_query_read_result(buffer, 12, 34, false); - result->pipeline_statistics.ia_vertices += - r600_query_read_result(buffer, 14, 36, false); - result->pipeline_statistics.hs_invocations += - r600_query_read_result(buffer, 16, 38, false); - result->pipeline_statistics.ds_invocations += - r600_query_read_result(buffer, 18, 40, false); - result->pipeline_statistics.cs_invocations += - r600_query_read_result(buffer, 20, 42, false); - } else { - result->pipeline_statistics.ps_invocations += - r600_query_read_result(buffer, 0, 16, false); - result->pipeline_statistics.c_primitives += - r600_query_read_result(buffer, 2, 18, false); - result->pipeline_statistics.c_invocations += - r600_query_read_result(buffer, 4, 20, false); - result->pipeline_statistics.vs_invocations += - r600_query_read_result(buffer, 6, 22, false); - result->pipeline_statistics.gs_invocations += - r600_query_read_result(buffer, 8, 24, false); - result->pipeline_statistics.gs_primitives += - r600_query_read_result(buffer, 10, 26, false); - result->pipeline_statistics.ia_primitives += - r600_query_read_result(buffer, 12, 28, false); - result->pipeline_statistics.ia_vertices += - r600_query_read_result(buffer, 14, 30, 
false); - } + result->pipeline_statistics.ps_invocations += + r600_query_read_result(buffer, 0, 22, false); + result->pipeline_statistics.c_primitives += + r600_query_read_result(buffer, 2, 24, false); + result->pipeline_statistics.c_invocations += + r600_query_read_result(buffer, 4, 26, false); + result->pipeline_statistics.vs_invocations += + r600_query_read_result(buffer, 6, 28, false); + result->pipeline_statistics.gs_invocations += + r600_query_read_result(buffer, 8, 30, false); + result->pipeline_statistics.gs_primitives += + r600_query_read_result(buffer, 10, 32, false); + result->pipeline_statistics.ia_primitives += + r600_query_read_result(buffer, 12, 34, false); + result->pipeline_statistics.ia_vertices += + r600_query_read_result(buffer, 14, 36, false); + result->pipeline_statistics.hs_invocations += + r600_query_read_result(buffer, 16, 38, false); + result->pipeline_statistics.ds_invocations += + r600_query_read_result(buffer, 18, 40, false); + result->pipeline_statistics.cs_invocations += + r600_query_read_result(buffer, 20, 42, false); #if 0 /* for testing */ printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, " "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, " @@ -1381,9 +1362,9 @@ static void r600_query_hw_clear_result(struct r600_query_hw *query, util_query_clear_result(result, query->b.type); } -bool r600_query_hw_get_result(struct r600_common_context *rctx, - struct r600_query *rquery, - bool wait, union pipe_query_result *result) +bool si_query_hw_get_result(struct r600_common_context *rctx, + struct r600_query *rquery, + bool wait, union pipe_query_result *result) { struct r600_common_screen *rscreen = rctx->screen; struct r600_query_hw *query = (struct r600_query_hw *)rquery; @@ -1400,7 +1381,7 @@ bool r600_query_hw_get_result(struct r600_common_context *rctx, if (rquery->b.flushed) map = rctx->ws->buffer_map(qbuf->buf->buf, NULL, usage); else - map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf, usage); + map = si_buffer_map_sync_with_rings(rctx, qbuf->buf, usage); if (!map) return false; @@ -1787,7 +1768,7 @@ static void r600_query_hw_get_result_resource(struct r600_common_context *rctx, va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size; va += params.fence_offset; - r600_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000); + si_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000); } rctx->b.launch_grid(&rctx->b, &grid); @@ -1871,7 +1852,7 @@ static void r600_render_condition(struct pipe_context *ctx, rctx->set_atom_dirty(rctx, atom, query != NULL); } -void r600_suspend_queries(struct r600_common_context *ctx) +void si_suspend_queries(struct r600_common_context *ctx) { struct r600_query_hw *query; @@ -1906,7 +1887,7 @@ static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context * return num_dw; } -void r600_resume_queries(struct r600_common_context *ctx) +void si_resume_queries(struct r600_common_context *ctx) { struct r600_query_hw *query; unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries); @@ -1921,84 +1902,6 @@ void r600_resume_queries(struct r600_common_context *ctx) } } -/* Fix radeon_info::enabled_rb_mask for R600, R700, EVERGREEN, NI. 
*/ -void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen) -{ - struct r600_common_context *ctx = - (struct r600_common_context*)rscreen->aux_context; - struct radeon_winsys_cs *cs = ctx->gfx.cs; - struct r600_resource *buffer; - uint32_t *results; - unsigned i, mask = 0; - unsigned max_rbs = ctx->screen->info.num_render_backends; - - assert(rscreen->chip_class <= CAYMAN); - - /* if backend_map query is supported by the kernel */ - if (rscreen->info.r600_gb_backend_map_valid) { - unsigned num_tile_pipes = rscreen->info.num_tile_pipes; - unsigned backend_map = rscreen->info.r600_gb_backend_map; - unsigned item_width, item_mask; - - if (ctx->chip_class >= EVERGREEN) { - item_width = 4; - item_mask = 0x7; - } else { - item_width = 2; - item_mask = 0x3; - } - - while (num_tile_pipes--) { - i = backend_map & item_mask; - mask |= (1<<i); - backend_map >>= item_width; - } - if (mask != 0) { - rscreen->info.enabled_rb_mask = mask; - return; - } - } - - /* otherwise backup path for older kernels */ - - /* create buffer for event data */ - buffer = (struct r600_resource*) - pipe_buffer_create(ctx->b.screen, 0, - PIPE_USAGE_STAGING, max_rbs * 16); - if (!buffer) - return; - - /* initialize buffer with zeroes */ - results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE); - if (results) { - memset(results, 0, max_rbs * 4 * 4); - - /* emit EVENT_WRITE for ZPASS_DONE */ - radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0)); - radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1)); - radeon_emit(cs, buffer->gpu_address); - radeon_emit(cs, buffer->gpu_address >> 32); - - r600_emit_reloc(ctx, &ctx->gfx, buffer, - RADEON_USAGE_WRITE, RADEON_PRIO_QUERY); - - /* analyze results */ - results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ); - if (results) { - for(i = 0; i < max_rbs; i++) { - /* at least highest bit will be set if backend is used */ - if (results[i*4 + 1]) - mask |= (1<<i); - } - } - } - - r600_resource_reference(&buffer, NULL); - - if (mask) - rscreen->info.enabled_rb_mask = mask; -} - #define XFULL(name_, query_type_, type_, result_type_, group_id_) \ { \ .name = name_, \ @@ -2124,13 +2027,13 @@ static int r600_get_driver_query_info(struct pipe_screen *screen, if (!info) { unsigned num_perfcounters = - r600_get_perfcounter_info(rscreen, 0, NULL); + si_get_perfcounter_info(rscreen, 0, NULL); return num_queries + num_perfcounters; } if (index >= num_queries) - return r600_get_perfcounter_info(rscreen, index - num_queries, info); + return si_get_perfcounter_info(rscreen, index - num_queries, info); *info = r600_driver_query_list[index]; @@ -2177,7 +2080,7 @@ static int r600_get_driver_query_group_info(struct pipe_screen *screen, return num_pc_groups + R600_NUM_SW_QUERY_GROUPS; if (index < num_pc_groups) - return r600_get_perfcounter_group_info(rscreen, index, info); + return si_get_perfcounter_group_info(rscreen, index, info); index -= num_pc_groups; if (index >= R600_NUM_SW_QUERY_GROUPS) @@ -2189,10 +2092,10 @@ static int r600_get_driver_query_group_info(struct pipe_screen *screen, return 1; } -void r600_query_init(struct r600_common_context *rctx) +void si_init_query_functions(struct r600_common_context *rctx) { rctx->b.create_query = r600_create_query; - rctx->b.create_batch_query = r600_create_batch_query; + rctx->b.create_batch_query = si_create_batch_query; rctx->b.destroy_query = r600_destroy_query; rctx->b.begin_query = r600_begin_query; rctx->b.end_query = r600_end_query; @@ -2206,7 +2109,7 @@ void r600_query_init(struct 
r600_common_context *rctx) LIST_INITHEAD(&rctx->active_queries); } -void r600_init_screen_query_functions(struct r600_common_screen *rscreen) +void si_init_screen_query_functions(struct r600_common_screen *rscreen) { rscreen->b.get_driver_query_info = r600_get_driver_query_info; rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info; diff --git a/src/gallium/drivers/radeon/r600_query.h b/src/gallium/drivers/radeon/r600_query.h index 7455c8e63a8..a20da075c68 100644 --- a/src/gallium/drivers/radeon/r600_query.h +++ b/src/gallium/drivers/radeon/r600_query.h @@ -200,18 +200,18 @@ struct r600_query_hw { unsigned workaround_offset; }; -bool r600_query_hw_init(struct r600_common_screen *rscreen, - struct r600_query_hw *query); -void r600_query_hw_destroy(struct r600_common_screen *rscreen, - struct r600_query *rquery); -bool r600_query_hw_begin(struct r600_common_context *rctx, +bool si_query_hw_init(struct r600_common_screen *rscreen, + struct r600_query_hw *query); +void si_query_hw_destroy(struct r600_common_screen *rscreen, struct r600_query *rquery); -bool r600_query_hw_end(struct r600_common_context *rctx, +bool si_query_hw_begin(struct r600_common_context *rctx, struct r600_query *rquery); -bool r600_query_hw_get_result(struct r600_common_context *rctx, - struct r600_query *rquery, - bool wait, - union pipe_query_result *result); +bool si_query_hw_end(struct r600_common_context *rctx, + struct r600_query *rquery); +bool si_query_hw_get_result(struct r600_common_context *rctx, + struct r600_query *rquery, + bool wait, + union pipe_query_result *result); /* Performance counters */ enum { @@ -297,26 +297,26 @@ struct r600_perfcounters { bool separate_instance; }; -struct pipe_query *r600_create_batch_query(struct pipe_context *ctx, - unsigned num_queries, - unsigned *query_types); - -int r600_get_perfcounter_info(struct r600_common_screen *, - unsigned index, - struct pipe_driver_query_info *info); -int r600_get_perfcounter_group_info(struct r600_common_screen *, - unsigned index, - struct pipe_driver_query_group_info *info); - -bool r600_perfcounters_init(struct r600_perfcounters *, unsigned num_blocks); -void r600_perfcounters_add_block(struct r600_common_screen *, - struct r600_perfcounters *, - const char *name, unsigned flags, - unsigned counters, unsigned selectors, - unsigned instances, void *data); -void r600_perfcounters_do_destroy(struct r600_perfcounters *); -void r600_query_hw_reset_buffers(struct r600_common_context *rctx, - struct r600_query_hw *query); +struct pipe_query *si_create_batch_query(struct pipe_context *ctx, + unsigned num_queries, + unsigned *query_types); + +int si_get_perfcounter_info(struct r600_common_screen *, + unsigned index, + struct pipe_driver_query_info *info); +int si_get_perfcounter_group_info(struct r600_common_screen *, + unsigned index, + struct pipe_driver_query_group_info *info); + +bool si_perfcounters_init(struct r600_perfcounters *, unsigned num_blocks); +void si_perfcounters_add_block(struct r600_common_screen *, + struct r600_perfcounters *, + const char *name, unsigned flags, + unsigned counters, unsigned selectors, + unsigned instances, void *data); +void si_perfcounters_do_destroy(struct r600_perfcounters *); +void si_query_hw_reset_buffers(struct r600_common_context *rctx, + struct r600_query_hw *query); struct r600_qbo_state { void *saved_compute; diff --git a/src/gallium/drivers/radeon/r600_streamout.c b/src/gallium/drivers/radeon/r600_streamout.c index a18089a3b39..40243e6671a 100644 --- 
a/src/gallium/drivers/radeon/r600_streamout.c +++ b/src/gallium/drivers/radeon/r600_streamout.c @@ -74,7 +74,7 @@ static void r600_so_target_destroy(struct pipe_context *ctx, FREE(t); } -void r600_streamout_buffers_dirty(struct r600_common_context *rctx) +void si_streamout_buffers_dirty(struct r600_common_context *rctx) { struct r600_atom *begin = &rctx->streamout.begin_atom; unsigned num_bufs = util_bitcount(rctx->streamout.enabled_mask); @@ -109,10 +109,10 @@ void r600_streamout_buffers_dirty(struct r600_common_context *rctx) r600_set_streamout_enable(rctx, true); } -void r600_set_streamout_targets(struct pipe_context *ctx, - unsigned num_targets, - struct pipe_stream_output_target **targets, - const unsigned *offsets) +void si_common_set_streamout_targets(struct pipe_context *ctx, + unsigned num_targets, + struct pipe_stream_output_target **targets, + const unsigned *offsets) { struct r600_common_context *rctx = (struct r600_common_context *)ctx; unsigned i; @@ -120,7 +120,7 @@ void r600_set_streamout_targets(struct pipe_context *ctx, /* Stop streamout. */ if (rctx->streamout.num_targets && rctx->streamout.begin_emitted) { - r600_emit_streamout_end(rctx); + si_emit_streamout_end(rctx); } /* Set the new targets. */ @@ -144,7 +144,7 @@ void r600_set_streamout_targets(struct pipe_context *ctx, rctx->streamout.append_bitmask = append_bitmask; if (num_targets) { - r600_streamout_buffers_dirty(rctx); + si_streamout_buffers_dirty(rctx); } else { rctx->set_atom_dirty(rctx, &rctx->streamout.begin_atom, false); r600_set_streamout_enable(rctx, false); @@ -266,7 +266,7 @@ static void r600_emit_streamout_begin(struct r600_common_context *rctx, struct r rctx->streamout.begin_emitted = true; } -void r600_emit_streamout_end(struct r600_common_context *rctx) +void si_emit_streamout_end(struct r600_common_context *rctx) { struct radeon_winsys_cs *cs = rctx->gfx.cs; struct r600_so_target **t = rctx->streamout.targets; @@ -353,8 +353,8 @@ static void r600_set_streamout_enable(struct r600_common_context *rctx, bool ena } } -void r600_update_prims_generated_query_state(struct r600_common_context *rctx, - unsigned type, int diff) +void si_update_prims_generated_query_state(struct r600_common_context *rctx, + unsigned type, int diff) { if (type == PIPE_QUERY_PRIMITIVES_GENERATED) { bool old_strmout_en = r600_get_strmout_en(rctx); @@ -371,7 +371,7 @@ void r600_update_prims_generated_query_state(struct r600_common_context *rctx, } } -void r600_streamout_init(struct r600_common_context *rctx) +void si_streamout_init(struct r600_common_context *rctx) { rctx->b.create_stream_output_target = r600_create_so_target; rctx->b.stream_output_target_destroy = r600_so_target_destroy; diff --git a/src/gallium/drivers/radeon/r600_test_dma.c b/src/gallium/drivers/radeon/r600_test_dma.c index 9e1ff9e5fe0..f7002bc3905 100644 --- a/src/gallium/drivers/radeon/r600_test_dma.c +++ b/src/gallium/drivers/radeon/r600_test_dma.c @@ -171,7 +171,7 @@ static unsigned generate_max_tex_side(unsigned max_tex_side) } } -void r600_test_dma(struct r600_common_screen *rscreen) +void si_test_dma(struct r600_common_screen *rscreen) { struct pipe_screen *screen = &rscreen->b; struct pipe_context *ctx = screen->context_create(screen, NULL, 0); diff --git a/src/gallium/drivers/radeon/r600_texture.c b/src/gallium/drivers/radeon/r600_texture.c index e9507c3f541..f7b9740895b 100644 --- a/src/gallium/drivers/radeon/r600_texture.c +++ b/src/gallium/drivers/radeon/r600_texture.c @@ -44,13 +44,13 @@ r600_choose_tiling(struct r600_common_screen *rscreen, 
const struct pipe_resource *templ); -bool r600_prepare_for_dma_blit(struct r600_common_context *rctx, - struct r600_texture *rdst, - unsigned dst_level, unsigned dstx, - unsigned dsty, unsigned dstz, - struct r600_texture *rsrc, - unsigned src_level, - const struct pipe_box *src_box) +bool si_prepare_for_dma_blit(struct r600_common_context *rctx, + struct r600_texture *rdst, + unsigned dst_level, unsigned dstx, + unsigned dsty, unsigned dstz, + struct r600_texture *rsrc, + unsigned src_level, + const struct pipe_box *src_box) { if (!rctx->dma.cs) return false; @@ -237,7 +237,7 @@ static int r600_init_surface(struct r600_common_screen *rscreen, is_depth = util_format_has_depth(desc); is_stencil = util_format_has_stencil(desc); - if (rscreen->chip_class >= EVERGREEN && !is_flushed_depth && + if (!is_flushed_depth && ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) { bpe = 4; /* stencil is allocated separately on evergreen */ } else { @@ -408,10 +408,7 @@ static void r600_texture_discard_cmask(struct r600_common_screen *rscreen, rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8; rtex->dirty_level_mask = 0; - if (rscreen->chip_class >= SI) - rtex->cb_color_info &= ~SI_S_028C70_FAST_CLEAR(1); - else - rtex->cb_color_info &= ~EG_S_028C70_FAST_CLEAR(1); + rtex->cb_color_info &= ~SI_S_028C70_FAST_CLEAR(1); if (rtex->cmask_buffer != &rtex->resource) r600_resource_reference(&rtex->cmask_buffer, NULL); @@ -466,8 +463,8 @@ static bool r600_texture_discard_dcc(struct r600_common_screen *rscreen, * \param rctx the current context if you have one, or rscreen->aux_context * if you don't. */ -bool r600_texture_disable_dcc(struct r600_common_context *rctx, - struct r600_texture *rtex) +bool si_texture_disable_dcc(struct r600_common_context *rctx, + struct r600_texture *rtex) { struct r600_common_screen *rscreen = rctx->screen; @@ -624,7 +621,7 @@ static boolean r600_texture_get_handle(struct pipe_screen* screen, * access. */ if (usage & PIPE_HANDLE_USAGE_WRITE && rtex->dcc_offset) { - if (r600_texture_disable_dcc(rctx, rtex)) + if (si_texture_disable_dcc(rctx, rtex)) update_metadata = true; } @@ -681,7 +678,7 @@ static boolean r600_texture_get_handle(struct pipe_screen* screen, rctx->b.resource_copy_region(&rctx->b, newb, 0, 0, 0, 0, &res->b.b, 0, &box); /* Move the new buffer storage to the old pipe_resource. */ - r600_replace_buffer_storage(&rctx->b, &res->b.b, newb); + si_replace_buffer_storage(&rctx->b, &res->b.b, newb); pipe_resource_reference(&newb, NULL); assert(res->b.b.bind & PIPE_BIND_SHARED); @@ -730,10 +727,10 @@ static void r600_texture_destroy(struct pipe_screen *screen, static const struct u_resource_vtbl r600_texture_vtbl; /* The number of samples can be specified independently of the texture. */ -void r600_texture_get_fmask_info(struct r600_common_screen *rscreen, - struct r600_texture *rtex, - unsigned nr_samples, - struct r600_fmask_info *out) +void si_texture_get_fmask_info(struct r600_common_screen *rscreen, + struct r600_texture *rtex, + unsigned nr_samples, + struct r600_fmask_info *out) { /* FMASK is allocated like an ordinary texture. */ struct pipe_resource templ = rtex->resource.b.b; @@ -751,17 +748,6 @@ void r600_texture_get_fmask_info(struct r600_common_screen *rscreen, templ.nr_samples = 1; flags = rtex->surface.flags | RADEON_SURF_FMASK; - if (rscreen->chip_class <= CAYMAN) { - /* Use the same parameters and tile mode. 
*/ - fmask.u.legacy.bankw = rtex->surface.u.legacy.bankw; - fmask.u.legacy.bankh = rtex->surface.u.legacy.bankh; - fmask.u.legacy.mtilea = rtex->surface.u.legacy.mtilea; - fmask.u.legacy.tile_split = rtex->surface.u.legacy.tile_split; - - if (nr_samples <= 4) - fmask.u.legacy.bankh = 4; - } - switch (nr_samples) { case 2: case 4: @@ -775,13 +761,6 @@ void r600_texture_get_fmask_info(struct r600_common_screen *rscreen, return; } - /* Overallocate FMASK on R600-R700 to fix colorbuffer corruption. - * This can be fixed by writing a separate FMASK allocator specifically - * for R600-R700 asics. */ - if (rscreen->chip_class <= R700) { - bpe *= 2; - } - if (rscreen->ws->surface_init(rscreen->ws, &templ, flags, bpe, RADEON_SURF_MODE_2D, &fmask)) { R600_ERR("Got error in surface_init while allocating FMASK.\n"); @@ -805,47 +784,13 @@ void r600_texture_get_fmask_info(struct r600_common_screen *rscreen, static void r600_texture_allocate_fmask(struct r600_common_screen *rscreen, struct r600_texture *rtex) { - r600_texture_get_fmask_info(rscreen, rtex, + si_texture_get_fmask_info(rscreen, rtex, rtex->resource.b.b.nr_samples, &rtex->fmask); rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment); rtex->size = rtex->fmask.offset + rtex->fmask.size; } -void r600_texture_get_cmask_info(struct r600_common_screen *rscreen, - struct r600_texture *rtex, - struct r600_cmask_info *out) -{ - unsigned cmask_tile_width = 8; - unsigned cmask_tile_height = 8; - unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height; - unsigned element_bits = 4; - unsigned cmask_cache_bits = 1024; - unsigned num_pipes = rscreen->info.num_tile_pipes; - unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes; - - unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes; - unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements; - unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile); - unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile); - unsigned macro_tile_height = pixels_per_macro_tile / macro_tile_width; - - unsigned pitch_elements = align(rtex->resource.b.b.width0, macro_tile_width); - unsigned height = align(rtex->resource.b.b.height0, macro_tile_height); - - unsigned base_align = num_pipes * pipe_interleave_bytes; - unsigned slice_bytes = - ((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements; - - assert(macro_tile_width % 128 == 0); - assert(macro_tile_height % 128 == 0); - - out->slice_tile_max = ((pitch_elements * height) / (128*128)) - 1; - out->alignment = MAX2(256, base_align); - out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) * - align(slice_bytes, base_align); -} - static void si_texture_get_cmask_info(struct r600_common_screen *rscreen, struct r600_texture *rtex, struct r600_cmask_info *out) @@ -903,19 +848,12 @@ static void si_texture_get_cmask_info(struct r600_common_screen *rscreen, static void r600_texture_allocate_cmask(struct r600_common_screen *rscreen, struct r600_texture *rtex) { - if (rscreen->chip_class >= SI) { - si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask); - } else { - r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask); - } + si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask); rtex->cmask.offset = align64(rtex->size, rtex->cmask.alignment); rtex->size = rtex->cmask.offset + rtex->cmask.size; - if (rscreen->chip_class >= SI) - rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1); - else - rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1); + 
rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1); } static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen, @@ -926,14 +864,10 @@ static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen assert(rtex->cmask.size == 0); - if (rscreen->chip_class >= SI) { - si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask); - } else { - r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask); - } + si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask); rtex->cmask_buffer = (struct r600_resource *) - r600_aligned_buffer_create(&rscreen->b, + si_aligned_buffer_create(&rscreen->b, R600_RESOURCE_FLAG_UNMAPPABLE, PIPE_USAGE_DEFAULT, rtex->cmask.size, @@ -946,10 +880,7 @@ static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen /* update colorbuffer state bits */ rtex->cmask.base_address_reg = rtex->cmask_buffer->gpu_address >> 8; - if (rscreen->chip_class >= SI) - rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1); - else - rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1); + rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1); p_atomic_inc(&rscreen->compressed_colortex_counter); } @@ -965,16 +896,6 @@ static void r600_texture_get_htile_size(struct r600_common_screen *rscreen, rtex->surface.htile_size = 0; - if (rscreen->chip_class <= EVERGREEN && - rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 26) - return; - - /* HW bug on R6xx. */ - if (rscreen->chip_class == R600 && - (rtex->resource.b.b.width0 > 7680 || - rtex->resource.b.b.height0 > 7680)) - return; - /* HTILE is broken with 1D tiling on old kernels and CIK. */ if (rscreen->chip_class >= CIK && rtex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D && @@ -1045,8 +966,8 @@ static void r600_texture_allocate_htile(struct r600_common_screen *rscreen, rtex->size = rtex->htile_offset + rtex->surface.htile_size; } -void r600_print_texture_info(struct r600_common_screen *rscreen, - struct r600_texture *rtex, struct u_log_context *log) +void si_print_texture_info(struct r600_common_screen *rscreen, + struct r600_texture *rtex, struct u_log_context *log) { int i; @@ -1252,21 +1173,12 @@ r600_texture_create_object(struct pipe_screen *screen, rtex->ps_draw_ratio = 0; if (rtex->is_depth) { - if (base->flags & (R600_RESOURCE_FLAG_TRANSFER | - R600_RESOURCE_FLAG_FLUSHED_DEPTH) || - rscreen->chip_class >= EVERGREEN) { - if (rscreen->chip_class >= GFX9) { - rtex->can_sample_z = true; - rtex->can_sample_s = true; - } else { - rtex->can_sample_z = !rtex->surface.u.legacy.depth_adjusted; - rtex->can_sample_s = !rtex->surface.u.legacy.stencil_adjusted; - } + if (rscreen->chip_class >= GFX9) { + rtex->can_sample_z = true; + rtex->can_sample_s = true; } else { - if (rtex->resource.b.b.nr_samples <= 1 && - (rtex->resource.b.b.format == PIPE_FORMAT_Z16_UNORM || - rtex->resource.b.b.format == PIPE_FORMAT_Z32_FLOAT)) - rtex->can_sample_z = true; + rtex->can_sample_z = !rtex->surface.u.legacy.depth_adjusted; + rtex->can_sample_s = !rtex->surface.u.legacy.stencil_adjusted; } if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER | @@ -1304,14 +1216,14 @@ r600_texture_create_object(struct pipe_screen *screen, /* Now create the backing buffer. */ if (!buf) { - r600_init_resource_fields(rscreen, resource, rtex->size, + si_init_resource_fields(rscreen, resource, rtex->size, rtex->surface.surf_alignment); /* Displayable surfaces are not suballocated. 
*/ if (resource->b.b.bind & PIPE_BIND_SCANOUT) resource->flags |= RADEON_FLAG_NO_SUBALLOC; - if (!r600_alloc_resource(rscreen, resource)) { + if (!si_alloc_resource(rscreen, resource)) { FREE(rtex); return NULL; } @@ -1329,7 +1241,7 @@ r600_texture_create_object(struct pipe_screen *screen, if (rtex->cmask.size) { /* Initialize the cmask to 0xCC (= compressed state). */ - r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b, + si_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b, rtex->cmask.offset, rtex->cmask.size, 0xCCCCCCCC); } @@ -1339,7 +1251,7 @@ r600_texture_create_object(struct pipe_screen *screen, if (rscreen->chip_class >= GFX9 || rtex->tc_compatible_htile) clear_value = 0x0000030F; - r600_screen_clear_buffer(rscreen, &rtex->resource.b.b, + si_screen_clear_buffer(rscreen, &rtex->resource.b.b, rtex->htile_offset, rtex->surface.htile_size, clear_value); @@ -1347,7 +1259,7 @@ r600_texture_create_object(struct pipe_screen *screen, /* Initialize DCC only if the texture is not being imported. */ if (!buf && rtex->dcc_offset) { - r600_screen_clear_buffer(rscreen, &rtex->resource.b.b, + si_screen_clear_buffer(rscreen, &rtex->resource.b.b, rtex->dcc_offset, rtex->surface.dcc_size, 0xFFFFFFFF); @@ -1369,7 +1281,7 @@ r600_texture_create_object(struct pipe_screen *screen, puts("Texture:"); struct u_log_context log; u_log_context_init(&log); - r600_print_texture_info(rscreen, rtex, &log); + si_print_texture_info(rscreen, rtex, &log); u_log_new_page_print(&log, stdout); fflush(stdout); u_log_context_destroy(&log); @@ -1403,13 +1315,6 @@ r600_choose_tiling(struct r600_common_screen *rscreen, (templ->flags & PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY)) return RADEON_SURF_MODE_2D; - /* r600g: force tiling on TEXTURE_2D and TEXTURE_3D compute resources. */ - if (rscreen->chip_class >= R600 && rscreen->chip_class <= CAYMAN && - (templ->bind & PIPE_BIND_COMPUTE_RESOURCE) && - (templ->target == PIPE_TEXTURE_2D || - templ->target == PIPE_TEXTURE_3D)) - force_tiling = true; - /* Handle common candidates for the linear mode. * Compressed textures and DB surfaces must always be tiled. */ @@ -1425,8 +1330,7 @@ r600_choose_tiling(struct r600_common_screen *rscreen, /* Cursors are linear on SI. 
* (XXX double-check, maybe also use RADEON_SURF_SCANOUT) */ - if (rscreen->chip_class >= SI && - (templ->bind & PIPE_BIND_CURSOR)) + if (templ->bind & PIPE_BIND_CURSOR) return RADEON_SURF_MODE_LINEAR_ALIGNED; if (templ->bind & PIPE_BIND_LINEAR) @@ -1455,8 +1359,8 @@ r600_choose_tiling(struct r600_common_screen *rscreen, return RADEON_SURF_MODE_2D; } -struct pipe_resource *r600_texture_create(struct pipe_screen *screen, - const struct pipe_resource *templ) +struct pipe_resource *si_texture_create(struct pipe_screen *screen, + const struct pipe_resource *templ) { struct r600_common_screen *rscreen = (struct r600_common_screen*)screen; struct radeon_surf surface = {0}; @@ -1531,9 +1435,9 @@ static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen return &rtex->resource.b.b; } -bool r600_init_flushed_depth_texture(struct pipe_context *ctx, - struct pipe_resource *texture, - struct r600_texture **staging) +bool si_init_flushed_depth_texture(struct pipe_context *ctx, + struct pipe_resource *texture, + struct r600_texture **staging) { struct r600_texture *rtex = (struct r600_texture*)texture; struct pipe_resource resource; @@ -1633,9 +1537,7 @@ static bool r600_can_invalidate_texture(struct r600_common_screen *rscreen, unsigned transfer_usage, const struct pipe_box *box) { - /* r600g doesn't react to dirty_tex_descriptor_counter */ - return rscreen->chip_class >= SI && - !rtex->resource.b.is_shared && + return !rtex->resource.b.is_shared && !(transfer_usage & PIPE_TRANSFER_READ) && rtex->resource.b.b.last_level == 0 && util_texrange_covers_whole_level(&rtex->resource.b.b, 0, @@ -1654,7 +1556,7 @@ static void r600_texture_invalidate_storage(struct r600_common_context *rctx, assert(rtex->surface.is_linear); /* Reallocate the buffer in the same pipe_resource. */ - r600_alloc_resource(rscreen, &rtex->resource); + si_alloc_resource(rscreen, &rtex->resource); /* Initialize the CMASK base address (needed even without CMASK). */ rtex->cmask.base_address_reg = @@ -1718,7 +1620,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx, rtex->resource.domains & RADEON_DOMAIN_VRAM || rtex->resource.flags & RADEON_FLAG_GTT_WC; /* Write & linear only: */ - else if (r600_rings_is_buffer_referenced(rctx, rtex->resource.buf, + else if (si_rings_is_buffer_referenced(rctx, rtex->resource.buf, RADEON_USAGE_READWRITE) || !rctx->ws->buffer_wait(rtex->resource.buf, 0, RADEON_USAGE_READWRITE)) { @@ -1757,7 +1659,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx, r600_init_temp_resource_from_box(&resource, texture, box, level, 0); - if (!r600_init_flushed_depth_texture(ctx, &resource, &staging_depth)) { + if (!si_init_flushed_depth_texture(ctx, &resource, &staging_depth)) { R600_ERR("failed to create temporary texture to hold untiled copy\n"); FREE(trans); return NULL; @@ -1784,7 +1686,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx, } else { /* XXX: only readback the rectangle which is being mapped? 
*/ /* XXX: when discard is true, no need to read back from depth texture */ - if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) { + if (!si_init_flushed_depth_texture(ctx, texture, &staging_depth)) { R600_ERR("failed to create temporary texture to hold untiled copy\n"); FREE(trans); return NULL; @@ -1840,7 +1742,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx, buf = &rtex->resource; } - if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) { + if (!(map = si_buffer_map_sync_with_rings(rctx, buf, usage))) { r600_resource_reference(&trans->staging, NULL); FREE(trans); return NULL; @@ -2010,15 +1912,15 @@ void vi_disable_dcc_if_incompatible_format(struct r600_common_context *rctx, if (vi_dcc_enabled(rtex, level) && !vi_dcc_formats_compatible(tex->format, view_format)) - if (!r600_texture_disable_dcc(rctx, (struct r600_texture*)tex)) + if (!si_texture_disable_dcc(rctx, (struct r600_texture*)tex)) rctx->decompress_dcc(&rctx->b, rtex); } -struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe, - struct pipe_resource *texture, - const struct pipe_surface *templ, - unsigned width0, unsigned height0, - unsigned width, unsigned height) +struct pipe_surface *si_create_surface_custom(struct pipe_context *pipe, + struct pipe_resource *texture, + const struct pipe_surface *templ, + unsigned width0, unsigned height0, + unsigned width, unsigned height) { struct r600_surface *surface = CALLOC_STRUCT(r600_surface); @@ -2079,7 +1981,7 @@ static struct pipe_surface *r600_create_surface(struct pipe_context *pipe, } } - return r600_create_surface_custom(pipe, tex, templ, + return si_create_surface_custom(pipe, tex, templ, width0, height0, width, height); } @@ -2159,7 +2061,7 @@ static void r600_clear_texture(struct pipe_context *pipe, pipe_surface_reference(&sf, NULL); } -unsigned r600_translate_colorswap(enum pipe_format format, bool do_endian_swap) +unsigned si_translate_colorswap(enum pipe_format format, bool do_endian_swap) { const struct util_format_description *desc = util_format_description(format); @@ -2380,7 +2282,7 @@ static void vi_separate_dcc_try_enable(struct r600_common_context *rctx, tex->last_dcc_separate_buffer = NULL; } else { tex->dcc_separate_buffer = (struct r600_resource*) - r600_aligned_buffer_create(rctx->b.screen, + si_aligned_buffer_create(rctx->b.screen, R600_RESOURCE_FLAG_UNMAPPABLE, PIPE_USAGE_DEFAULT, tex->surface.dcc_size, @@ -2416,7 +2318,7 @@ void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx, /* Read the results. 
*/ ctx->get_query_result(ctx, rctx->dcc_stats[i].ps_stats[2], true, &result); - r600_query_hw_reset_buffers(rctx, + si_query_hw_reset_buffers(rctx, (struct r600_query_hw*) rctx->dcc_stats[i].ps_stats[2]); @@ -2527,7 +2429,7 @@ static bool vi_get_fast_clear_parameters(enum pipe_format surface_format, util_format_is_alpha(surface_format)) { extra_channel = -1; } else if (desc->layout == UTIL_FORMAT_LAYOUT_PLAIN) { - if(r600_translate_colorswap(surface_format, false) <= 1) + if(si_translate_colorswap(surface_format, false) <= 1) extra_channel = desc->nr_channels - 1; else extra_channel = 0; @@ -2725,7 +2627,7 @@ static void si_set_optimal_micro_tile_mode(struct r600_common_screen *rscreen, p_atomic_inc(&rscreen->dirty_tex_counter); } -void evergreen_do_fast_color_clear(struct r600_common_context *rctx, +void si_do_fast_color_clear(struct r600_common_context *rctx, struct pipe_framebuffer_state *fb, struct r600_atom *fb_state, unsigned *buffers, ubyte *dirty_cbufs, @@ -2858,8 +2760,7 @@ void evergreen_do_fast_color_clear(struct r600_common_context *rctx, } /* We can change the micro tile mode before a full clear. */ - if (rctx->screen->chip_class >= SI) - si_set_optimal_micro_tile_mode(rctx->screen, tex); + si_set_optimal_micro_tile_mode(rctx->screen, tex); evergreen_set_clear_color(tex, fb->cbufs[i]->format, color); @@ -2982,7 +2883,7 @@ r600_texture_from_memobj(struct pipe_screen *screen, return &rtex->resource.b.b; } -void r600_init_screen_texture_functions(struct r600_common_screen *rscreen) +void si_init_screen_texture_functions(struct r600_common_screen *rscreen) { rscreen->b.resource_from_handle = r600_texture_from_handle; rscreen->b.resource_get_handle = r600_texture_get_handle; @@ -2991,7 +2892,7 @@ void r600_init_screen_texture_functions(struct r600_common_screen *rscreen) rscreen->b.memobj_destroy = r600_memobj_destroy; } -void r600_init_context_texture_functions(struct r600_common_context *rctx) +void si_init_context_texture_functions(struct r600_common_context *rctx) { rctx->b.create_surface = r600_create_surface; rctx->b.surface_destroy = r600_surface_destroy; diff --git a/src/gallium/drivers/radeon/r600_viewport.c b/src/gallium/drivers/radeon/r600_viewport.c index 2de13820545..cf6d5f28ac0 100644 --- a/src/gallium/drivers/radeon/r600_viewport.c +++ b/src/gallium/drivers/radeon/r600_viewport.c @@ -115,8 +115,8 @@ static void r600_scissor_make_union(struct r600_signed_scissor *out, out->maxy = MAX2(out->maxy, in->maxy); } -void evergreen_apply_scissor_bug_workaround(struct r600_common_context *rctx, - struct pipe_scissor_state *scissor) +void si_apply_scissor_bug_workaround(struct r600_common_context *rctx, + struct pipe_scissor_state *scissor) { if (rctx->chip_class == EVERGREEN || rctx->chip_class == CAYMAN) { if (scissor->maxx == 0) @@ -147,7 +147,7 @@ static void r600_emit_one_scissor(struct r600_common_context *rctx, if (scissor) r600_clip_scissor(&final, scissor); - evergreen_apply_scissor_bug_workaround(rctx, &final); + si_apply_scissor_bug_workaround(rctx, &final); radeon_emit(cs, S_028250_TL_X(final.minx) | S_028250_TL_Y(final.miny) | @@ -368,8 +368,8 @@ static void r600_emit_viewport_states(struct r600_common_context *rctx, } /* Set viewport dependencies on pipe_rasterizer_state. 
*/ -void r600_viewport_set_rast_deps(struct r600_common_context *rctx, - bool scissor_enable, bool clip_halfz) +void si_viewport_set_rast_deps(struct r600_common_context *rctx, + bool scissor_enable, bool clip_halfz) { if (rctx->scissor_enabled != scissor_enable) { rctx->scissor_enabled = scissor_enable; @@ -389,8 +389,8 @@ void r600_viewport_set_rast_deps(struct r600_common_context *rctx, * is delayed. When a shader with VIEWPORT_INDEX appears, this should be * called to emit the rest. */ -void r600_update_vs_writes_viewport_index(struct r600_common_context *rctx, - struct tgsi_shader_info *info) +void si_update_vs_writes_viewport_index(struct r600_common_context *rctx, + struct tgsi_shader_info *info) { bool vs_window_space; @@ -420,7 +420,7 @@ void r600_update_vs_writes_viewport_index(struct r600_common_context *rctx, rctx->set_atom_dirty(rctx, &rctx->viewports.atom, true); } -void r600_init_viewport_functions(struct r600_common_context *rctx) +void si_init_viewport_functions(struct r600_common_context *rctx) { rctx->scissors.atom.emit = r600_emit_scissors; rctx->viewports.atom.emit = r600_emit_viewport_states; diff --git a/src/gallium/drivers/radeon/radeon_uvd.c b/src/gallium/drivers/radeon/radeon_uvd.c index fabc73e3834..d381554dd58 100644 --- a/src/gallium/drivers/radeon/radeon_uvd.c +++ b/src/gallium/drivers/radeon/radeon_uvd.c @@ -1101,13 +1101,13 @@ static void ruvd_destroy(struct pipe_video_codec *decoder) dec->ws->cs_destroy(dec->cs); for (i = 0; i < NUM_BUFFERS; ++i) { - rvid_destroy_buffer(&dec->msg_fb_it_buffers[i]); - rvid_destroy_buffer(&dec->bs_buffers[i]); + si_vid_destroy_buffer(&dec->msg_fb_it_buffers[i]); + si_vid_destroy_buffer(&dec->bs_buffers[i]); } - rvid_destroy_buffer(&dec->dpb); - rvid_destroy_buffer(&dec->ctx); - rvid_destroy_buffer(&dec->sessionctx); + si_vid_destroy_buffer(&dec->dpb); + si_vid_destroy_buffer(&dec->ctx); + si_vid_destroy_buffer(&dec->sessionctx); FREE(dec); } @@ -1178,7 +1178,7 @@ static void ruvd_decode_bitstream(struct pipe_video_codec *decoder, if (new_size > buf->res->buf->size) { dec->ws->buffer_unmap(buf->res->buf); - if (!rvid_resize_buffer(dec->screen, dec->cs, buf, new_size)) { + if (!si_vid_resize_buffer(dec->screen, dec->cs, buf, new_size)) { RVID_ERR("Can't resize bitstream buffer!"); return; } @@ -1271,10 +1271,10 @@ static void ruvd_end_frame(struct pipe_video_codec *decoder, ctx_size = calc_ctx_size_h265_main10(dec, (struct pipe_h265_picture_desc*)picture); else ctx_size = calc_ctx_size_h265_main(dec); - if (!rvid_create_buffer(dec->screen, &dec->ctx, ctx_size, PIPE_USAGE_DEFAULT)) { + if (!si_vid_create_buffer(dec->screen, &dec->ctx, ctx_size, PIPE_USAGE_DEFAULT)) { RVID_ERR("Can't allocated context buffer.\n"); } - rvid_clear_buffer(decoder->context, &dec->ctx); + si_vid_clear_buffer(decoder->context, &dec->ctx); } if (dec->ctx.res) @@ -1341,9 +1341,9 @@ static void ruvd_flush(struct pipe_video_codec *decoder) /** * create and UVD decoder */ -struct pipe_video_codec *ruvd_create_decoder(struct pipe_context *context, - const struct pipe_video_codec *templ, - ruvd_set_dtb set_dtb) +struct pipe_video_codec *si_common_uvd_create_decoder(struct pipe_context *context, + const struct pipe_video_codec *templ, + ruvd_set_dtb set_dtb) { struct radeon_winsys* ws = ((struct r600_common_context *)context)->ws; struct r600_common_context *rctx = (struct r600_common_context*)context; @@ -1398,7 +1398,7 @@ struct pipe_video_codec *ruvd_create_decoder(struct pipe_context *context, dec->stream_type = profile2stream_type(dec, info.family); 
dec->set_dtb = set_dtb; - dec->stream_handle = rvid_alloc_stream_handle(); + dec->stream_handle = si_vid_alloc_stream_handle(); dec->screen = context->screen; dec->ws = ws; dec->cs = ws->cs_create(rctx->ctx, RING_UVD, NULL, NULL); @@ -1415,48 +1415,48 @@ struct pipe_video_codec *ruvd_create_decoder(struct pipe_context *context, STATIC_ASSERT(sizeof(struct ruvd_msg) <= FB_BUFFER_OFFSET); if (have_it(dec)) msg_fb_it_size += IT_SCALING_TABLE_SIZE; - if (!rvid_create_buffer(dec->screen, &dec->msg_fb_it_buffers[i], + if (!si_vid_create_buffer(dec->screen, &dec->msg_fb_it_buffers[i], msg_fb_it_size, PIPE_USAGE_STAGING)) { RVID_ERR("Can't allocated message buffers.\n"); goto error; } - if (!rvid_create_buffer(dec->screen, &dec->bs_buffers[i], + if (!si_vid_create_buffer(dec->screen, &dec->bs_buffers[i], bs_buf_size, PIPE_USAGE_STAGING)) { RVID_ERR("Can't allocated bitstream buffers.\n"); goto error; } - rvid_clear_buffer(context, &dec->msg_fb_it_buffers[i]); - rvid_clear_buffer(context, &dec->bs_buffers[i]); + si_vid_clear_buffer(context, &dec->msg_fb_it_buffers[i]); + si_vid_clear_buffer(context, &dec->bs_buffers[i]); } dpb_size = calc_dpb_size(dec); if (dpb_size) { - if (!rvid_create_buffer(dec->screen, &dec->dpb, dpb_size, PIPE_USAGE_DEFAULT)) { + if (!si_vid_create_buffer(dec->screen, &dec->dpb, dpb_size, PIPE_USAGE_DEFAULT)) { RVID_ERR("Can't allocated dpb.\n"); goto error; } - rvid_clear_buffer(context, &dec->dpb); + si_vid_clear_buffer(context, &dec->dpb); } if (dec->stream_type == RUVD_CODEC_H264_PERF && info.family >= CHIP_POLARIS10) { unsigned ctx_size = calc_ctx_size_h264_perf(dec); - if (!rvid_create_buffer(dec->screen, &dec->ctx, ctx_size, PIPE_USAGE_DEFAULT)) { + if (!si_vid_create_buffer(dec->screen, &dec->ctx, ctx_size, PIPE_USAGE_DEFAULT)) { RVID_ERR("Can't allocated context buffer.\n"); goto error; } - rvid_clear_buffer(context, &dec->ctx); + si_vid_clear_buffer(context, &dec->ctx); } if (info.family >= CHIP_POLARIS10 && info.drm_minor >= 3) { - if (!rvid_create_buffer(dec->screen, &dec->sessionctx, + if (!si_vid_create_buffer(dec->screen, &dec->sessionctx, UVD_SESSION_CONTEXT_SIZE, PIPE_USAGE_DEFAULT)) { RVID_ERR("Can't allocated session ctx.\n"); goto error; } - rvid_clear_buffer(context, &dec->sessionctx); + si_vid_clear_buffer(context, &dec->sessionctx); } if (info.family >= CHIP_VEGA10) { @@ -1492,13 +1492,13 @@ error: if (dec->cs) dec->ws->cs_destroy(dec->cs); for (i = 0; i < NUM_BUFFERS; ++i) { - rvid_destroy_buffer(&dec->msg_fb_it_buffers[i]); - rvid_destroy_buffer(&dec->bs_buffers[i]); + si_vid_destroy_buffer(&dec->msg_fb_it_buffers[i]); + si_vid_destroy_buffer(&dec->bs_buffers[i]); } - rvid_destroy_buffer(&dec->dpb); - rvid_destroy_buffer(&dec->ctx); - rvid_destroy_buffer(&dec->sessionctx); + si_vid_destroy_buffer(&dec->dpb); + si_vid_destroy_buffer(&dec->ctx); + si_vid_destroy_buffer(&dec->sessionctx); FREE(dec); @@ -1551,8 +1551,8 @@ static unsigned bank_wh(unsigned bankwh) /** * fill decoding target field from the luma and chroma surfaces */ -void ruvd_set_dt_surfaces(struct ruvd_msg *msg, struct radeon_surf *luma, - struct radeon_surf *chroma, enum ruvd_surface_type type) +void si_uvd_set_dt_surfaces(struct ruvd_msg *msg, struct radeon_surf *luma, + struct radeon_surf *chroma, enum ruvd_surface_type type) { switch (type) { default: diff --git a/src/gallium/drivers/radeon/radeon_uvd.h b/src/gallium/drivers/radeon/radeon_uvd.h index a927c843dac..2bb2ce21dd7 100644 --- a/src/gallium/drivers/radeon/radeon_uvd.h +++ b/src/gallium/drivers/radeon/radeon_uvd.h @@ -437,11 
+437,11 @@ typedef struct pb_buffer* (*ruvd_set_dtb) (struct ruvd_msg* msg, struct vl_video_buffer *vb); /* create an UVD decode */ -struct pipe_video_codec *ruvd_create_decoder(struct pipe_context *context, - const struct pipe_video_codec *templat, - ruvd_set_dtb set_dtb); +struct pipe_video_codec *si_common_uvd_create_decoder(struct pipe_context *context, + const struct pipe_video_codec *templat, + ruvd_set_dtb set_dtb); /* fill decoding target field from the luma and chroma surfaces */ -void ruvd_set_dt_surfaces(struct ruvd_msg *msg, struct radeon_surf *luma, - struct radeon_surf *chroma, enum ruvd_surface_type type); +void si_uvd_set_dt_surfaces(struct ruvd_msg *msg, struct radeon_surf *luma, + struct radeon_surf *chroma, enum ruvd_surface_type type); #endif diff --git a/src/gallium/drivers/radeon/radeon_vce.c b/src/gallium/drivers/radeon/radeon_vce.c index 6b07a72f02c..0edbe0ffd19 100644 --- a/src/gallium/drivers/radeon/radeon_vce.c +++ b/src/gallium/drivers/radeon/radeon_vce.c @@ -198,7 +198,7 @@ static unsigned get_cpb_num(struct rvce_encoder *enc) /** * Get the slot for the currently encoded frame */ -struct rvce_cpb_slot *current_slot(struct rvce_encoder *enc) +struct rvce_cpb_slot *si_current_slot(struct rvce_encoder *enc) { return LIST_ENTRY(struct rvce_cpb_slot, enc->cpb_slots.prev, list); } @@ -206,7 +206,7 @@ struct rvce_cpb_slot *current_slot(struct rvce_encoder *enc) /** * Get the slot for L0 */ -struct rvce_cpb_slot *l0_slot(struct rvce_encoder *enc) +struct rvce_cpb_slot *si_l0_slot(struct rvce_encoder *enc) { return LIST_ENTRY(struct rvce_cpb_slot, enc->cpb_slots.next, list); } @@ -214,7 +214,7 @@ struct rvce_cpb_slot *l0_slot(struct rvce_encoder *enc) /** * Get the slot for L1 */ -struct rvce_cpb_slot *l1_slot(struct rvce_encoder *enc) +struct rvce_cpb_slot *si_l1_slot(struct rvce_encoder *enc) { return LIST_ENTRY(struct rvce_cpb_slot, enc->cpb_slots.next->next, list); } @@ -222,8 +222,8 @@ struct rvce_cpb_slot *l1_slot(struct rvce_encoder *enc) /** * Calculate the offsets into the CPB */ -void rvce_frame_offset(struct rvce_encoder *enc, struct rvce_cpb_slot *slot, - signed *luma_offset, signed *chroma_offset) +void si_vce_frame_offset(struct rvce_encoder *enc, struct rvce_cpb_slot *slot, + signed *luma_offset, signed *chroma_offset) { struct r600_common_screen *rscreen = (struct r600_common_screen *)enc->screen; unsigned pitch, vpitch, fsize; @@ -249,15 +249,15 @@ static void rvce_destroy(struct pipe_video_codec *encoder) struct rvce_encoder *enc = (struct rvce_encoder*)encoder; if (enc->stream_handle) { struct rvid_buffer fb; - rvid_create_buffer(enc->screen, &fb, 512, PIPE_USAGE_STAGING); + si_vid_create_buffer(enc->screen, &fb, 512, PIPE_USAGE_STAGING); enc->fb = &fb; enc->session(enc); enc->feedback(enc); enc->destroy(enc); flush(enc); - rvid_destroy_buffer(&fb); + si_vid_destroy_buffer(&fb); } - rvid_destroy_buffer(&enc->cpb); + si_vid_destroy_buffer(&enc->cpb); enc->ws->cs_destroy(enc->cs); FREE(enc->cpb_array); FREE(enc); @@ -278,7 +278,7 @@ static void rvce_begin_frame(struct pipe_video_codec *encoder, enc->pic.quant_b_frames != pic->quant_b_frames; enc->pic = *pic; - get_pic_param(enc, pic); + si_get_pic_param(enc, pic); enc->get_buffer(vid_buf->resources[0], &enc->handle, &enc->luma); enc->get_buffer(vid_buf->resources[1], NULL, &enc->chroma); @@ -291,8 +291,8 @@ static void rvce_begin_frame(struct pipe_video_codec *encoder, if (!enc->stream_handle) { struct rvid_buffer fb; - enc->stream_handle = rvid_alloc_stream_handle(); - rvid_create_buffer(enc->screen, 
&fb, 512, PIPE_USAGE_STAGING); + enc->stream_handle = si_vid_alloc_stream_handle(); + si_vid_create_buffer(enc->screen, &fb, 512, PIPE_USAGE_STAGING); enc->fb = &fb; enc->session(enc); enc->create(enc); @@ -300,7 +300,7 @@ static void rvce_begin_frame(struct pipe_video_codec *encoder, enc->feedback(enc); flush(enc); //dump_feedback(enc, &fb); - rvid_destroy_buffer(&fb); + si_vid_destroy_buffer(&fb); need_rate_control = false; } @@ -321,7 +321,7 @@ static void rvce_encode_bitstream(struct pipe_video_codec *encoder, enc->bs_size = destination->width0; *fb = enc->fb = CALLOC_STRUCT(rvid_buffer); - if (!rvid_create_buffer(enc->screen, enc->fb, 512, PIPE_USAGE_STAGING)) { + if (!si_vid_create_buffer(enc->screen, enc->fb, 512, PIPE_USAGE_STAGING)) { RVID_ERR("Can't create feedback buffer.\n"); return; } @@ -370,7 +370,7 @@ static void rvce_get_feedback(struct pipe_video_codec *encoder, enc->ws->buffer_unmap(fb->res->buf); } //dump_feedback(enc, fb); - rvid_destroy_buffer(fb); + si_vid_destroy_buffer(fb); FREE(fb); } @@ -390,10 +390,10 @@ static void rvce_cs_flush(void *ctx, unsigned flags, // just ignored } -struct pipe_video_codec *rvce_create_encoder(struct pipe_context *context, - const struct pipe_video_codec *templ, - struct radeon_winsys* ws, - rvce_get_buffer get_buffer) +struct pipe_video_codec *si_vce_create_encoder(struct pipe_context *context, + const struct pipe_video_codec *templ, + struct radeon_winsys* ws, + rvce_get_buffer get_buffer) { struct r600_common_screen *rscreen = (struct r600_common_screen *)context->screen; struct r600_common_context *rctx = (struct r600_common_context*)context; @@ -406,7 +406,7 @@ struct pipe_video_codec *rvce_create_encoder(struct pipe_context *context, RVID_ERR("Kernel doesn't supports VCE!\n"); return NULL; - } else if (!rvce_is_fw_version_supported(rscreen)) { + } else if (!si_vce_is_fw_version_supported(rscreen)) { RVID_ERR("Unsupported VCE fw version loaded!\n"); return NULL; } @@ -479,7 +479,7 @@ struct pipe_video_codec *rvce_create_encoder(struct pipe_context *context, cpb_size += RVCE_MAX_AUX_BUFFER_NUM * RVCE_MAX_BITSTREAM_OUTPUT_ROW_SIZE * 2; tmp_buf->destroy(tmp_buf); - if (!rvid_create_buffer(enc->screen, &enc->cpb, cpb_size, PIPE_USAGE_DEFAULT)) { + if (!si_vid_create_buffer(enc->screen, &enc->cpb, cpb_size, PIPE_USAGE_DEFAULT)) { RVID_ERR("Can't create CPB buffer.\n"); goto error; } @@ -492,29 +492,29 @@ struct pipe_video_codec *rvce_create_encoder(struct pipe_context *context, switch (rscreen->info.vce_fw_version) { case FW_40_2_2: - radeon_vce_40_2_2_init(enc); - get_pic_param = radeon_vce_40_2_2_get_param; + si_vce_40_2_2_init(enc); + si_get_pic_param = si_vce_40_2_2_get_param; break; case FW_50_0_1: case FW_50_1_2: case FW_50_10_2: case FW_50_17_3: - radeon_vce_50_init(enc); - get_pic_param = radeon_vce_50_get_param; + si_vce_50_init(enc); + si_get_pic_param = si_vce_50_get_param; break; case FW_52_0_3: case FW_52_4_3: case FW_52_8_3: - radeon_vce_52_init(enc); - get_pic_param = radeon_vce_52_get_param; + si_vce_52_init(enc); + si_get_pic_param = si_vce_52_get_param; break; default: if ((rscreen->info.vce_fw_version & (0xff << 24)) == FW_53) { - radeon_vce_52_init(enc); - get_pic_param = radeon_vce_52_get_param; + si_vce_52_init(enc); + si_get_pic_param = si_vce_52_get_param; } else goto error; } @@ -525,7 +525,7 @@ error: if (enc->cs) enc->ws->cs_destroy(enc->cs); - rvid_destroy_buffer(&enc->cpb); + si_vid_destroy_buffer(&enc->cpb); FREE(enc->cpb_array); FREE(enc); @@ -535,7 +535,7 @@ error: /** * check if kernel has the right fw 
version loaded */ -bool rvce_is_fw_version_supported(struct r600_common_screen *rscreen) +bool si_vce_is_fw_version_supported(struct r600_common_screen *rscreen) { switch (rscreen->info.vce_fw_version) { case FW_40_2_2: @@ -558,9 +558,9 @@ bool rvce_is_fw_version_supported(struct r600_common_screen *rscreen) /** * Add the buffer as relocation to the current command submission */ -void rvce_add_buffer(struct rvce_encoder *enc, struct pb_buffer *buf, - enum radeon_bo_usage usage, enum radeon_bo_domain domain, - signed offset) +void si_vce_add_buffer(struct rvce_encoder *enc, struct pb_buffer *buf, + enum radeon_bo_usage usage, enum radeon_bo_domain domain, + signed offset) { int reloc_idx; diff --git a/src/gallium/drivers/radeon/radeon_vce.h b/src/gallium/drivers/radeon/radeon_vce.h index f79e65c9ac2..f34a8eaf826 100644 --- a/src/gallium/drivers/radeon/radeon_vce.h +++ b/src/gallium/drivers/radeon/radeon_vce.h @@ -40,9 +40,9 @@ #define RVCE_BEGIN(cmd) { \ uint32_t *begin = &enc->cs->current.buf[enc->cs->current.cdw++]; \ RVCE_CS(cmd) -#define RVCE_READ(buf, domain, off) rvce_add_buffer(enc, (buf), RADEON_USAGE_READ, (domain), (off)) -#define RVCE_WRITE(buf, domain, off) rvce_add_buffer(enc, (buf), RADEON_USAGE_WRITE, (domain), (off)) -#define RVCE_READWRITE(buf, domain, off) rvce_add_buffer(enc, (buf), RADEON_USAGE_READWRITE, (domain), (off)) +#define RVCE_READ(buf, domain, off) si_vce_add_buffer(enc, (buf), RADEON_USAGE_READ, (domain), (off)) +#define RVCE_WRITE(buf, domain, off) si_vce_add_buffer(enc, (buf), RADEON_USAGE_WRITE, (domain), (off)) +#define RVCE_READWRITE(buf, domain, off) si_vce_add_buffer(enc, (buf), RADEON_USAGE_READWRITE, (domain), (off)) #define RVCE_END() *begin = (&enc->cs->current.buf[enc->cs->current.cdw] - begin) * 4; } #define RVCE_MAX_BITSTREAM_OUTPUT_ROW_SIZE (4096 * 16 * 2.5) @@ -417,46 +417,46 @@ struct rvce_encoder { }; /* CPB handling functions */ -struct rvce_cpb_slot *current_slot(struct rvce_encoder *enc); -struct rvce_cpb_slot *l0_slot(struct rvce_encoder *enc); -struct rvce_cpb_slot *l1_slot(struct rvce_encoder *enc); -void rvce_frame_offset(struct rvce_encoder *enc, struct rvce_cpb_slot *slot, - signed *luma_offset, signed *chroma_offset); +struct rvce_cpb_slot *si_current_slot(struct rvce_encoder *enc); +struct rvce_cpb_slot *si_l0_slot(struct rvce_encoder *enc); +struct rvce_cpb_slot *si_l1_slot(struct rvce_encoder *enc); +void si_vce_frame_offset(struct rvce_encoder *enc, struct rvce_cpb_slot *slot, + signed *luma_offset, signed *chroma_offset); -struct pipe_video_codec *rvce_create_encoder(struct pipe_context *context, - const struct pipe_video_codec *templat, - struct radeon_winsys* ws, - rvce_get_buffer get_buffer); +struct pipe_video_codec *si_vce_create_encoder(struct pipe_context *context, + const struct pipe_video_codec *templat, + struct radeon_winsys* ws, + rvce_get_buffer get_buffer); -bool rvce_is_fw_version_supported(struct r600_common_screen *rscreen); +bool si_vce_is_fw_version_supported(struct r600_common_screen *rscreen); -void rvce_add_buffer(struct rvce_encoder *enc, struct pb_buffer *buf, - enum radeon_bo_usage usage, enum radeon_bo_domain domain, - signed offset); +void si_vce_add_buffer(struct rvce_encoder *enc, struct pb_buffer *buf, + enum radeon_bo_usage usage, enum radeon_bo_domain domain, + signed offset); /* init vce fw 40.2.2 specific callbacks */ -void radeon_vce_40_2_2_init(struct rvce_encoder *enc); +void si_vce_40_2_2_init(struct rvce_encoder *enc); /* init vce fw 50 specific callbacks */ -void radeon_vce_50_init(struct 
rvce_encoder *enc); +void si_vce_50_init(struct rvce_encoder *enc); /* init vce fw 52 specific callbacks */ -void radeon_vce_52_init(struct rvce_encoder *enc); +void si_vce_52_init(struct rvce_encoder *enc); /* version specific function for getting parameters */ -void (*get_pic_param)(struct rvce_encoder *enc, +void (*si_get_pic_param)(struct rvce_encoder *enc, struct pipe_h264_enc_picture_desc *pic); /* get parameters for vce 40.2.2 */ -void radeon_vce_40_2_2_get_param(struct rvce_encoder *enc, - struct pipe_h264_enc_picture_desc *pic); +void si_vce_40_2_2_get_param(struct rvce_encoder *enc, + struct pipe_h264_enc_picture_desc *pic); /* get parameters for vce 50 */ -void radeon_vce_50_get_param(struct rvce_encoder *enc, - struct pipe_h264_enc_picture_desc *pic); +void si_vce_50_get_param(struct rvce_encoder *enc, + struct pipe_h264_enc_picture_desc *pic); /* get parameters for vce 52 */ -void radeon_vce_52_get_param(struct rvce_encoder *enc, - struct pipe_h264_enc_picture_desc *pic); +void si_vce_52_get_param(struct rvce_encoder *enc, + struct pipe_h264_enc_picture_desc *pic); #endif diff --git a/src/gallium/drivers/radeon/radeon_vce_40_2_2.c b/src/gallium/drivers/radeon/radeon_vce_40_2_2.c index b9afd089af8..abfb74b9093 100644 --- a/src/gallium/drivers/radeon/radeon_vce_40_2_2.c +++ b/src/gallium/drivers/radeon/radeon_vce_40_2_2.c @@ -363,8 +363,8 @@ static void encode(struct rvce_encoder *enc) RVCE_CS(0x00000000); // pictureStructure if(enc->pic.picture_type == PIPE_H264_ENC_PICTURE_TYPE_P || enc->pic.picture_type == PIPE_H264_ENC_PICTURE_TYPE_B) { - struct rvce_cpb_slot *l0 = l0_slot(enc); - rvce_frame_offset(enc, l0, &luma_offset, &chroma_offset); + struct rvce_cpb_slot *l0 = si_l0_slot(enc); + si_vce_frame_offset(enc, l0, &luma_offset, &chroma_offset); RVCE_CS(l0->picture_type); // encPicType RVCE_CS(l0->frame_num); // frameNumber RVCE_CS(l0->pic_order_cnt); // pictureOrderCount @@ -389,8 +389,8 @@ static void encode(struct rvce_encoder *enc) // encReferencePictureL1[0] RVCE_CS(0x00000000); // pictureStructure if(enc->pic.picture_type == PIPE_H264_ENC_PICTURE_TYPE_B) { - struct rvce_cpb_slot *l1 = l1_slot(enc); - rvce_frame_offset(enc, l1, &luma_offset, &chroma_offset); + struct rvce_cpb_slot *l1 = si_l1_slot(enc); + si_vce_frame_offset(enc, l1, &luma_offset, &chroma_offset); RVCE_CS(l1->picture_type); // encPicType RVCE_CS(l1->frame_num); // frameNumber RVCE_CS(l1->pic_order_cnt); // pictureOrderCount @@ -404,7 +404,7 @@ static void encode(struct rvce_encoder *enc) RVCE_CS(0xffffffff); // chromaOffset } - rvce_frame_offset(enc, current_slot(enc), &luma_offset, &chroma_offset); + si_vce_frame_offset(enc, si_current_slot(enc), &luma_offset, &chroma_offset); RVCE_CS(luma_offset); // encReconstructedLumaOffset RVCE_CS(chroma_offset); // encReconstructedChromaOffset RVCE_CS(0x00000000); // encColocBufferOffset @@ -431,11 +431,11 @@ static void destroy(struct rvce_encoder *enc) RVCE_END(); } -void radeon_vce_40_2_2_get_param(struct rvce_encoder *enc, struct pipe_h264_enc_picture_desc *pic) +void si_vce_40_2_2_get_param(struct rvce_encoder *enc, struct pipe_h264_enc_picture_desc *pic) { } -void radeon_vce_40_2_2_init(struct rvce_encoder *enc) +void si_vce_40_2_2_init(struct rvce_encoder *enc) { enc->session = session; enc->task_info = task_info; diff --git a/src/gallium/drivers/radeon/radeon_vce_50.c b/src/gallium/drivers/radeon/radeon_vce_50.c index 0d11814515d..96bb557ebb2 100644 --- a/src/gallium/drivers/radeon/radeon_vce_50.c +++ b/src/gallium/drivers/radeon/radeon_vce_50.c @@ -173,8 
+173,8 @@ static void encode(struct rvce_encoder *enc) RVCE_CS(0x00000000); // pictureStructure if(enc->pic.picture_type == PIPE_H264_ENC_PICTURE_TYPE_P || enc->pic.picture_type == PIPE_H264_ENC_PICTURE_TYPE_B) { - struct rvce_cpb_slot *l0 = l0_slot(enc); - rvce_frame_offset(enc, l0, &luma_offset, &chroma_offset); + struct rvce_cpb_slot *l0 = si_l0_slot(enc); + si_vce_frame_offset(enc, l0, &luma_offset, &chroma_offset); RVCE_CS(l0->picture_type); // encPicType RVCE_CS(l0->frame_num); // frameNumber RVCE_CS(l0->pic_order_cnt); // pictureOrderCount @@ -199,8 +199,8 @@ static void encode(struct rvce_encoder *enc) // encReferencePictureL1[0] RVCE_CS(0x00000000); // pictureStructure if(enc->pic.picture_type == PIPE_H264_ENC_PICTURE_TYPE_B) { - struct rvce_cpb_slot *l1 = l1_slot(enc); - rvce_frame_offset(enc, l1, &luma_offset, &chroma_offset); + struct rvce_cpb_slot *l1 = si_l1_slot(enc); + si_vce_frame_offset(enc, l1, &luma_offset, &chroma_offset); RVCE_CS(l1->picture_type); // encPicType RVCE_CS(l1->frame_num); // frameNumber RVCE_CS(l1->pic_order_cnt); // pictureOrderCount @@ -214,7 +214,7 @@ static void encode(struct rvce_encoder *enc) RVCE_CS(0xffffffff); // chromaOffset } - rvce_frame_offset(enc, current_slot(enc), &luma_offset, &chroma_offset); + si_vce_frame_offset(enc, si_current_slot(enc), &luma_offset, &chroma_offset); RVCE_CS(luma_offset); // encReconstructedLumaOffset RVCE_CS(chroma_offset); // encReconstructedChromaOffset RVCE_CS(0x00000000); // encColocBufferOffset @@ -233,13 +233,13 @@ static void encode(struct rvce_encoder *enc) RVCE_END(); } -void radeon_vce_50_get_param(struct rvce_encoder *enc, struct pipe_h264_enc_picture_desc *pic) +void si_vce_50_get_param(struct rvce_encoder *enc, struct pipe_h264_enc_picture_desc *pic) { } -void radeon_vce_50_init(struct rvce_encoder *enc) +void si_vce_50_init(struct rvce_encoder *enc) { - radeon_vce_40_2_2_init(enc); + si_vce_40_2_2_init(enc); /* only the two below are different */ enc->rate_control = rate_control; diff --git a/src/gallium/drivers/radeon/radeon_vce_52.c b/src/gallium/drivers/radeon/radeon_vce_52.c index 36cf4804722..3f2e6cbcda5 100644 --- a/src/gallium/drivers/radeon/radeon_vce_52.c +++ b/src/gallium/drivers/radeon/radeon_vce_52.c @@ -138,7 +138,7 @@ static void get_vui_param(struct rvce_encoder *enc, struct pipe_h264_enc_picture enc->enc_pic.vui.max_dec_frame_buffering = 0x00000003; } -void radeon_vce_52_get_param(struct rvce_encoder *enc, struct pipe_h264_enc_picture_desc *pic) +void si_vce_52_get_param(struct rvce_encoder *enc, struct pipe_h264_enc_picture_desc *pic) { get_rate_control_param(enc, pic); get_motion_estimation_param(enc, pic); @@ -319,8 +319,8 @@ static void encode(struct rvce_encoder *enc) RVCE_CS(0x00000000); // pictureStructure if(enc->enc_pic.picture_type == PIPE_H264_ENC_PICTURE_TYPE_P || enc->enc_pic.picture_type == PIPE_H264_ENC_PICTURE_TYPE_B) { - struct rvce_cpb_slot *l0 = l0_slot(enc); - rvce_frame_offset(enc, l0, &luma_offset, &chroma_offset); + struct rvce_cpb_slot *l0 = si_l0_slot(enc); + si_vce_frame_offset(enc, l0, &luma_offset, &chroma_offset); RVCE_CS(l0->picture_type); RVCE_CS(l0->frame_num); RVCE_CS(l0->pic_order_cnt); @@ -356,8 +356,8 @@ static void encode(struct rvce_encoder *enc) // encReferencePictureL1[0] RVCE_CS(0x00000000); // pictureStructure if(enc->enc_pic.picture_type == PIPE_H264_ENC_PICTURE_TYPE_B) { - struct rvce_cpb_slot *l1 = l1_slot(enc); - rvce_frame_offset(enc, l1, &luma_offset, &chroma_offset); + struct rvce_cpb_slot *l1 = si_l1_slot(enc); + 
si_vce_frame_offset(enc, l1, &luma_offset, &chroma_offset); RVCE_CS(l1->picture_type); RVCE_CS(l1->frame_num); RVCE_CS(l1->pic_order_cnt); @@ -376,7 +376,7 @@ static void encode(struct rvce_encoder *enc) RVCE_CS(enc->enc_pic.eo.l1_chroma_offset); } - rvce_frame_offset(enc, current_slot(enc), &luma_offset, &chroma_offset); + si_vce_frame_offset(enc, si_current_slot(enc), &luma_offset, &chroma_offset); RVCE_CS(luma_offset); RVCE_CS(chroma_offset); RVCE_CS(enc->enc_pic.eo.enc_coloc_buffer_offset); @@ -646,7 +646,7 @@ static void vui(struct rvce_encoder *enc) RVCE_END(); } -void radeon_vce_52_init(struct rvce_encoder *enc) +void si_vce_52_init(struct rvce_encoder *enc) { enc->session = session; enc->task_info = task_info; diff --git a/src/gallium/drivers/radeon/radeon_vcn_dec.c b/src/gallium/drivers/radeon/radeon_vcn_dec.c index a7a57f2969e..2ece4a3fdaf 100644 --- a/src/gallium/drivers/radeon/radeon_vcn_dec.c +++ b/src/gallium/drivers/radeon/radeon_vcn_dec.c @@ -678,9 +678,9 @@ static struct pb_buffer *rvcn_dec_message_decode(struct radeon_decoder *dec, (struct pipe_h265_picture_desc*)picture); else ctx_size = calc_ctx_size_h265_main(dec); - if (!rvid_create_buffer(dec->screen, &dec->ctx, ctx_size, PIPE_USAGE_DEFAULT)) + if (!si_vid_create_buffer(dec->screen, &dec->ctx, ctx_size, PIPE_USAGE_DEFAULT)) RVID_ERR("Can't allocated context buffer.\n"); - rvid_clear_buffer(dec->base.context, &dec->ctx); + si_vid_clear_buffer(dec->base.context, &dec->ctx); } break; } @@ -1026,13 +1026,13 @@ static void radeon_dec_destroy(struct pipe_video_codec *decoder) dec->ws->cs_destroy(dec->cs); for (i = 0; i < NUM_BUFFERS; ++i) { - rvid_destroy_buffer(&dec->msg_fb_it_buffers[i]); - rvid_destroy_buffer(&dec->bs_buffers[i]); + si_vid_destroy_buffer(&dec->msg_fb_it_buffers[i]); + si_vid_destroy_buffer(&dec->bs_buffers[i]); } - rvid_destroy_buffer(&dec->dpb); - rvid_destroy_buffer(&dec->ctx); - rvid_destroy_buffer(&dec->sessionctx); + si_vid_destroy_buffer(&dec->dpb); + si_vid_destroy_buffer(&dec->ctx); + si_vid_destroy_buffer(&dec->sessionctx); FREE(dec); } @@ -1096,7 +1096,7 @@ static void radeon_dec_decode_bitstream(struct pipe_video_codec *decoder, if (new_size > buf->res->buf->size) { dec->ws->buffer_unmap(buf->res->buf); - if (!rvid_resize_buffer(dec->screen, dec->cs, buf, new_size)) { + if (!si_vid_resize_buffer(dec->screen, dec->cs, buf, new_size)) { RVID_ERR("Can't resize bitstream buffer!"); return; } @@ -1227,7 +1227,7 @@ struct pipe_video_codec *radeon_create_decoder(struct pipe_context *context, dec->base.flush = radeon_dec_flush; dec->stream_type = stream_type; - dec->stream_handle = rvid_alloc_stream_handle(); + dec->stream_handle = si_vid_alloc_stream_handle(); dec->screen = context->screen; dec->ws = ws; dec->cs = ws->cs_create(rctx->ctx, RING_VCN_DEC, NULL, NULL); @@ -1242,47 +1242,47 @@ struct pipe_video_codec *radeon_create_decoder(struct pipe_context *context, if (have_it(dec)) msg_fb_it_size += IT_SCALING_TABLE_SIZE; /* use vram to improve performance, workaround an unknown bug */ - if (!rvid_create_buffer(dec->screen, &dec->msg_fb_it_buffers[i], - msg_fb_it_size, PIPE_USAGE_DEFAULT)) { + if (!si_vid_create_buffer(dec->screen, &dec->msg_fb_it_buffers[i], + msg_fb_it_size, PIPE_USAGE_DEFAULT)) { RVID_ERR("Can't allocated message buffers.\n"); goto error; } - if (!rvid_create_buffer(dec->screen, &dec->bs_buffers[i], - bs_buf_size, PIPE_USAGE_STAGING)) { + if (!si_vid_create_buffer(dec->screen, &dec->bs_buffers[i], + bs_buf_size, PIPE_USAGE_STAGING)) { RVID_ERR("Can't allocated bitstream 
buffers.\n"); goto error; } - rvid_clear_buffer(context, &dec->msg_fb_it_buffers[i]); - rvid_clear_buffer(context, &dec->bs_buffers[i]); + si_vid_clear_buffer(context, &dec->msg_fb_it_buffers[i]); + si_vid_clear_buffer(context, &dec->bs_buffers[i]); } dpb_size = calc_dpb_size(dec); - if (!rvid_create_buffer(dec->screen, &dec->dpb, dpb_size, PIPE_USAGE_DEFAULT)) { + if (!si_vid_create_buffer(dec->screen, &dec->dpb, dpb_size, PIPE_USAGE_DEFAULT)) { RVID_ERR("Can't allocated dpb.\n"); goto error; } - rvid_clear_buffer(context, &dec->dpb); + si_vid_clear_buffer(context, &dec->dpb); if (dec->stream_type == RDECODE_CODEC_H264_PERF) { unsigned ctx_size = calc_ctx_size_h264_perf(dec); - if (!rvid_create_buffer(dec->screen, &dec->ctx, ctx_size, PIPE_USAGE_DEFAULT)) { + if (!si_vid_create_buffer(dec->screen, &dec->ctx, ctx_size, PIPE_USAGE_DEFAULT)) { RVID_ERR("Can't allocated context buffer.\n"); goto error; } - rvid_clear_buffer(context, &dec->ctx); + si_vid_clear_buffer(context, &dec->ctx); } - if (!rvid_create_buffer(dec->screen, &dec->sessionctx, - RDECODE_SESSION_CONTEXT_SIZE, - PIPE_USAGE_DEFAULT)) { + if (!si_vid_create_buffer(dec->screen, &dec->sessionctx, + RDECODE_SESSION_CONTEXT_SIZE, + PIPE_USAGE_DEFAULT)) { RVID_ERR("Can't allocated session ctx.\n"); goto error; } - rvid_clear_buffer(context, &dec->sessionctx); + si_vid_clear_buffer(context, &dec->sessionctx); map_msg_fb_it_buf(dec); rvcn_dec_message_create(dec); @@ -1299,13 +1299,13 @@ error: if (dec->cs) dec->ws->cs_destroy(dec->cs); for (i = 0; i < NUM_BUFFERS; ++i) { - rvid_destroy_buffer(&dec->msg_fb_it_buffers[i]); - rvid_destroy_buffer(&dec->bs_buffers[i]); + si_vid_destroy_buffer(&dec->msg_fb_it_buffers[i]); + si_vid_destroy_buffer(&dec->bs_buffers[i]); } - rvid_destroy_buffer(&dec->dpb); - rvid_destroy_buffer(&dec->ctx); - rvid_destroy_buffer(&dec->sessionctx); + si_vid_destroy_buffer(&dec->dpb); + si_vid_destroy_buffer(&dec->ctx); + si_vid_destroy_buffer(&dec->sessionctx); FREE(dec); diff --git a/src/gallium/drivers/radeon/radeon_video.c b/src/gallium/drivers/radeon/radeon_video.c index 99b6676fee1..08710fb8332 100644 --- a/src/gallium/drivers/radeon/radeon_video.c +++ b/src/gallium/drivers/radeon/radeon_video.c @@ -46,7 +46,7 @@ #define UVD_FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8)) /* generate an stream handle */ -unsigned rvid_alloc_stream_handle() +unsigned si_vid_alloc_stream_handle() { static unsigned counter = 0; unsigned stream_handle = 0; @@ -61,8 +61,8 @@ unsigned rvid_alloc_stream_handle() } /* create a buffer in the winsys */ -bool rvid_create_buffer(struct pipe_screen *screen, struct rvid_buffer *buffer, - unsigned size, unsigned usage) +bool si_vid_create_buffer(struct pipe_screen *screen, struct rvid_buffer *buffer, + unsigned size, unsigned usage) { memset(buffer, 0, sizeof(*buffer)); buffer->usage = usage; @@ -79,14 +79,14 @@ bool rvid_create_buffer(struct pipe_screen *screen, struct rvid_buffer *buffer, } /* destroy a buffer */ -void rvid_destroy_buffer(struct rvid_buffer *buffer) +void si_vid_destroy_buffer(struct rvid_buffer *buffer) { r600_resource_reference(&buffer->res, NULL); } /* reallocate a buffer, preserving its content */ -bool rvid_resize_buffer(struct pipe_screen *screen, struct radeon_winsys_cs *cs, - struct rvid_buffer *new_buf, unsigned new_size) +bool si_vid_resize_buffer(struct pipe_screen *screen, struct radeon_winsys_cs *cs, + struct rvid_buffer *new_buf, unsigned new_size) { struct r600_common_screen *rscreen = (struct r600_common_screen *)screen; struct radeon_winsys* ws = 
diff --git a/src/gallium/drivers/radeon/radeon_video.c b/src/gallium/drivers/radeon/radeon_video.c
index 99b6676fee1..08710fb8332 100644
--- a/src/gallium/drivers/radeon/radeon_video.c
+++ b/src/gallium/drivers/radeon/radeon_video.c
@@ -46,7 +46,7 @@
#define UVD_FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
/* generate an stream handle */
-unsigned rvid_alloc_stream_handle()
+unsigned si_vid_alloc_stream_handle()
{
static unsigned counter = 0;
unsigned stream_handle = 0;
@@ -61,8 +61,8 @@ unsigned rvid_alloc_stream_handle()
}
/* create a buffer in the winsys */
-bool rvid_create_buffer(struct pipe_screen *screen, struct rvid_buffer *buffer,
- unsigned size, unsigned usage)
+bool si_vid_create_buffer(struct pipe_screen *screen, struct rvid_buffer *buffer,
+ unsigned size, unsigned usage)
{
memset(buffer, 0, sizeof(*buffer));
buffer->usage = usage;
@@ -79,14 +79,14 @@ bool rvid_create_buffer(struct pipe_screen *screen, struct rvid_buffer *buffer,
}
/* destroy a buffer */
-void rvid_destroy_buffer(struct rvid_buffer *buffer)
+void si_vid_destroy_buffer(struct rvid_buffer *buffer)
{
r600_resource_reference(&buffer->res, NULL);
}
/* reallocate a buffer, preserving its content */
-bool rvid_resize_buffer(struct pipe_screen *screen, struct radeon_winsys_cs *cs,
- struct rvid_buffer *new_buf, unsigned new_size)
+bool si_vid_resize_buffer(struct pipe_screen *screen, struct radeon_winsys_cs *cs,
+ struct rvid_buffer *new_buf, unsigned new_size)
{
struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
struct radeon_winsys* ws = rscreen->ws;
@@ -94,7 +94,7 @@ bool rvid_resize_buffer(struct pipe_screen *screen, struct radeon_winsys_cs *cs,
struct rvid_buffer old_buf = *new_buf;
void *src = NULL, *dst = NULL;
- if (!rvid_create_buffer(screen, new_buf, new_size, new_buf->usage))
+ if (!si_vid_create_buffer(screen, new_buf, new_size, new_buf->usage))
goto error;
src = ws->buffer_map(old_buf.res->buf, cs, PIPE_TRANSFER_READ);
@@ -113,19 +113,19 @@ bool rvid_resize_buffer(struct pipe_screen *screen, struct radeon_winsys_cs *cs,
}
ws->buffer_unmap(new_buf->res->buf);
ws->buffer_unmap(old_buf.res->buf);
- rvid_destroy_buffer(&old_buf);
+ si_vid_destroy_buffer(&old_buf);
return true;
error:
if (src)
ws->buffer_unmap(old_buf.res->buf);
- rvid_destroy_buffer(new_buf);
+ si_vid_destroy_buffer(new_buf);
*new_buf = old_buf;
return false;
}
/* clear the buffer with zeros */
-void rvid_clear_buffer(struct pipe_context *context, struct rvid_buffer* buffer)
+void si_vid_clear_buffer(struct pipe_context *context, struct rvid_buffer* buffer)
{
struct r600_common_context *rctx = (struct r600_common_context*)context;
@@ -138,9 +138,9 @@ void rvid_clear_buffer(struct pipe_context *context, struct rvid_buffer* buffer)
* join surfaces into the same buffer with identical tiling params
* sumup their sizes and replace the backend buffers with a single bo
*/
-void rvid_join_surfaces(struct r600_common_context *rctx,
- struct pb_buffer** buffers[VL_NUM_COMPONENTS],
- struct radeon_surf *surfaces[VL_NUM_COMPONENTS])
+void si_vid_join_surfaces(struct r600_common_context *rctx,
+ struct pb_buffer** buffers[VL_NUM_COMPONENTS],
+ struct radeon_surf *surfaces[VL_NUM_COMPONENTS])
{
struct radeon_winsys* ws;
unsigned best_tiling, best_wh, off;
@@ -218,10 +218,10 @@ void rvid_join_surfaces(struct r600_common_context *rctx,
pb_reference(&pb, NULL);
}
-int rvid_get_video_param(struct pipe_screen *screen,
- enum pipe_video_profile profile,
- enum pipe_video_entrypoint entrypoint,
- enum pipe_video_cap param)
+int si_vid_get_video_param(struct pipe_screen *screen,
+ enum pipe_video_profile profile,
+ enum pipe_video_entrypoint entrypoint,
+ enum pipe_video_cap param)
{
struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
enum pipe_video_format codec = u_reduce_video_profile(profile);
@@ -233,7 +233,7 @@ int rvid_get_video_param(struct pipe_screen *screen,
switch (param) {
case PIPE_VIDEO_CAP_SUPPORTED:
return codec == PIPE_VIDEO_FORMAT_MPEG4_AVC &&
- rvce_is_fw_version_supported(rscreen);
+ si_vce_is_fw_version_supported(rscreen);
case PIPE_VIDEO_CAP_NPOT_TEXTURES:
return 1;
case PIPE_VIDEO_CAP_MAX_WIDTH:
@@ -354,10 +354,10 @@ int rvid_get_video_param(struct pipe_screen *screen,
}
}
-boolean rvid_is_format_supported(struct pipe_screen *screen,
- enum pipe_format format,
- enum pipe_video_profile profile,
- enum pipe_video_entrypoint entrypoint)
+boolean si_vid_is_format_supported(struct pipe_screen *screen,
+ enum pipe_format format,
+ enum pipe_video_profile profile,
+ enum pipe_video_entrypoint entrypoint)
{
/* HEVC 10 bit decoding should use P016 instead of NV12 if possible */
if (profile == PIPE_VIDEO_PROFILE_HEVC_MAIN_10)
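si_vid_resize_buffer() above keeps a copy of the old buffer, creates the new one, copies over as much content as fits, and only then destroys the old buffer; on any failure it restores the old buffer so the caller still owns a valid one. A stand-alone sketch of the same structure with plain heap memory (the winsys mapping details are left out):

/* Stand-alone sketch of the resize-while-preserving-content structure of
 * si_vid_resize_buffer(), using plain heap memory instead of winsys buffers.
 * This only mirrors the keep-old / create-new / copy / destroy-old /
 * restore-on-error flow, not the driver's buffer_map handling. */
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct simple_buffer {
	void *data;
	size_t size;
};

static bool resize_buffer(struct simple_buffer *buf, size_t new_size)
{
	struct simple_buffer old_buf = *buf; /* keep the old buffer around */
	size_t copy_size = old_buf.size < new_size ? old_buf.size : new_size;
	void *new_data = malloc(new_size);

	if (!new_data)
		goto error;

	memcpy(new_data, old_buf.data, copy_size);
	if (new_size > copy_size) /* zero the tail the old buffer did not cover */
		memset((char *)new_data + copy_size, 0, new_size - copy_size);

	buf->data = new_data;
	buf->size = new_size;
	free(old_buf.data); /* plays the role of si_vid_destroy_buffer(&old_buf) */
	return true;

error:
	*buf = old_buf; /* on failure the caller keeps the old, intact buffer */
	return false;
}

int main(void)
{
	struct simple_buffer buf = { calloc(1, 64), 64 };

	if (!buf.data)
		return 1;
	if (!resize_buffer(&buf, 256)) {
		free(buf.data);
		return 1;
	}
	free(buf.data);
	return 0;
}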
diff --git a/src/gallium/drivers/radeon/radeon_video.h b/src/gallium/drivers/radeon/radeon_video.h
index 3347c4ebced..7e70be98bf1 100644
--- a/src/gallium/drivers/radeon/radeon_video.h
+++ b/src/gallium/drivers/radeon/radeon_video.h
@@ -48,38 +48,38 @@ struct rvid_buffer
};
/* generate an stream handle */
-unsigned rvid_alloc_stream_handle(void);
+unsigned si_vid_alloc_stream_handle(void);
/* create a buffer in the winsys */
-bool rvid_create_buffer(struct pipe_screen *screen, struct rvid_buffer *buffer,
- unsigned size, unsigned usage);
+bool si_vid_create_buffer(struct pipe_screen *screen, struct rvid_buffer *buffer,
+ unsigned size, unsigned usage);
/* destroy a buffer */
-void rvid_destroy_buffer(struct rvid_buffer *buffer);
+void si_vid_destroy_buffer(struct rvid_buffer *buffer);
/* reallocate a buffer, preserving its content */
-bool rvid_resize_buffer(struct pipe_screen *screen, struct radeon_winsys_cs *cs,
- struct rvid_buffer *new_buf, unsigned new_size);
+bool si_vid_resize_buffer(struct pipe_screen *screen, struct radeon_winsys_cs *cs,
+ struct rvid_buffer *new_buf, unsigned new_size);
/* clear the buffer with zeros */
-void rvid_clear_buffer(struct pipe_context *context, struct rvid_buffer* buffer);
+void si_vid_clear_buffer(struct pipe_context *context, struct rvid_buffer* buffer);
/* join surfaces into the same buffer with identical tiling params
sumup their sizes and replace the backend buffers with a single bo */
-void rvid_join_surfaces(struct r600_common_context *rctx,
- struct pb_buffer** buffers[VL_NUM_COMPONENTS],
- struct radeon_surf *surfaces[VL_NUM_COMPONENTS]);
+void si_vid_join_surfaces(struct r600_common_context *rctx,
+ struct pb_buffer** buffers[VL_NUM_COMPONENTS],
+ struct radeon_surf *surfaces[VL_NUM_COMPONENTS]);
/* returns supported codecs and other parameters */
-int rvid_get_video_param(struct pipe_screen *screen,
- enum pipe_video_profile profile,
- enum pipe_video_entrypoint entrypoint,
- enum pipe_video_cap param);
+int si_vid_get_video_param(struct pipe_screen *screen,
+ enum pipe_video_profile profile,
+ enum pipe_video_entrypoint entrypoint,
+ enum pipe_video_cap param);
/* the hardware only supports NV12 */
-boolean rvid_is_format_supported(struct pipe_screen *screen,
- enum pipe_format format,
- enum pipe_video_profile profile,
- enum pipe_video_entrypoint entrypoint);
+boolean si_vid_is_format_supported(struct pipe_screen *screen,
+ enum pipe_format format,
+ enum pipe_video_profile profile,
+ enum pipe_video_entrypoint entrypoint);
#endif // RADEON_VIDEO_H
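After this header change only the si_vid_* names exist; the old rvid_* spellings are gone, which is the point of the fork. Purely as an illustration of the name mapping (not part of the commit), a hypothetical transition shim for out-of-tree users could look like the header below, assuming radeon_video.h is on the include path.

/* rvid_compat.h - hypothetical transition shim, NOT part of this commit.
 * Maps the removed rvid_* names onto the si_vid_* declarations from
 * radeon_video.h for code that has not been converted yet. */
#ifndef RVID_COMPAT_H
#define RVID_COMPAT_H

#include "radeon_video.h"

#define rvid_alloc_stream_handle  si_vid_alloc_stream_handle
#define rvid_create_buffer        si_vid_create_buffer
#define rvid_destroy_buffer       si_vid_destroy_buffer
#define rvid_resize_buffer        si_vid_resize_buffer
#define rvid_clear_buffer         si_vid_clear_buffer
#define rvid_join_surfaces        si_vid_join_surfaces
#define rvid_get_video_param      si_vid_get_video_param
#define rvid_is_format_supported  si_vid_is_format_supported

#endif /* RVID_COMPAT_H */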