Diffstat (limited to 'src/gallium/drivers/r600/r600_pipe.c')
-rw-r--r--  src/gallium/drivers/r600/r600_pipe.c  313
1 file changed, 151 insertions, 162 deletions
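
Every rename in this diff follows from one structural change: struct r600_context and struct r600_screen now embed shared base structs as a field named "b", and each base in turn embeds the Gallium object it wraps, so rctx->b.b is the pipe_context and rscreen->b.b is the pipe_screen. A minimal sketch of the assumed layout follows; the field sets are illustrative, limited to what the hunks below actually dereference:

/* Assumed common base structs behind the b.* renames in this diff.
 * Only fields the hunks below touch are shown. */
struct r600_common_screen {
	struct pipe_screen b;		/* was rscreen->screen */
	struct radeon_winsys *ws;	/* was rscreen->ws */
	struct radeon_info info;	/* was rscreen->info */
	enum radeon_family family;	/* was rscreen->family */
	enum chip_class chip_class;	/* was rscreen->chip_class */
};

struct r600_common_context {
	struct pipe_context b;		/* was rctx->context */
	struct radeon_winsys *ws;	/* was rctx->ws */
	enum radeon_family family;	/* was rctx->family */
	enum chip_class chip_class;	/* was rctx->chip_class */
	struct r600_rings rings;	/* was rctx->rings (gfx + dma) */
};

struct r600_screen {
	struct r600_common_screen b;	/* initialized via r600_common_screen_init() */
	/* driver-specific members (fences, tiling_info, debug_flags, ...) */
};

struct r600_context {
	struct r600_common_context b;	/* set up via r600_common_context_init() */
	/* driver-specific members (screen, blitter, uploader, ...) */
};
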
diff --git a/src/gallium/drivers/r600/r600_pipe.c b/src/gallium/drivers/r600/r600_pipe.c
index edd50f00b6d..b4b8c886f64 100644
--- a/src/gallium/drivers/r600/r600_pipe.c
+++ b/src/gallium/drivers/r600/r600_pipe.c
@@ -92,7 +92,7 @@ static struct r600_fence *r600_create_fence(struct r600_context *rctx)
if (!rscreen->fences.bo) {
/* Create the shared buffer object */
rscreen->fences.bo = (struct r600_resource*)
- pipe_buffer_create(&rscreen->screen, PIPE_BIND_CUSTOM,
+ pipe_buffer_create(&rscreen->b.b, PIPE_BIND_CUSTOM,
PIPE_USAGE_STAGING, 4096);
if (!rscreen->fences.bo) {
R600_ERR("r600: failed to create bo for fence objects\n");
@@ -148,10 +148,10 @@ static struct r600_fence *r600_create_fence(struct r600_context *rctx)
/* Create a dummy BO so that fence_finish without a timeout can sleep waiting for completion */
fence->sleep_bo = (struct r600_resource*)
- pipe_buffer_create(&rctx->screen->screen, PIPE_BIND_CUSTOM,
+ pipe_buffer_create(&rctx->screen->b.b, PIPE_BIND_CUSTOM,
PIPE_USAGE_STAGING, 1);
/* Add the fence as a dummy relocation. */
- r600_context_bo_reloc(rctx, &rctx->rings.gfx, fence->sleep_bo, RADEON_USAGE_READWRITE);
+ r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, fence->sleep_bo, RADEON_USAGE_READWRITE);
out:
pipe_mutex_unlock(rscreen->fences.mutex);
@@ -165,10 +165,10 @@ static void r600_flush(struct pipe_context *ctx, unsigned flags)
unsigned render_cond_mode = 0;
boolean render_cond_cond = FALSE;
- if (rctx->rings.gfx.cs->cdw == rctx->initial_gfx_cs_size)
+ if (rctx->b.rings.gfx.cs->cdw == rctx->initial_gfx_cs_size)
return;
- rctx->rings.gfx.flushing = true;
+ rctx->b.rings.gfx.flushing = true;
/* Disable render condition. */
if (rctx->current_render_cond) {
render_cond = rctx->current_render_cond;
@@ -178,7 +178,7 @@ static void r600_flush(struct pipe_context *ctx, unsigned flags)
}
r600_context_flush(rctx, flags);
- rctx->rings.gfx.flushing = false;
+ rctx->b.rings.gfx.flushing = false;
r600_begin_new_cs(rctx);
/* Re-enable render condition. */
@@ -186,7 +186,7 @@ static void r600_flush(struct pipe_context *ctx, unsigned flags)
ctx->render_condition(ctx, render_cond, render_cond_cond, render_cond_mode);
}
- rctx->initial_gfx_cs_size = rctx->rings.gfx.cs->cdw;
+ rctx->initial_gfx_cs_size = rctx->b.rings.gfx.cs->cdw;
}
static void r600_flush_from_st(struct pipe_context *ctx,
@@ -202,10 +202,10 @@ static void r600_flush_from_st(struct pipe_context *ctx,
*rfence = r600_create_fence(rctx);
}
/* flush gfx & dma ring, order does not matter as only one can be live */
- if (rctx->rings.dma.cs) {
- rctx->rings.dma.flush(rctx, fflags);
+ if (rctx->b.rings.dma.cs) {
+ rctx->b.rings.dma.flush(rctx, fflags);
}
- rctx->rings.gfx.flush(rctx, fflags);
+ rctx->b.rings.gfx.flush(rctx, fflags);
}
static void r600_flush_gfx_ring(void *ctx, unsigned flags)
@@ -216,7 +216,7 @@ static void r600_flush_gfx_ring(void *ctx, unsigned flags)
static void r600_flush_dma_ring(void *ctx, unsigned flags)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- struct radeon_winsys_cs *cs = rctx->rings.dma.cs;
+ struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs;
unsigned padding_dw, i;
if (!cs->cdw) {
@@ -231,20 +231,20 @@ static void r600_flush_dma_ring(void *ctx, unsigned flags)
}
}
- rctx->rings.dma.flushing = true;
- rctx->ws->cs_flush(cs, flags, 0);
- rctx->rings.dma.flushing = false;
+ rctx->b.rings.dma.flushing = true;
+ rctx->b.ws->cs_flush(cs, flags, 0);
+ rctx->b.rings.dma.flushing = false;
}
boolean r600_rings_is_buffer_referenced(struct r600_context *ctx,
struct radeon_winsys_cs_handle *buf,
enum radeon_bo_usage usage)
{
- if (ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs, buf, usage)) {
+ if (ctx->b.ws->cs_is_buffer_referenced(ctx->b.rings.gfx.cs, buf, usage)) {
return TRUE;
}
- if (ctx->rings.dma.cs) {
- if (ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs, buf, usage)) {
+ if (ctx->b.rings.dma.cs) {
+ if (ctx->b.ws->cs_is_buffer_referenced(ctx->b.rings.dma.cs, buf, usage)) {
return TRUE;
}
}
@@ -260,7 +260,7 @@ void *r600_buffer_mmap_sync_with_rings(struct r600_context *ctx,
bool sync_flush = TRUE;
if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
- return ctx->ws->buffer_map(resource->cs_buf, NULL, usage);
+ return ctx->b.ws->buffer_map(resource->cs_buf, NULL, usage);
}
if (!(usage & PIPE_TRANSFER_WRITE)) {
@@ -271,15 +271,15 @@ void *r600_buffer_mmap_sync_with_rings(struct r600_context *ctx,
flags |= RADEON_FLUSH_ASYNC;
}
- if (ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs, resource->cs_buf, rusage) && ctx->rings.gfx.cs->cdw) {
- ctx->rings.gfx.flush(ctx, flags);
+ if (ctx->b.ws->cs_is_buffer_referenced(ctx->b.rings.gfx.cs, resource->cs_buf, rusage) && ctx->b.rings.gfx.cs->cdw) {
+ ctx->b.rings.gfx.flush(ctx, flags);
if (usage & PIPE_TRANSFER_DONTBLOCK) {
return NULL;
}
}
- if (ctx->rings.dma.cs) {
- if (ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs, resource->cs_buf, rusage) && ctx->rings.dma.cs->cdw) {
- ctx->rings.dma.flush(ctx, flags);
+ if (ctx->b.rings.dma.cs) {
+ if (ctx->b.ws->cs_is_buffer_referenced(ctx->b.rings.dma.cs, resource->cs_buf, rusage) && ctx->b.rings.dma.cs->cdw) {
+ ctx->b.rings.dma.flush(ctx, flags);
if (usage & PIPE_TRANSFER_DONTBLOCK) {
return NULL;
}
@@ -287,34 +287,34 @@ void *r600_buffer_mmap_sync_with_rings(struct r600_context *ctx,
}
if (usage & PIPE_TRANSFER_DONTBLOCK) {
- if (ctx->ws->buffer_is_busy(resource->buf, rusage)) {
+ if (ctx->b.ws->buffer_is_busy(resource->buf, rusage)) {
return NULL;
}
}
if (sync_flush) {
/* Try to avoid busy-waiting in radeon_bo_wait. */
- ctx->ws->cs_sync_flush(ctx->rings.gfx.cs);
- if (ctx->rings.dma.cs) {
- ctx->ws->cs_sync_flush(ctx->rings.dma.cs);
+ ctx->b.ws->cs_sync_flush(ctx->b.rings.gfx.cs);
+ if (ctx->b.rings.dma.cs) {
+ ctx->b.ws->cs_sync_flush(ctx->b.rings.dma.cs);
}
}
/* at this point everything is synchronized */
- return ctx->ws->buffer_map(resource->cs_buf, NULL, usage);
+ return ctx->b.ws->buffer_map(resource->cs_buf, NULL, usage);
}
static void r600_flush_from_winsys(void *ctx, unsigned flags)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- rctx->rings.gfx.flush(rctx, flags);
+ rctx->b.rings.gfx.flush(rctx, flags);
}
static void r600_flush_dma_from_winsys(void *ctx, unsigned flags)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- rctx->rings.dma.flush(rctx, flags);
+ rctx->b.rings.dma.flush(rctx, flags);
}
static void r600_destroy_context(struct pipe_context *context)
@@ -329,16 +329,16 @@ static void r600_destroy_context(struct pipe_context *context)
pipe_resource_reference((struct pipe_resource**)&rctx->dummy_fmask, NULL);
if (rctx->dummy_pixel_shader) {
- rctx->context.delete_fs_state(&rctx->context, rctx->dummy_pixel_shader);
+ rctx->b.b.delete_fs_state(&rctx->b.b, rctx->dummy_pixel_shader);
}
if (rctx->custom_dsa_flush) {
- rctx->context.delete_depth_stencil_alpha_state(&rctx->context, rctx->custom_dsa_flush);
+ rctx->b.b.delete_depth_stencil_alpha_state(&rctx->b.b, rctx->custom_dsa_flush);
}
if (rctx->custom_blend_resolve) {
- rctx->context.delete_blend_state(&rctx->context, rctx->custom_blend_resolve);
+ rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_resolve);
}
if (rctx->custom_blend_decompress) {
- rctx->context.delete_blend_state(&rctx->context, rctx->custom_blend_decompress);
+ rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_decompress);
}
util_unreference_framebuffer_state(&rctx->framebuffer.state);
@@ -348,9 +348,6 @@ static void r600_destroy_context(struct pipe_context *context)
if (rctx->uploader) {
u_upload_destroy(rctx->uploader);
}
- if (rctx->allocator_so_filled_size) {
- u_suballocator_destroy(rctx->allocator_so_filled_size);
- }
if (rctx->allocator_fetch_shader) {
u_suballocator_destroy(rctx->allocator_fetch_shader);
}
@@ -358,13 +355,14 @@ static void r600_destroy_context(struct pipe_context *context)
r600_release_command_buffer(&rctx->start_cs_cmd);
- if (rctx->rings.gfx.cs) {
- rctx->ws->cs_destroy(rctx->rings.gfx.cs);
+ if (rctx->b.rings.gfx.cs) {
+ rctx->b.ws->cs_destroy(rctx->b.rings.gfx.cs);
}
- if (rctx->rings.dma.cs) {
- rctx->ws->cs_destroy(rctx->rings.dma.cs);
+ if (rctx->b.rings.dma.cs) {
+ rctx->b.ws->cs_destroy(rctx->b.rings.dma.cs);
}
+ r600_common_context_cleanup(&rctx->b);
FREE(rctx);
}
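
The destroy hunk above and the create hunk below introduce a matched pair of common-code entry points: r600_common_context_init() is called early in context creation, and r600_common_context_cleanup() runs just before the context is freed. A condensed sketch of that pairing, with everything unrelated elided (the allocation line is assumed, since it sits outside these hunks):

/* Condensed init/cleanup pairing introduced by this diff;
 * all other setup and teardown in these functions is elided. */
static void r600_destroy_context(struct pipe_context *context)
{
	struct r600_context *rctx = (struct r600_context *)context;

	/* ... driver-specific teardown (states, allocators, rings) ... */
	r600_common_context_cleanup(&rctx->b);	/* new: tear down the common base */
	FREE(rctx);
}

static struct pipe_context *r600_create_context(struct pipe_screen *screen, void *priv)
{
	struct r600_screen *rscreen = (struct r600_screen *)screen;
	struct r600_context *rctx = CALLOC_STRUCT(r600_context);	/* assumed; not shown in the hunks */

	if (!rctx)
		return NULL;
	rctx->b.b.screen = screen;
	rctx->b.b.priv = priv;
	rctx->b.b.destroy = r600_destroy_context;
	rctx->b.b.flush = r600_flush_from_st;
	if (!r600_common_context_init(&rctx->b, &rscreen->b))
		goto fail;
	/* ... driver-specific setup ... */
	return &rctx->b.b;
fail:
	r600_destroy_context(&rctx->b.b);
	return NULL;
}
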
@@ -380,17 +378,16 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void
sizeof(struct r600_transfer), 64,
UTIL_SLAB_SINGLETHREADED);
- rctx->context.screen = screen;
- rctx->context.priv = priv;
- rctx->context.destroy = r600_destroy_context;
- rctx->context.flush = r600_flush_from_st;
+ rctx->b.b.screen = screen;
+ rctx->b.b.priv = priv;
+ rctx->b.b.destroy = r600_destroy_context;
+ rctx->b.b.flush = r600_flush_from_st;
+
+ if (!r600_common_context_init(&rctx->b, &rscreen->b))
+ goto fail;
- /* Easy accessing of screen/winsys. */
rctx->screen = rscreen;
- rctx->ws = rscreen->ws;
- rctx->family = rscreen->family;
- rctx->chip_class = rscreen->chip_class;
- rctx->keep_tiling_flags = rscreen->info.drm_minor >= 12;
+ rctx->keep_tiling_flags = rscreen->b.info.drm_minor >= 12;
LIST_INITHEAD(&rctx->active_nontimer_queries);
@@ -399,31 +396,31 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void
r600_init_context_resource_functions(rctx);
r600_init_surface_functions(rctx);
- if (rscreen->info.has_uvd) {
- rctx->context.create_video_codec = r600_uvd_create_decoder;
- rctx->context.create_video_buffer = r600_video_buffer_create;
+ if (rscreen->b.info.has_uvd) {
+ rctx->b.b.create_video_codec = r600_uvd_create_decoder;
+ rctx->b.b.create_video_buffer = r600_video_buffer_create;
} else {
- rctx->context.create_video_codec = vl_create_decoder;
- rctx->context.create_video_buffer = vl_video_buffer_create;
+ rctx->b.b.create_video_codec = vl_create_decoder;
+ rctx->b.b.create_video_buffer = vl_video_buffer_create;
}
r600_init_common_state_functions(rctx);
- switch (rctx->chip_class) {
+ switch (rctx->b.chip_class) {
case R600:
case R700:
r600_init_state_functions(rctx);
r600_init_atom_start_cs(rctx);
rctx->max_db = 4;
rctx->custom_dsa_flush = r600_create_db_flush_dsa(rctx);
- rctx->custom_blend_resolve = rctx->chip_class == R700 ? r700_create_resolve_blend(rctx)
+ rctx->custom_blend_resolve = rctx->b.chip_class == R700 ? r700_create_resolve_blend(rctx)
: r600_create_resolve_blend(rctx);
rctx->custom_blend_decompress = r600_create_decompress_blend(rctx);
- rctx->has_vertex_cache = !(rctx->family == CHIP_RV610 ||
- rctx->family == CHIP_RV620 ||
- rctx->family == CHIP_RS780 ||
- rctx->family == CHIP_RS880 ||
- rctx->family == CHIP_RV710);
+ rctx->has_vertex_cache = !(rctx->b.family == CHIP_RV610 ||
+ rctx->b.family == CHIP_RV620 ||
+ rctx->b.family == CHIP_RS780 ||
+ rctx->b.family == CHIP_RS880 ||
+ rctx->b.family == CHIP_RV710);
break;
case EVERGREEN:
case CAYMAN:
@@ -434,57 +431,52 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void
rctx->custom_dsa_flush = evergreen_create_db_flush_dsa(rctx);
rctx->custom_blend_resolve = evergreen_create_resolve_blend(rctx);
rctx->custom_blend_decompress = evergreen_create_decompress_blend(rctx);
- rctx->has_vertex_cache = !(rctx->family == CHIP_CEDAR ||
- rctx->family == CHIP_PALM ||
- rctx->family == CHIP_SUMO ||
- rctx->family == CHIP_SUMO2 ||
- rctx->family == CHIP_CAICOS ||
- rctx->family == CHIP_CAYMAN ||
- rctx->family == CHIP_ARUBA);
+ rctx->has_vertex_cache = !(rctx->b.family == CHIP_CEDAR ||
+ rctx->b.family == CHIP_PALM ||
+ rctx->b.family == CHIP_SUMO ||
+ rctx->b.family == CHIP_SUMO2 ||
+ rctx->b.family == CHIP_CAICOS ||
+ rctx->b.family == CHIP_CAYMAN ||
+ rctx->b.family == CHIP_ARUBA);
break;
default:
- R600_ERR("Unsupported chip class %d.\n", rctx->chip_class);
+ R600_ERR("Unsupported chip class %d.\n", rctx->b.chip_class);
goto fail;
}
if (rscreen->trace_bo) {
- rctx->rings.gfx.cs = rctx->ws->cs_create(rctx->ws, RING_GFX, rscreen->trace_bo->cs_buf);
+ rctx->b.rings.gfx.cs = rctx->b.ws->cs_create(rctx->b.ws, RING_GFX, rscreen->trace_bo->cs_buf);
} else {
- rctx->rings.gfx.cs = rctx->ws->cs_create(rctx->ws, RING_GFX, NULL);
+ rctx->b.rings.gfx.cs = rctx->b.ws->cs_create(rctx->b.ws, RING_GFX, NULL);
}
- rctx->rings.gfx.flush = r600_flush_gfx_ring;
- rctx->ws->cs_set_flush_callback(rctx->rings.gfx.cs, r600_flush_from_winsys, rctx);
- rctx->rings.gfx.flushing = false;
-
- rctx->rings.dma.cs = NULL;
- if (rscreen->info.r600_has_dma && !(rscreen->debug_flags & DBG_NO_ASYNC_DMA)) {
- rctx->rings.dma.cs = rctx->ws->cs_create(rctx->ws, RING_DMA, NULL);
- rctx->rings.dma.flush = r600_flush_dma_ring;
- rctx->ws->cs_set_flush_callback(rctx->rings.dma.cs, r600_flush_dma_from_winsys, rctx);
- rctx->rings.dma.flushing = false;
+ rctx->b.rings.gfx.flush = r600_flush_gfx_ring;
+ rctx->b.ws->cs_set_flush_callback(rctx->b.rings.gfx.cs, r600_flush_from_winsys, rctx);
+ rctx->b.rings.gfx.flushing = false;
+
+ rctx->b.rings.dma.cs = NULL;
+ if (rscreen->b.info.r600_has_dma && !(rscreen->debug_flags & DBG_NO_ASYNC_DMA)) {
+ rctx->b.rings.dma.cs = rctx->b.ws->cs_create(rctx->b.ws, RING_DMA, NULL);
+ rctx->b.rings.dma.flush = r600_flush_dma_ring;
+ rctx->b.ws->cs_set_flush_callback(rctx->b.rings.dma.cs, r600_flush_dma_from_winsys, rctx);
+ rctx->b.rings.dma.flushing = false;
}
- rctx->uploader = u_upload_create(&rctx->context, 1024 * 1024, 256,
+ rctx->uploader = u_upload_create(&rctx->b.b, 1024 * 1024, 256,
PIPE_BIND_INDEX_BUFFER |
PIPE_BIND_CONSTANT_BUFFER);
if (!rctx->uploader)
goto fail;
- rctx->allocator_fetch_shader = u_suballocator_create(&rctx->context, 64 * 1024, 256,
+ rctx->allocator_fetch_shader = u_suballocator_create(&rctx->b.b, 64 * 1024, 256,
0, PIPE_USAGE_STATIC, FALSE);
if (!rctx->allocator_fetch_shader)
goto fail;
- rctx->allocator_so_filled_size = u_suballocator_create(&rctx->context, 4096, 4,
- 0, PIPE_USAGE_STATIC, TRUE);
- if (!rctx->allocator_so_filled_size)
- goto fail;
-
rctx->isa = calloc(1, sizeof(struct r600_isa));
if (!rctx->isa || r600_isa_init(rctx, rctx->isa))
goto fail;
- rctx->blitter = util_blitter_create(&rctx->context);
+ rctx->blitter = util_blitter_create(&rctx->b.b);
if (rctx->blitter == NULL)
goto fail;
util_blitter_set_texture_multisample(rctx->blitter, rscreen->has_msaa);
@@ -494,15 +486,15 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void
r600_get_backend_mask(rctx); /* this emits commands and must be last */
rctx->dummy_pixel_shader =
- util_make_fragment_cloneinput_shader(&rctx->context, 0,
+ util_make_fragment_cloneinput_shader(&rctx->b.b, 0,
TGSI_SEMANTIC_GENERIC,
TGSI_INTERPOLATE_CONSTANT);
- rctx->context.bind_fs_state(&rctx->context, rctx->dummy_pixel_shader);
+ rctx->b.b.bind_fs_state(&rctx->b.b, rctx->dummy_pixel_shader);
- return &rctx->context;
+ return &rctx->b.b;
fail:
- r600_destroy_context(&rctx->context);
+ r600_destroy_context(&rctx->b.b);
return NULL;
}
@@ -550,13 +542,13 @@ static const char* r600_get_name(struct pipe_screen* pscreen)
{
struct r600_screen *rscreen = (struct r600_screen *)pscreen;
- return r600_get_family_name(rscreen->family);
+ return r600_get_family_name(rscreen->b.family);
}
static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
{
struct r600_screen *rscreen = (struct r600_screen *)pscreen;
- enum radeon_family family = rscreen->family;
+ enum radeon_family family = rscreen->b.family;
switch (param) {
/* Supported features (boolean caps). */
@@ -601,7 +593,7 @@ static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
return 0;
case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
- return MIN2(rscreen->info.vram_size, 0xFFFFFFFF);
+ return MIN2(rscreen->b.info.vram_size, 0xFFFFFFFF);
case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
return R600_MAP_BUFFER_ALIGNMENT;
@@ -654,7 +646,7 @@ static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
else
return 14;
case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
- return rscreen->info.drm_minor >= 9 ?
+ return rscreen->b.info.drm_minor >= 9 ?
(family >= CHIP_CEDAR ? 16384 : 8192) : 0;
case PIPE_CAP_MAX_COMBINED_SAMPLERS:
return 32;
@@ -666,10 +658,10 @@ static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
/* Timer queries, present when the clock frequency is non zero. */
case PIPE_CAP_QUERY_TIME_ELAPSED:
- return rscreen->info.r600_clock_crystal_freq != 0;
+ return rscreen->b.info.r600_clock_crystal_freq != 0;
case PIPE_CAP_QUERY_TIMESTAMP:
- return rscreen->info.drm_minor >= 20 &&
- rscreen->info.r600_clock_crystal_freq != 0;
+ return rscreen->b.info.drm_minor >= 20 &&
+ rscreen->b.info.r600_clock_crystal_freq != 0;
case PIPE_CAP_MIN_TEXEL_OFFSET:
return -8;
@@ -689,7 +681,7 @@ static float r600_get_paramf(struct pipe_screen* pscreen,
enum pipe_capf param)
{
struct r600_screen *rscreen = (struct r600_screen *)pscreen;
- enum radeon_family family = rscreen->family;
+ enum radeon_family family = rscreen->b.family;
switch (param) {
case PIPE_CAPF_MAX_LINE_WIDTH:
@@ -879,7 +871,7 @@ static int r600_get_compute_param(struct pipe_screen *screen,
//TODO: select these params by asic
switch (param) {
case PIPE_COMPUTE_CAP_IR_TARGET: {
- const char *gpu = r600_llvm_gpu_string(rscreen->family);
+ const char *gpu = r600_llvm_gpu_string(rscreen->b.family);
if (ret) {
sprintf(ret, "%s-r600--", gpu);
}
@@ -986,16 +978,16 @@ static void r600_destroy_screen(struct pipe_screen* pscreen)
FREE(entry);
}
- rscreen->ws->buffer_unmap(rscreen->fences.bo->cs_buf);
+ rscreen->b.ws->buffer_unmap(rscreen->fences.bo->cs_buf);
pipe_resource_reference((struct pipe_resource**)&rscreen->fences.bo, NULL);
}
if (rscreen->trace_bo) {
- rscreen->ws->buffer_unmap(rscreen->trace_bo->cs_buf);
+ rscreen->b.ws->buffer_unmap(rscreen->trace_bo->cs_buf);
pipe_resource_reference((struct pipe_resource**)&rscreen->trace_bo, NULL);
}
pipe_mutex_destroy(rscreen->fences.mutex);
- rscreen->ws->destroy(rscreen->ws);
+ rscreen->b.ws->destroy(rscreen->b.ws);
FREE(rscreen);
}
@@ -1045,13 +1037,13 @@ static boolean r600_fence_finish(struct pipe_screen *pscreen,
while (rscreen->fences.data[rfence->index] == 0) {
/* Special-case infinite timeout - wait for the dummy BO to become idle */
if (timeout == PIPE_TIMEOUT_INFINITE) {
- rscreen->ws->buffer_wait(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE);
+ rscreen->b.ws->buffer_wait(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE);
break;
}
/* The dummy BO will be busy until the CS including the fence has completed, or
* the GPU is reset. Don't bother continuing to spin when the BO is idle. */
- if (!rscreen->ws->buffer_is_busy(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE))
+ if (!rscreen->b.ws->buffer_is_busy(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE))
break;
if (++spins % 256)
@@ -1161,10 +1153,10 @@ static int evergreen_interpret_tiling(struct r600_screen *rscreen, uint32_t tili
static int r600_init_tiling(struct r600_screen *rscreen)
{
- uint32_t tiling_config = rscreen->info.r600_tiling_config;
+ uint32_t tiling_config = rscreen->b.info.r600_tiling_config;
/* set default group bytes, overridden by tiling info ioctl */
- if (rscreen->chip_class <= R700) {
+ if (rscreen->b.chip_class <= R700) {
rscreen->tiling_info.group_bytes = 256;
} else {
rscreen->tiling_info.group_bytes = 512;
@@ -1173,7 +1165,7 @@ static int r600_init_tiling(struct r600_screen *rscreen)
if (!tiling_config)
return 0;
- if (rscreen->chip_class <= R700) {
+ if (rscreen->b.chip_class <= R700) {
return r600_interpret_tiling(rscreen, tiling_config);
} else {
return evergreen_interpret_tiling(rscreen, tiling_config);
@@ -1184,8 +1176,8 @@ static uint64_t r600_get_timestamp(struct pipe_screen *screen)
{
struct r600_screen *rscreen = (struct r600_screen*)screen;
- return 1000000 * rscreen->ws->query_value(rscreen->ws, RADEON_TIMESTAMP) /
- rscreen->info.r600_clock_crystal_freq;
+ return 1000000 * rscreen->b.ws->query_value(rscreen->b.ws, RADEON_TIMESTAMP) /
+ rscreen->b.info.r600_clock_crystal_freq;
}
static int r600_get_driver_query_info(struct pipe_screen *screen,
@@ -1195,8 +1187,8 @@ static int r600_get_driver_query_info(struct pipe_screen *screen,
struct r600_screen *rscreen = (struct r600_screen*)screen;
struct pipe_driver_query_info list[] = {
{"draw-calls", R600_QUERY_DRAW_CALLS, 0},
- {"requested-VRAM", R600_QUERY_REQUESTED_VRAM, rscreen->info.vram_size, TRUE},
- {"requested-GTT", R600_QUERY_REQUESTED_GTT, rscreen->info.gart_size, TRUE},
+ {"requested-VRAM", R600_QUERY_REQUESTED_VRAM, rscreen->b.info.vram_size, TRUE},
+ {"requested-GTT", R600_QUERY_REQUESTED_GTT, rscreen->b.info.gart_size, TRUE},
{"buffer-wait-time", R600_QUERY_BUFFER_WAIT_TIME, 0, FALSE}
};
@@ -1218,8 +1210,7 @@ struct pipe_screen *r600_screen_create(struct radeon_winsys *ws)
return NULL;
}
- rscreen->ws = ws;
- ws->query_info(ws, &rscreen->info);
+ r600_common_screen_init(&rscreen->b, ws);
rscreen->debug_flags = debug_get_flags_option("R600_DEBUG", debug_options, 0);
if (debug_get_bool_option("R600_DEBUG_COMPUTE", FALSE))
@@ -1232,30 +1223,28 @@ struct pipe_screen *r600_screen_create(struct radeon_winsys *ws)
rscreen->debug_flags |= DBG_NO_LLVM;
if (debug_get_bool_option("R600_PRINT_TEXDEPTH", FALSE))
rscreen->debug_flags |= DBG_TEX_DEPTH;
- rscreen->family = rscreen->info.family;
- rscreen->chip_class = rscreen->info.chip_class;
- if (rscreen->family == CHIP_UNKNOWN) {
- fprintf(stderr, "r600: Unknown chipset 0x%04X\n", rscreen->info.pci_id);
+ if (rscreen->b.family == CHIP_UNKNOWN) {
+ fprintf(stderr, "r600: Unknown chipset 0x%04X\n", rscreen->b.info.pci_id);
FREE(rscreen);
return NULL;
}
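
The hand-written screen setup deleted above (the ws assignment plus query_info, and the family/chip_class copies) collapses into the single r600_common_screen_init() call. A hypothetical sketch of what that helper is assumed to cover, inferred purely from the assignments this diff removes; the real implementation lives in the new common code:

/* Hypothetical body of the shared helper, reconstructed from the
 * inline initialization this diff deletes from r600_screen_create(). */
static void r600_common_screen_init_sketch(struct r600_common_screen *rscreen,
					   struct radeon_winsys *ws)
{
	rscreen->ws = ws;
	ws->query_info(ws, &rscreen->info);		/* was inline in r600_screen_create() */
	rscreen->family = rscreen->info.family;		/* was copied per driver */
	rscreen->chip_class = rscreen->info.chip_class;
}
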
/* Figure out streamout kernel support. */
- switch (rscreen->chip_class) {
+ switch (rscreen->b.chip_class) {
case R600:
- if (rscreen->family < CHIP_RS780) {
- rscreen->has_streamout = rscreen->info.drm_minor >= 14;
+ if (rscreen->b.family < CHIP_RS780) {
+ rscreen->has_streamout = rscreen->b.info.drm_minor >= 14;
} else {
- rscreen->has_streamout = rscreen->info.drm_minor >= 23;
+ rscreen->has_streamout = rscreen->b.info.drm_minor >= 23;
}
break;
case R700:
- rscreen->has_streamout = rscreen->info.drm_minor >= 17;
+ rscreen->has_streamout = rscreen->b.info.drm_minor >= 17;
break;
case EVERGREEN:
case CAYMAN:
- rscreen->has_streamout = rscreen->info.drm_minor >= 14;
+ rscreen->has_streamout = rscreen->b.info.drm_minor >= 14;
break;
default:
rscreen->has_streamout = FALSE;
@@ -1263,18 +1252,18 @@ struct pipe_screen *r600_screen_create(struct radeon_winsys *ws)
}
/* MSAA support. */
- switch (rscreen->chip_class) {
+ switch (rscreen->b.chip_class) {
case R600:
case R700:
- rscreen->has_msaa = rscreen->info.drm_minor >= 22;
+ rscreen->has_msaa = rscreen->b.info.drm_minor >= 22;
rscreen->has_compressed_msaa_texturing = false;
break;
case EVERGREEN:
- rscreen->has_msaa = rscreen->info.drm_minor >= 19;
- rscreen->has_compressed_msaa_texturing = rscreen->info.drm_minor >= 24;
+ rscreen->has_msaa = rscreen->b.info.drm_minor >= 19;
+ rscreen->has_compressed_msaa_texturing = rscreen->b.info.drm_minor >= 24;
break;
case CAYMAN:
- rscreen->has_msaa = rscreen->info.drm_minor >= 19;
+ rscreen->has_msaa = rscreen->b.info.drm_minor >= 19;
rscreen->has_compressed_msaa_texturing = true;
break;
default:
@@ -1282,7 +1271,7 @@ struct pipe_screen *r600_screen_create(struct radeon_winsys *ws)
rscreen->has_compressed_msaa_texturing = false;
}
- rscreen->has_cp_dma = rscreen->info.drm_minor >= 27 &&
+ rscreen->has_cp_dma = rscreen->b.info.drm_minor >= 27 &&
!(rscreen->debug_flags & DBG_NO_CP_DMA);
if (r600_init_tiling(rscreen)) {
@@ -1290,37 +1279,37 @@ struct pipe_screen *r600_screen_create(struct radeon_winsys *ws)
return NULL;
}
- rscreen->screen.destroy = r600_destroy_screen;
- rscreen->screen.get_name = r600_get_name;
- rscreen->screen.get_vendor = r600_get_vendor;
- rscreen->screen.get_param = r600_get_param;
- rscreen->screen.get_shader_param = r600_get_shader_param;
- rscreen->screen.get_paramf = r600_get_paramf;
- rscreen->screen.get_compute_param = r600_get_compute_param;
- rscreen->screen.get_timestamp = r600_get_timestamp;
-
- if (rscreen->chip_class >= EVERGREEN) {
- rscreen->screen.is_format_supported = evergreen_is_format_supported;
+ rscreen->b.b.destroy = r600_destroy_screen;
+ rscreen->b.b.get_name = r600_get_name;
+ rscreen->b.b.get_vendor = r600_get_vendor;
+ rscreen->b.b.get_param = r600_get_param;
+ rscreen->b.b.get_shader_param = r600_get_shader_param;
+ rscreen->b.b.get_paramf = r600_get_paramf;
+ rscreen->b.b.get_compute_param = r600_get_compute_param;
+ rscreen->b.b.get_timestamp = r600_get_timestamp;
+
+ if (rscreen->b.chip_class >= EVERGREEN) {
+ rscreen->b.b.is_format_supported = evergreen_is_format_supported;
rscreen->dma_blit = &evergreen_dma_blit;
} else {
- rscreen->screen.is_format_supported = r600_is_format_supported;
+ rscreen->b.b.is_format_supported = r600_is_format_supported;
rscreen->dma_blit = &r600_dma_blit;
}
- rscreen->screen.context_create = r600_create_context;
- rscreen->screen.fence_reference = r600_fence_reference;
- rscreen->screen.fence_signalled = r600_fence_signalled;
- rscreen->screen.fence_finish = r600_fence_finish;
- rscreen->screen.get_driver_query_info = r600_get_driver_query_info;
-
- if (rscreen->info.has_uvd) {
- rscreen->screen.get_video_param = r600_uvd_get_video_param;
- rscreen->screen.is_video_format_supported = ruvd_is_format_supported;
+ rscreen->b.b.context_create = r600_create_context;
+ rscreen->b.b.fence_reference = r600_fence_reference;
+ rscreen->b.b.fence_signalled = r600_fence_signalled;
+ rscreen->b.b.fence_finish = r600_fence_finish;
+ rscreen->b.b.get_driver_query_info = r600_get_driver_query_info;
+
+ if (rscreen->b.info.has_uvd) {
+ rscreen->b.b.get_video_param = r600_uvd_get_video_param;
+ rscreen->b.b.is_video_format_supported = ruvd_is_format_supported;
} else {
- rscreen->screen.get_video_param = r600_get_video_param;
- rscreen->screen.is_video_format_supported = vl_video_buffer_is_format_supported;
+ rscreen->b.b.get_video_param = r600_get_video_param;
+ rscreen->b.b.is_video_format_supported = vl_video_buffer_is_format_supported;
}
- r600_init_screen_resource_functions(&rscreen->screen);
+ r600_init_screen_resource_functions(&rscreen->b.b);
util_format_s3tc_init();
@@ -1334,20 +1323,20 @@ struct pipe_screen *r600_screen_create(struct radeon_winsys *ws)
rscreen->global_pool = compute_memory_pool_new(rscreen);
rscreen->cs_count = 0;
- if (rscreen->info.drm_minor >= 28 && (rscreen->debug_flags & DBG_TRACE_CS)) {
- rscreen->trace_bo = (struct r600_resource*)pipe_buffer_create(&rscreen->screen,
+ if (rscreen->b.info.drm_minor >= 28 && (rscreen->debug_flags & DBG_TRACE_CS)) {
+ rscreen->trace_bo = (struct r600_resource*)pipe_buffer_create(&rscreen->b.b,
PIPE_BIND_CUSTOM,
PIPE_USAGE_STAGING,
4096);
if (rscreen->trace_bo) {
- rscreen->trace_ptr = rscreen->ws->buffer_map(rscreen->trace_bo->cs_buf, NULL,
+ rscreen->trace_ptr = rscreen->b.ws->buffer_map(rscreen->trace_bo->cs_buf, NULL,
PIPE_TRANSFER_UNSYNCHRONIZED);
}
}
/* Create the auxiliary context. */
pipe_mutex_init(rscreen->aux_context_lock);
- rscreen->aux_context = rscreen->screen.context_create(&rscreen->screen, NULL);
+ rscreen->aux_context = rscreen->b.b.context_create(&rscreen->b.b, NULL);
#if 0 /* This is for testing whether aux_context and buffer clearing work correctly. */
struct pipe_resource templ = {};
@@ -1381,5 +1370,5 @@ struct pipe_screen *r600_screen_create(struct radeon_winsys *ws)
}
#endif
- return &rscreen->screen;
+ return &rscreen->b.b;
}