author    Marek Olšák <[email protected]>    2017-03-01 19:24:42 +0100
committer Marek Olšák <[email protected]>    2017-03-17 18:30:21 +0100
commit    be6173e7d6c0c192175c37158eefcf1c159ceb16 (patch)
tree      30bb759156843726c0367e04550642c86a51966c    /src/gallium/drivers/radeon/r600_query.c
parent    04e6977e5d4b17951d3ed81cf872a0243f582e82 (diff)
gallium/radeon: formalize that create_query doesn't need pipe_context

for threaded gallium

Reviewed-by: Timothy Arceri <[email protected]>
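The motivation can be sketched with a small, self-contained example (illustrative only; the example_* struct and function names below are hypothetical stand-ins, not Mesa API): under threaded gallium, pipe_context::create_query may be called from the application thread while the driver thread owns the context, so query creation must rely only on screen-level state, which is what this change formalizes for the radeon query code.

/* Illustrative sketch only -- not part of this commit.  The example_* names
 * are hypothetical; they just mirror the pattern the change formalizes:
 * create_query must depend only on screen-level (effectively immutable)
 * state, never on mutable per-context state. */
#include <stdlib.h>

struct example_screen {              /* stands in for r600_common_screen */
   unsigned min_alloc_size;          /* immutable after screen creation */
};

struct example_context {             /* stands in for r600_common_context */
   struct example_screen *screen;
   /* ... mutable per-context GPU state owned by the driver thread ... */
};

struct example_query {
   unsigned type;
   unsigned result_size;
};

/* Takes only the screen, so it is safe to call from any thread. */
static struct example_query *
example_query_create(struct example_screen *screen, unsigned type)
{
   struct example_query *q = calloc(1, sizeof(*q));
   if (!q)
      return NULL;
   q->type = type;
   q->result_size = screen->min_alloc_size;
   return q;
}

/* The context hook only looks up the screen and forwards, much like
 * r600_create_query below now casts ctx->screen instead of using rctx. */
static struct example_query *
example_create_query(struct example_context *ctx, unsigned type)
{
   return example_query_create(ctx->screen, type);
}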
Diffstat (limited to 'src/gallium/drivers/radeon/r600_query.c')
-rw-r--r--   src/gallium/drivers/radeon/r600_query.c   56
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/src/gallium/drivers/radeon/r600_query.c b/src/gallium/drivers/radeon/r600_query.c
index 2c3b5f4dd1d..d83426116c7 100644
--- a/src/gallium/drivers/radeon/r600_query.c
+++ b/src/gallium/drivers/radeon/r600_query.c
@@ -400,8 +400,7 @@ static struct r600_query_ops sw_query_ops = {
.get_result_resource = NULL
};
-static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
- unsigned query_type)
+static struct pipe_query *r600_query_sw_create(unsigned query_type)
{
struct r600_query_sw *query;
@@ -433,23 +432,23 @@ void r600_query_hw_destroy(struct r600_common_context *rctx,
FREE(rquery);
}
-static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
+static struct r600_resource *r600_new_query_buffer(struct r600_common_screen *rscreen,
struct r600_query_hw *query)
{
unsigned buf_size = MAX2(query->result_size,
- ctx->screen->info.min_alloc_size);
+ rscreen->info.min_alloc_size);
/* Queries are normally read by the CPU after
* being written by the gpu, hence staging is probably a good
* usage pattern.
*/
struct r600_resource *buf = (struct r600_resource*)
- pipe_buffer_create(ctx->b.screen, 0,
+ pipe_buffer_create(&rscreen->b, 0,
PIPE_USAGE_STAGING, buf_size);
if (!buf)
return NULL;
- if (!query->ops->prepare_buffer(ctx, query, buf)) {
+ if (!query->ops->prepare_buffer(rscreen, query, buf)) {
r600_resource_reference(&buf, NULL);
return NULL;
}
@@ -457,14 +456,14 @@ static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
return buf;
}
-static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
+static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
struct r600_query_hw *query,
struct r600_resource *buffer)
{
/* Callers ensure that the buffer is currently unused by the GPU. */
- uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
- PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_UNSYNCHRONIZED);
+ uint32_t *results = rscreen->ws->buffer_map(buffer->buf, NULL,
+ PIPE_TRANSFER_WRITE |
+ PIPE_TRANSFER_UNSYNCHRONIZED);
if (!results)
return false;
@@ -472,8 +471,8 @@ static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
- unsigned max_rbs = ctx->screen->info.num_render_backends;
- unsigned enabled_rb_mask = ctx->screen->info.enabled_rb_mask;
+ unsigned max_rbs = rscreen->info.num_render_backends;
+ unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
unsigned num_results;
unsigned i, j;
@@ -531,17 +530,17 @@ static struct r600_query_hw_ops query_hw_default_hw_ops = {
.add_result = r600_query_hw_add_result,
};
-bool r600_query_hw_init(struct r600_common_context *rctx,
+bool r600_query_hw_init(struct r600_common_screen *rscreen,
struct r600_query_hw *query)
{
- query->buffer.buf = r600_new_query_buffer(rctx, query);
+ query->buffer.buf = r600_new_query_buffer(rscreen, query);
if (!query->buffer.buf)
return false;
return true;
}
-static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
+static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscreen,
unsigned query_type,
unsigned index)
{
@@ -556,19 +555,19 @@ static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
switch (query_type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
- query->result_size = 16 * rctx->screen->info.num_render_backends;
+ query->result_size = 16 * rscreen->info.num_render_backends;
query->result_size += 16; /* for the fence + alignment */
query->num_cs_dw_begin = 6;
- query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
+ query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
break;
case PIPE_QUERY_TIME_ELAPSED:
query->result_size = 24;
query->num_cs_dw_begin = 8;
- query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
+ query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
break;
case PIPE_QUERY_TIMESTAMP:
query->result_size = 16;
- query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
+ query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
query->flags = R600_QUERY_HW_FLAG_NO_START;
break;
case PIPE_QUERY_PRIMITIVES_EMITTED:
@@ -583,10 +582,10 @@ static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
break;
case PIPE_QUERY_PIPELINE_STATISTICS:
/* 11 values on EG, 8 on R600. */
- query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
+ query->result_size = (rscreen->chip_class >= EVERGREEN ? 11 : 8) * 16;
query->result_size += 8; /* for the fence + alignment */
query->num_cs_dw_begin = 6;
- query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
+ query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
break;
default:
assert(0);
@@ -594,7 +593,7 @@ static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
return NULL;
}
- if (!r600_query_hw_init(rctx, query)) {
+ if (!r600_query_hw_init(rscreen, query)) {
FREE(query);
return NULL;
}
@@ -701,7 +700,7 @@ static void r600_query_hw_emit_start(struct r600_common_context *ctx,
*qbuf = query->buffer;
query->buffer.results_end = 0;
query->buffer.previous = qbuf;
- query->buffer.buf = r600_new_query_buffer(ctx, query);
+ query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
if (!query->buffer.buf)
return;
}
@@ -861,14 +860,15 @@ static void r600_emit_query_predication(struct r600_common_context *ctx,
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
- struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+ struct r600_common_screen *rscreen =
+ (struct r600_common_screen *)ctx->screen;
if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
query_type == PIPE_QUERY_GPU_FINISHED ||
query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
- return r600_query_sw_create(ctx, query_type);
+ return r600_query_sw_create(query_type);
- return r600_query_hw_create(rctx, query_type, index);
+ return r600_query_hw_create(rscreen, query_type, index);
}
static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
@@ -908,9 +908,9 @@ void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
!rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
r600_resource_reference(&query->buffer.buf, NULL);
- query->buffer.buf = r600_new_query_buffer(rctx, query);
+ query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
} else {
- if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
+ if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
r600_resource_reference(&query->buffer.buf, NULL);
}
}