author     Marek Olšák <[email protected]>    2014-01-22 01:14:10 +0100
committer  Marek Olšák <[email protected]>    2014-01-28 01:39:27 +0100
commit     f4612105e82834e6f46a70dd90c81e982b6506ca (patch)
tree       e8169d1a8f2f028c2be31a3a88f5a04803f6fd79 /src/gallium/drivers
parent     a9ae7635b77fc4fd9f4614fead63fefa6ff74f4e (diff)
radeon: place context-related functions first in r600_pipe_common.c
To follow the unwritten convention of r600g and radeonsi.

Reviewed-by: Michel Dänzer <[email protected]>
Reviewed-by: Tom Stellard <[email protected]>
Diffstat (limited to 'src/gallium/drivers')
-rw-r--r--  src/gallium/drivers/radeon/r600_pipe_common.c  166
1 file changed, 87 insertions(+), 79 deletions(-)
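
The moved r600_context_add_resource_size (first hunk below) accumulates a gross
per-draw estimate of GTT and VRAM use; per its comment, the estimate stays
within roughly +/- 10% of the target memory limit because memory is precisely
re-accounted after every draw. As a minimal sketch of how such counters could
feed a budget check before a draw; the struct and function names here are
invented for illustration and are not part of this commit:

/* Hypothetical sketch (not from this commit): how per-draw GTT/VRAM
 * estimates like rctx->gtt and rctx->vram could be tested against a
 * memory budget to decide whether to flush. */
#include <stdbool.h>
#include <stdint.h>

struct mem_estimate {
	uint64_t gtt;   /* bytes estimated to be placed in GTT */
	uint64_t vram;  /* bytes estimated to be resident in VRAM */
};

/* Returns true when the running estimate exceeds the budget, meaning
 * the caller should flush so memory can be precisely re-accounted. */
static bool mem_over_budget(const struct mem_estimate *m,
			    uint64_t gtt_budget, uint64_t vram_budget)
{
	return m->gtt > gtt_budget || m->vram > vram_budget;
}
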
diff --git a/src/gallium/drivers/radeon/r600_pipe_common.c b/src/gallium/drivers/radeon/r600_pipe_common.c
index 031f858dfe6..7462d43f20b 100644
--- a/src/gallium/drivers/radeon/r600_pipe_common.c
+++ b/src/gallium/drivers/radeon/r600_pipe_common.c
@@ -31,6 +31,93 @@
#include "util/u_upload_mgr.h"
#include <inttypes.h>
+/*
+ * pipe_context
+ */
+
+bool r600_common_context_init(struct r600_common_context *rctx,
+ struct r600_common_screen *rscreen)
+{
+ util_slab_create(&rctx->pool_transfers,
+ sizeof(struct r600_transfer), 64,
+ UTIL_SLAB_SINGLETHREADED);
+
+ rctx->screen = rscreen;
+ rctx->ws = rscreen->ws;
+ rctx->family = rscreen->family;
+ rctx->chip_class = rscreen->chip_class;
+ rctx->max_db = rscreen->chip_class >= EVERGREEN ? 8 : 4;
+
+ rctx->b.transfer_map = u_transfer_map_vtbl;
+ rctx->b.transfer_flush_region = u_default_transfer_flush_region;
+ rctx->b.transfer_unmap = u_transfer_unmap_vtbl;
+ rctx->b.transfer_inline_write = u_default_transfer_inline_write;
+
+ r600_streamout_init(rctx);
+ r600_query_init(rctx);
+
+ rctx->allocator_so_filled_size = u_suballocator_create(&rctx->b, 4096, 4,
+ 0, PIPE_USAGE_STATIC, TRUE);
+ if (!rctx->allocator_so_filled_size)
+ return false;
+
+ rctx->uploader = u_upload_create(&rctx->b, 1024 * 1024, 256,
+ PIPE_BIND_INDEX_BUFFER |
+ PIPE_BIND_CONSTANT_BUFFER);
+ if (!rctx->uploader)
+ return false;
+
+ return true;
+}
+
+void r600_common_context_cleanup(struct r600_common_context *rctx)
+{
+ if (rctx->rings.gfx.cs) {
+ rctx->ws->cs_destroy(rctx->rings.gfx.cs);
+ }
+ if (rctx->rings.dma.cs) {
+ rctx->ws->cs_destroy(rctx->rings.dma.cs);
+ }
+
+ if (rctx->uploader) {
+ u_upload_destroy(rctx->uploader);
+ }
+
+ util_slab_destroy(&rctx->pool_transfers);
+
+ if (rctx->allocator_so_filled_size) {
+ u_suballocator_destroy(rctx->allocator_so_filled_size);
+ }
+}
+
+void r600_context_add_resource_size(struct pipe_context *ctx, struct pipe_resource *r)
+{
+ struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+ struct r600_resource *rr = (struct r600_resource *)r;
+
+ if (r == NULL) {
+ return;
+ }
+
+ /*
+ * The idea is to compute a gross estimate of memory requirement of
+ * each draw call. After each draw call, memory will be precisely
+ * accounted. So the uncertainty is only on the current draw call.
+ * In practice this gave very good estimate (+/- 10% of the target
+ * memory limit).
+ */
+ if (rr->domains & RADEON_DOMAIN_GTT) {
+ rctx->gtt += rr->buf->size;
+ }
+ if (rr->domains & RADEON_DOMAIN_VRAM) {
+ rctx->vram += rr->buf->size;
+ }
+}
+
+/*
+ * pipe_screen
+ */
+
static const struct debug_named_value common_debug_options[] = {
/* logging */
{ "tex", DBG_TEX, "Print texture info" },
@@ -235,85 +322,6 @@ void r600_common_screen_cleanup(struct r600_common_screen *rscreen)
rscreen->aux_context->destroy(rscreen->aux_context);
}
-bool r600_common_context_init(struct r600_common_context *rctx,
- struct r600_common_screen *rscreen)
-{
- util_slab_create(&rctx->pool_transfers,
- sizeof(struct r600_transfer), 64,
- UTIL_SLAB_SINGLETHREADED);
-
- rctx->screen = rscreen;
- rctx->ws = rscreen->ws;
- rctx->family = rscreen->family;
- rctx->chip_class = rscreen->chip_class;
- rctx->max_db = rscreen->chip_class >= EVERGREEN ? 8 : 4;
-
- rctx->b.transfer_map = u_transfer_map_vtbl;
- rctx->b.transfer_flush_region = u_default_transfer_flush_region;
- rctx->b.transfer_unmap = u_transfer_unmap_vtbl;
- rctx->b.transfer_inline_write = u_default_transfer_inline_write;
-
- r600_streamout_init(rctx);
- r600_query_init(rctx);
-
- rctx->allocator_so_filled_size = u_suballocator_create(&rctx->b, 4096, 4,
- 0, PIPE_USAGE_STATIC, TRUE);
- if (!rctx->allocator_so_filled_size)
- return false;
-
- rctx->uploader = u_upload_create(&rctx->b, 1024 * 1024, 256,
- PIPE_BIND_INDEX_BUFFER |
- PIPE_BIND_CONSTANT_BUFFER);
- if (!rctx->uploader)
- return false;
-
- return true;
-}
-
-void r600_common_context_cleanup(struct r600_common_context *rctx)
-{
- if (rctx->rings.gfx.cs) {
- rctx->ws->cs_destroy(rctx->rings.gfx.cs);
- }
- if (rctx->rings.dma.cs) {
- rctx->ws->cs_destroy(rctx->rings.dma.cs);
- }
-
- if (rctx->uploader) {
- u_upload_destroy(rctx->uploader);
- }
-
- util_slab_destroy(&rctx->pool_transfers);
-
- if (rctx->allocator_so_filled_size) {
- u_suballocator_destroy(rctx->allocator_so_filled_size);
- }
-}
-
-void r600_context_add_resource_size(struct pipe_context *ctx, struct pipe_resource *r)
-{
- struct r600_common_context *rctx = (struct r600_common_context *)ctx;
- struct r600_resource *rr = (struct r600_resource *)r;
-
- if (r == NULL) {
- return;
- }
-
- /*
- * The idea is to compute a gross estimate of memory requirement of
- * each draw call. After each draw call, memory will be precisely
- * accounted. So the uncertainty is only on the current draw call.
- * In practice this gave very good estimate (+/- 10% of the target
- * memory limit).
- */
- if (rr->domains & RADEON_DOMAIN_GTT) {
- rctx->gtt += rr->buf->size;
- }
- if (rr->domains & RADEON_DOMAIN_VRAM) {
- rctx->vram += rr->buf->size;
- }
-}
-
static unsigned tgsi_get_processor_type(const struct tgsi_token *tokens)
{
struct tgsi_parse_context parse;
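
After the move, the context helpers read top to bottom in creation order. For
orientation, a hypothetical driver-side creation path would pair them as shown
below; my_context and my_create_context are illustrative only, assuming Mesa's
CALLOC_STRUCT/FREE helpers from util/u_memory.h:

/* Hypothetical sketch (not from this commit) of pairing
 * r600_common_context_init with r600_common_context_cleanup.
 * Assumes CALLOC_STRUCT/FREE from util/u_memory.h. */
struct my_context {
	struct r600_common_context b; /* base context, must be first */
};

static struct pipe_context *my_create_context(struct pipe_screen *screen)
{
	struct my_context *ctx = CALLOC_STRUCT(my_context);

	if (!ctx)
		return NULL;

	if (!r600_common_context_init(&ctx->b,
				      (struct r600_common_screen *)screen)) {
		/* Cleanup tolerates partial init: the transfer slab is
		 * created first, and the remaining teardowns above are
		 * NULL-checked. */
		r600_common_context_cleanup(&ctx->b);
		FREE(ctx);
		return NULL;
	}
	return &ctx->b.b;
}
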