author    Marek Olšák <[email protected]>  2012-10-08 04:06:42 +0200
committer Marek Olšák <[email protected]>  2012-10-11 21:12:16 +0200
commit    369e46888904c6d379b8b477d9242cff1608e30e (patch)
tree      528b10f900f23af3acd22a0edcf50fde0eeee86e /src/gallium/drivers/r600
parent    ec4c74a9dc10039d97ad24c4f16bd2400517991d (diff)
gallium: unify transfer functions
"get_transfer + transfer_map" becomes "transfer_map". "transfer_unmap + transfer_destroy" becomes "transfer_unmap". transfer_map must create and return the transfer object and transfer_unmap must destroy it. transfer_map is successful if the returned buffer pointer is not NULL. If transfer_map fails, the pointer to the transfer object remains unchanged (i.e. doesn't have to be NULL). Acked-by: Brian Paul <[email protected]>
Diffstat (limited to 'src/gallium/drivers/r600')
-rw-r--r--  src/gallium/drivers/r600/compute_memory_pool.c |  17
-rw-r--r--  src/gallium/drivers/r600/evergreen_compute.c   |  94
-rw-r--r--  src/gallium/drivers/r600/evergreen_compute.h   |  11
-rw-r--r--  src/gallium/drivers/r600/r600_buffer.c         | 113
-rw-r--r--  src/gallium/drivers/r600/r600_resource.c       |   2
-rw-r--r--  src/gallium/drivers/r600/r600_resource.h       |  13
-rw-r--r--  src/gallium/drivers/r600/r600_texture.c        | 138
7 files changed, 170 insertions(+), 218 deletions(-)
diff --git a/src/gallium/drivers/r600/compute_memory_pool.c b/src/gallium/drivers/r600/compute_memory_pool.c
index 19fbe0cddc4..46bff3221d9 100644
--- a/src/gallium/drivers/r600/compute_memory_pool.c
+++ b/src/gallium/drivers/r600/compute_memory_pool.c
@@ -453,27 +453,22 @@ void compute_memory_transfer(
"offset_in_chunk = %d, size = %d\n", device_to_host,
offset_in_chunk, size);
- if (device_to_host)
- {
- xfer = pipe->get_transfer(pipe, gart, 0, PIPE_TRANSFER_READ,
+ if (device_to_host) {
+ map = pipe->transfer_map(pipe, gart, 0, PIPE_TRANSFER_READ,
&(struct pipe_box) { .width = aligned_size,
- .height = 1, .depth = 1 });
- assert(xfer);
- map = pipe->transfer_map(pipe, xfer);
+ .height = 1, .depth = 1 }, &xfer);
+ assert(xfer);
assert(map);
memcpy(data, map + internal_offset, size);
pipe->transfer_unmap(pipe, xfer);
- pipe->transfer_destroy(pipe, xfer);
} else {
- xfer = pipe->get_transfer(pipe, gart, 0, PIPE_TRANSFER_WRITE,
+ map = pipe->transfer_map(pipe, gart, 0, PIPE_TRANSFER_WRITE,
&(struct pipe_box) { .width = aligned_size,
- .height = 1, .depth = 1 });
+ .height = 1, .depth = 1 }, &xfer);
assert(xfer);
- map = pipe->transfer_map(pipe, xfer);
assert(map);
memcpy(map + internal_offset, data, size);
pipe->transfer_unmap(pipe, xfer);
- pipe->transfer_destroy(pipe, xfer);
}
}
diff --git a/src/gallium/drivers/r600/evergreen_compute.c b/src/gallium/drivers/r600/evergreen_compute.c
index 8f055bd5f38..655cf756186 100644
--- a/src/gallium/drivers/r600/evergreen_compute.c
+++ b/src/gallium/drivers/r600/evergreen_compute.c
@@ -104,12 +104,10 @@ static void evergreen_cs_set_vertex_buffer(
state->atom.dirty = true;
}
-const struct u_resource_vtbl r600_global_buffer_vtbl =
+static const struct u_resource_vtbl r600_global_buffer_vtbl =
{
u_default_resource_get_handle, /* get_handle */
r600_compute_global_buffer_destroy, /* resource_destroy */
- r600_compute_global_get_transfer, /* get_transfer */
- r600_compute_global_transfer_destroy, /* transfer_destroy */
r600_compute_global_transfer_map, /* transfer_map */
r600_compute_global_transfer_flush_region,/* transfer_flush_region */
r600_compute_global_transfer_unmap, /* transfer_unmap */
@@ -841,30 +839,57 @@ void r600_compute_global_buffer_destroy(
free(res);
}
-void* r600_compute_global_transfer_map(
+void *r600_compute_global_transfer_map(
struct pipe_context *ctx_,
- struct pipe_transfer* transfer)
+ struct pipe_resource *resource,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **ptransfer)
{
+ struct r600_context *rctx = (struct r600_context*)ctx_;
+ struct compute_memory_pool *pool = rctx->screen->global_pool;
+ struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
+ struct r600_resource_global* buffer =
+ (struct r600_resource_global*)transfer->resource;
+ uint32_t* map;
+
+ compute_memory_finalize_pending(pool, ctx_);
+
+ assert(resource->target == PIPE_BUFFER);
+
+ COMPUTE_DBG("* r600_compute_global_get_transfer()\n"
+ "level = %u, usage = %u, box(x = %u, y = %u, z = %u "
+ "width = %u, height = %u, depth = %u)\n", level, usage,
+ box->x, box->y, box->z, box->width, box->height,
+ box->depth);
+
+ transfer->resource = resource;
+ transfer->level = level;
+ transfer->usage = usage;
+ transfer->box = *box;
+ transfer->stride = 0;
+ transfer->layer_stride = 0;
+ transfer->data = NULL;
+
assert(transfer->resource->target == PIPE_BUFFER);
assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
assert(transfer->box.x >= 0);
assert(transfer->box.y == 0);
assert(transfer->box.z == 0);
- struct r600_context *ctx = (struct r600_context *)ctx_;
- struct r600_resource_global* buffer =
- (struct r600_resource_global*)transfer->resource;
-
- uint32_t* map;
///TODO: do it better, mapping is not possible if the pool is too big
COMPUTE_DBG("* r600_compute_global_transfer_map()\n");
- if (!(map = ctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf,
- ctx->cs, transfer->usage))) {
+ if (!(map = rctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf,
+ rctx->cs, transfer->usage))) {
+ util_slab_free(&rctx->pool_transfers, transfer);
return NULL;
}
+ *ptransfer = transfer;
+
COMPUTE_DBG("Buffer: %p + %u (buffer offset in global memory) "
"+ %u (box.x)\n", map, buffer->chunk->start_in_dw, transfer->box.x);
return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
@@ -884,50 +909,7 @@ void r600_compute_global_transfer_unmap(
COMPUTE_DBG("* r600_compute_global_transfer_unmap()\n");
ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
-}
-
-struct pipe_transfer * r600_compute_global_get_transfer(
- struct pipe_context *ctx_,
- struct pipe_resource *resource,
- unsigned level,
- unsigned usage,
- const struct pipe_box *box)
-{
- struct r600_context *ctx = (struct r600_context *)ctx_;
- struct compute_memory_pool *pool = ctx->screen->global_pool;
-
- compute_memory_finalize_pending(pool, ctx_);
-
- assert(resource->target == PIPE_BUFFER);
- struct r600_context *rctx = (struct r600_context*)ctx_;
- struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
-
- COMPUTE_DBG("* r600_compute_global_get_transfer()\n"
- "level = %u, usage = %u, box(x = %u, y = %u, z = %u "
- "width = %u, height = %u, depth = %u)\n", level, usage,
- box->x, box->y, box->z, box->width, box->height,
- box->depth);
-
- transfer->resource = resource;
- transfer->level = level;
- transfer->usage = usage;
- transfer->box = *box;
- transfer->stride = 0;
- transfer->layer_stride = 0;
- transfer->data = NULL;
-
- /* Note strides are zero, this is ok for buffers, but not for
- * textures 2d & higher at least.
- */
- return transfer;
-}
-
-void r600_compute_global_transfer_destroy(
- struct pipe_context *ctx_,
- struct pipe_transfer *transfer)
-{
- struct r600_context *rctx = (struct r600_context*)ctx_;
- util_slab_free(&rctx->pool_transfers, transfer);
+ util_slab_free(&ctx->pool_transfers, transfer);
}
void r600_compute_global_transfer_flush_region(
diff --git a/src/gallium/drivers/r600/evergreen_compute.h b/src/gallium/drivers/r600/evergreen_compute.h
index f29d91b9349..e68ebd8585e 100644
--- a/src/gallium/drivers/r600/evergreen_compute.h
+++ b/src/gallium/drivers/r600/evergreen_compute.h
@@ -41,11 +41,14 @@ void evergreen_emit_cs_shader(struct r600_context *rctx, struct r600_atom * atom
struct pipe_resource *r600_compute_global_buffer_create(struct pipe_screen *screen, const struct pipe_resource *templ);
void r600_compute_global_buffer_destroy(struct pipe_screen *screen, struct pipe_resource *res);
-void* r600_compute_global_transfer_map(struct pipe_context *ctx, struct pipe_transfer* transfer);
+void *r600_compute_global_transfer_map(
+ struct pipe_context *ctx_,
+ struct pipe_resource *resource,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **ptransfer);
void r600_compute_global_transfer_unmap(struct pipe_context *ctx, struct pipe_transfer* transfer);
-struct pipe_transfer * r600_compute_global_get_transfer(struct pipe_context *, struct pipe_resource *, unsigned level,
- unsigned usage, const struct pipe_box *);
-void r600_compute_global_transfer_destroy(struct pipe_context *, struct pipe_transfer *);
void r600_compute_global_transfer_flush_region( struct pipe_context *, struct pipe_transfer *, const struct pipe_box *);
void r600_compute_global_transfer_inline_write( struct pipe_context *, struct pipe_resource *, unsigned level,
unsigned usage, const struct pipe_box *, const void *data, unsigned stride, unsigned layer_stride);
diff --git a/src/gallium/drivers/r600/r600_buffer.c b/src/gallium/drivers/r600/r600_buffer.c
index 0b0ac3460e1..90ab2475d75 100644
--- a/src/gallium/drivers/r600/r600_buffer.c
+++ b/src/gallium/drivers/r600/r600_buffer.c
@@ -37,33 +37,6 @@ static void r600_buffer_destroy(struct pipe_screen *screen,
FREE(rbuffer);
}
-static struct pipe_transfer *r600_get_transfer(struct pipe_context *ctx,
- struct pipe_resource *resource,
- unsigned level,
- unsigned usage,
- const struct pipe_box *box)
-{
- struct r600_context *rctx = (struct r600_context*)ctx;
- struct r600_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
-
- assert(box->x + box->width <= resource->width0);
-
- transfer->transfer.resource = resource;
- transfer->transfer.level = level;
- transfer->transfer.usage = usage;
- transfer->transfer.box = *box;
- transfer->transfer.stride = 0;
- transfer->transfer.layer_stride = 0;
- transfer->transfer.data = NULL;
- transfer->staging = NULL;
- transfer->offset = 0;
-
- /* Note strides are zero, this is ok for buffers, but not for
- * textures 2d & higher at least.
- */
- return &transfer->transfer;
-}
-
static void r600_set_constants_dirty_if_bound(struct r600_context *rctx,
struct r600_resource *rbuffer)
{
@@ -87,16 +60,47 @@ static void r600_set_constants_dirty_if_bound(struct r600_context *rctx,
}
}
-static void *r600_buffer_transfer_map(struct pipe_context *pipe,
- struct pipe_transfer *transfer)
+static void *r600_buffer_get_transfer(struct pipe_context *ctx,
+ struct pipe_resource *resource,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **ptransfer,
+ void *data, struct r600_resource *staging)
{
- struct r600_resource *rbuffer = r600_resource(transfer->resource);
- struct r600_context *rctx = (struct r600_context*)pipe;
+ struct r600_context *rctx = (struct r600_context*)ctx;
+ struct r600_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
+
+ transfer->transfer.resource = resource;
+ transfer->transfer.level = level;
+ transfer->transfer.usage = usage;
+ transfer->transfer.box = *box;
+ transfer->transfer.stride = 0;
+ transfer->transfer.layer_stride = 0;
+ transfer->transfer.data = NULL;
+ transfer->staging = NULL;
+ transfer->offset = 0;
+ transfer->staging = staging;
+ *ptransfer = &transfer->transfer;
+ return data;
+}
+
+static void *r600_buffer_transfer_map(struct pipe_context *ctx,
+ struct pipe_resource *resource,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **ptransfer)
+{
+ struct r600_context *rctx = (struct r600_context*)ctx;
+ struct r600_resource *rbuffer = r600_resource(resource);
uint8_t *data;
- if (transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
- !(transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
- assert(transfer->usage & PIPE_TRANSFER_WRITE);
+ assert(box->x + box->width <= resource->width0);
+
+ if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
+ !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ assert(usage & PIPE_TRANSFER_WRITE);
/* Check if mapping this buffer would cause waiting for the GPU. */
if (rctx->ws->cs_is_buffer_referenced(rctx->cs, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
@@ -134,37 +138,44 @@ static void *r600_buffer_transfer_map(struct pipe_context *pipe,
}
}
#if 0 /* this is broken (see Bug 53130) */
- else if ((transfer->usage & PIPE_TRANSFER_DISCARD_RANGE) &&
- !(transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
+ else if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
+ !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
rctx->screen->has_streamout &&
/* The buffer range must be aligned to 4. */
- transfer->box.x % 4 == 0 && transfer->box.width % 4 == 0) {
- assert(transfer->usage & PIPE_TRANSFER_WRITE);
+ box->x % 4 == 0 && box->width % 4 == 0) {
+ assert(usage & PIPE_TRANSFER_WRITE);
/* Check if mapping this buffer would cause waiting for the GPU. */
if (rctx->ws->cs_is_buffer_referenced(rctx->cs, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
/* Do a wait-free write-only transfer using a temporary buffer. */
- struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
-
- rtransfer->staging = (struct r600_resource*)
- pipe_buffer_create(pipe->screen, PIPE_BIND_VERTEX_BUFFER,
- PIPE_USAGE_STAGING, transfer->box.width);
- return rctx->ws->buffer_map(rtransfer->staging->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
+ struct r600_resource *staging = (struct r600_resource*)
+ pipe_buffer_create(ctx->screen, PIPE_BIND_VERTEX_BUFFER,
+ PIPE_USAGE_STAGING, box->width);
+ data = rctx->ws->buffer_map(staging->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
+
+ if (!data)
+ return NULL;
+ return r600_buffer_get_transfer(ctx, resource, level, usage, box,
+ ptransfer, data, staging);
}
}
#endif
- data = rctx->ws->buffer_map(rbuffer->cs_buf, rctx->cs, transfer->usage);
- if (!data)
+ data = rctx->ws->buffer_map(rbuffer->cs_buf, rctx->cs, usage);
+ if (!data) {
return NULL;
+ }
+ data += box->x;
- return (uint8_t*)data + transfer->box.x;
+ return r600_buffer_get_transfer(ctx, resource, level, usage, box,
+ ptransfer, data, NULL);
}
static void r600_buffer_transfer_unmap(struct pipe_context *pipe,
struct pipe_transfer *transfer)
{
+ struct r600_context *rctx = (struct r600_context*)pipe;
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
if (rtransfer->staging) {
@@ -176,12 +187,6 @@ static void r600_buffer_transfer_unmap(struct pipe_context *pipe,
&rtransfer->staging->b.b, &box);
pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
}
-}
-
-static void r600_transfer_destroy(struct pipe_context *ctx,
- struct pipe_transfer *transfer)
-{
- struct r600_context *rctx = (struct r600_context*)ctx;
util_slab_free(&rctx->pool_transfers, transfer);
}
@@ -189,8 +194,6 @@ static const struct u_resource_vtbl r600_buffer_vtbl =
{
u_default_resource_get_handle, /* get_handle */
r600_buffer_destroy, /* resource_destroy */
- r600_get_transfer, /* get_transfer */
- r600_transfer_destroy, /* transfer_destroy */
r600_buffer_transfer_map, /* transfer_map */
NULL, /* transfer_flush_region */
r600_buffer_transfer_unmap, /* transfer_unmap */
diff --git a/src/gallium/drivers/r600/r600_resource.c b/src/gallium/drivers/r600/r600_resource.c
index 1a91d5d5755..5e637f64a4a 100644
--- a/src/gallium/drivers/r600/r600_resource.c
+++ b/src/gallium/drivers/r600/r600_resource.c
@@ -68,10 +68,8 @@ void r600_init_screen_resource_functions(struct pipe_screen *screen)
void r600_init_context_resource_functions(struct r600_context *r600)
{
- r600->context.get_transfer = u_get_transfer_vtbl;
r600->context.transfer_map = u_transfer_map_vtbl;
r600->context.transfer_flush_region = u_default_transfer_flush_region;
r600->context.transfer_unmap = u_transfer_unmap_vtbl;
- r600->context.transfer_destroy = u_transfer_destroy_vtbl;
r600->context.transfer_inline_write = u_default_transfer_inline_write;
}
diff --git a/src/gallium/drivers/r600/r600_resource.h b/src/gallium/drivers/r600/r600_resource.h
index bf7fffa44c5..7ebf59e23a5 100644
--- a/src/gallium/drivers/r600/r600_resource.h
+++ b/src/gallium/drivers/r600/r600_resource.h
@@ -141,17 +141,4 @@ bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
struct pipe_resource *texture,
struct r600_texture **staging);
-/* r600_texture.c texture transfer functions. */
-struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx,
- struct pipe_resource *texture,
- unsigned level,
- unsigned usage,
- const struct pipe_box *box);
-void r600_texture_transfer_destroy(struct pipe_context *ctx,
- struct pipe_transfer *trans);
-void* r600_texture_transfer_map(struct pipe_context *ctx,
- struct pipe_transfer* transfer);
-void r600_texture_transfer_unmap(struct pipe_context *ctx,
- struct pipe_transfer* transfer);
-
#endif
diff --git a/src/gallium/drivers/r600/r600_texture.c b/src/gallium/drivers/r600/r600_texture.c
index 4fb10ca031d..785eeffcab5 100644
--- a/src/gallium/drivers/r600/r600_texture.c
+++ b/src/gallium/drivers/r600/r600_texture.c
@@ -240,17 +240,7 @@ static void r600_texture_destroy(struct pipe_screen *screen,
FREE(rtex);
}
-static const struct u_resource_vtbl r600_texture_vtbl =
-{
- r600_texture_get_handle, /* get_handle */
- r600_texture_destroy, /* resource_destroy */
- r600_texture_get_transfer, /* get_transfer */
- r600_texture_transfer_destroy, /* transfer_destroy */
- r600_texture_transfer_map, /* transfer_map */
- NULL, /* transfer_flush_region */
- r600_texture_transfer_unmap, /* transfer_unmap */
- NULL /* transfer_inline_write */
-};
+static const struct u_resource_vtbl r600_texture_vtbl;
/* The number of samples can be specified independently of the texture. */
void r600_texture_get_fmask_info(struct r600_screen *rscreen,
@@ -603,17 +593,26 @@ bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
return true;
}
-struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx,
- struct pipe_resource *texture,
- unsigned level,
- unsigned usage,
- const struct pipe_box *box)
+static void *r600_texture_transfer_map(struct pipe_context *ctx,
+ struct pipe_resource *texture,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **ptransfer)
{
struct r600_context *rctx = (struct r600_context*)ctx;
struct r600_texture *rtex = (struct r600_texture*)texture;
struct pipe_resource resource;
struct r600_transfer *trans;
boolean use_staging_texture = FALSE;
+ enum pipe_format format = texture->format;
+ struct radeon_winsys_cs_handle *buf;
+ unsigned offset = 0;
+ char *map;
+
+ if ((texture->bind & PIPE_BIND_GLOBAL) && texture->target == PIPE_BUFFER) {
+ return r600_compute_global_transfer_map(ctx, texture, level, usage, box, ptransfer);
+ }
/* We cannot map a tiled texture directly because the data is
* in a different order, therefore we do detiling using a blit.
@@ -644,7 +643,7 @@ struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx,
trans = CALLOC_STRUCT(r600_transfer);
if (trans == NULL)
return NULL;
- pipe_resource_reference(&trans->transfer.resource, texture);
+ trans->transfer.resource = texture;
trans->transfer.level = level;
trans->transfer.usage = usage;
trans->transfer.box = *box;
@@ -657,7 +656,6 @@ struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx,
if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
R600_ERR("failed to create temporary texture to hold untiled copy\n");
- pipe_resource_reference(&trans->transfer.resource, NULL);
FREE(trans);
return NULL;
}
@@ -670,7 +668,6 @@ struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx,
trans->transfer.stride = staging_depth->surface.level[level].pitch_bytes;
trans->offset = r600_texture_get_offset(staging_depth, level, box->z);
trans->staging = (struct r600_resource*)staging_depth;
- return &trans->transfer;
} else if (use_staging_texture) {
resource.target = PIPE_TEXTURE_2D;
resource.format = texture->format;
@@ -697,7 +694,6 @@ struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx,
trans->staging = (struct r600_resource*)ctx->screen->resource_create(ctx->screen, &resource);
if (trans->staging == NULL) {
R600_ERR("failed to create temporary texture to hold untiled copy\n");
- pipe_resource_reference(&trans->transfer.resource, NULL);
FREE(trans);
return NULL;
}
@@ -709,79 +705,41 @@ struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx,
/* Always referenced in the blit. */
r600_flush(ctx, NULL, 0);
}
- return &trans->transfer;
- }
- trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
- trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
- trans->offset = r600_texture_get_offset(rtex, level, box->z);
- return &trans->transfer;
-}
-
-void r600_texture_transfer_destroy(struct pipe_context *ctx,
- struct pipe_transfer *transfer)
-{
- struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
- struct pipe_resource *texture = transfer->resource;
- struct r600_texture *rtex = (struct r600_texture*)texture;
-
- if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
- if (rtex->is_depth) {
- ctx->resource_copy_region(ctx, texture, transfer->level,
- transfer->box.x, transfer->box.y, transfer->box.z,
- &rtransfer->staging->b.b, transfer->level,
- &transfer->box);
- } else {
- r600_copy_from_staging_texture(ctx, rtransfer);
- }
- }
-
- if (rtransfer->staging)
- pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
-
- pipe_resource_reference(&transfer->resource, NULL);
- FREE(transfer);
-}
-
-void* r600_texture_transfer_map(struct pipe_context *ctx,
- struct pipe_transfer* transfer)
-{
- struct r600_context *rctx = (struct r600_context *)ctx;
- struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
- struct radeon_winsys_cs_handle *buf;
- struct r600_texture *rtex =
- (struct r600_texture*)transfer->resource;
- enum pipe_format format = transfer->resource->format;
- unsigned offset = 0;
- char *map;
-
- if ((transfer->resource->bind & PIPE_BIND_GLOBAL) && transfer->resource->target == PIPE_BUFFER) {
- return r600_compute_global_transfer_map(ctx, transfer);
+ } else {
+ trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
+ trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
+ trans->offset = r600_texture_get_offset(rtex, level, box->z);
}
- if (rtransfer->staging) {
- buf = ((struct r600_resource *)rtransfer->staging)->cs_buf;
+ if (trans->staging) {
+ buf = ((struct r600_resource *)trans->staging)->cs_buf;
} else {
- buf = ((struct r600_resource *)transfer->resource)->cs_buf;
+ buf = ((struct r600_resource *)texture)->cs_buf;
}
- if (rtex->is_depth || !rtransfer->staging)
- offset = rtransfer->offset +
- transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
- transfer->box.x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
+ if (rtex->is_depth || !trans->staging)
+ offset = trans->offset +
+ box->y / util_format_get_blockheight(format) * trans->transfer.stride +
+ box->x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
- if (!(map = rctx->ws->buffer_map(buf, rctx->cs, transfer->usage))) {
+ if (!(map = rctx->ws->buffer_map(buf, rctx->cs, usage))) {
+ pipe_resource_reference((struct pipe_resource**)&trans->staging, NULL);
+ FREE(trans);
return NULL;
}
+ *ptransfer = &trans->transfer;
return map + offset;
}
-void r600_texture_transfer_unmap(struct pipe_context *ctx,
- struct pipe_transfer* transfer)
+static void r600_texture_transfer_unmap(struct pipe_context *ctx,
+ struct pipe_transfer* transfer)
{
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
struct r600_context *rctx = (struct r600_context*)ctx;
struct radeon_winsys_cs_handle *buf;
+ struct pipe_resource *texture = transfer->resource;
+ struct r600_texture *rtex = (struct r600_texture*)texture;
if ((transfer->resource->bind & PIPE_BIND_GLOBAL) && transfer->resource->target == PIPE_BUFFER) {
return r600_compute_global_transfer_unmap(ctx, transfer);
@@ -793,6 +751,22 @@ void r600_texture_transfer_unmap(struct pipe_context *ctx,
buf = ((struct r600_resource *)transfer->resource)->cs_buf;
}
rctx->ws->buffer_unmap(buf);
+
+ if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
+ if (rtex->is_depth) {
+ ctx->resource_copy_region(ctx, texture, transfer->level,
+ transfer->box.x, transfer->box.y, transfer->box.z,
+ &rtransfer->staging->b.b, transfer->level,
+ &transfer->box);
+ } else {
+ r600_copy_from_staging_texture(ctx, rtransfer);
+ }
+ }
+
+ if (rtransfer->staging)
+ pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
+
+ FREE(transfer);
}
void r600_init_surface_functions(struct r600_context *r600)
@@ -1178,3 +1152,13 @@ out_unknown:
/* R600_ERR("Unable to handle texformat %d %s\n", format, util_format_name(format)); */
return ~0;
}
+
+static const struct u_resource_vtbl r600_texture_vtbl =
+{
+ r600_texture_get_handle, /* get_handle */
+ r600_texture_destroy, /* resource_destroy */
+ r600_texture_transfer_map, /* transfer_map */
+ NULL, /* transfer_flush_region */
+ r600_texture_transfer_unmap, /* transfer_unmap */
+ NULL /* transfer_inline_write */
+};