| author | Chia-I Wu <[email protected]> | 2019-07-16 16:48:03 -0700 |
|---|---|---|
| committer | Chia-I Wu <[email protected]> | 2019-07-19 18:04:42 -0700 |
| commit | d31d25f634abe787552eba8526db9f901a257d9c (patch) | |
| tree | 7773fb61f8064cad9763c8ed228f6197f5d1f67f | |
| parent | 324c20304e82fc1e36cd0d293653bf1533edc854 (diff) | |
virgl: fix a sync issue in virgl_buffer_transfer_extend
In virgl_buffer_transfer_extend, when no flush is needed, it tries
to extend a previously queued transfer, if it can find one, instead
of queuing a new one. Compared to virgl_resource_transfer_prepare,
however, it fails to check whether the resource is busy.
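To make the gap concrete, here is a toy model of the two gates (plain C
with hypothetical fields and helpers, not the virgl API): the extend path
only asked whether a flush was needed, while virgl_resource_transfer_prepare
additionally asks whether the resource is still busy on the host.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a virgl buffer; not the real struct. */
struct toy_resource {
   bool needs_flush;   /* queued commands still reference the resource */
   bool host_busy;     /* the host may still be reading the resource */
};

/* Gate used by the old extend path: flush check only. */
static bool extend_fast_path_ok(const struct toy_resource *res)
{
   return !res->needs_flush;
}

/* Gate effectively used by virgl_resource_transfer_prepare. */
static bool prepare_fast_path_ok(const struct toy_resource *res)
{
   return !res->needs_flush && !res->host_busy;
}

int main(void)
{
   /* A previously queued PIPE_TRANSFER_UNSYNCHRONIZED transfer can leave
    * the resource busy on the host even though no flush is pending. */
   struct toy_resource res = { .needs_flush = false, .host_busy = true };

   printf("extend path rewrites in place: %d (write-after-read hazard)\n",
          extend_fast_path_ok(&res));
   printf("prepare path rewrites in place: %d\n",
          prepare_fast_path_ok(&res));
   return 0;
}
```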
The existence of a previously queued transfer normally implies that
the resource is not busy, except perhaps when the transfer is
PIPE_TRANSFER_UNSYNCHRONIZED. Rather than burdening the code with a
lengthy comment, and risking breakage as the transfer code evolves,
this commit makes the valid_buffer_range check the only condition
for taking the fast path.
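A minimal, self-contained sketch of that gate, using simplified
reimplementations of gallium's util_range helpers (the real ones live in
util/u_range.h): a write that does not intersect valid_buffer_range cannot
touch data any queued command could still be reading, so extending the
queued transfer is safe.

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for gallium's struct util_range (see u_range.h). */
struct util_range {
   unsigned start; /* inclusive */
   unsigned end;   /* exclusive */
};

static bool util_ranges_intersect(const struct util_range *range,
                                  unsigned start, unsigned end)
{
   return range->start < range->end && /* non-empty */
          start < range->end && range->start < end;
}

static void util_range_add(struct util_range *range,
                           unsigned start, unsigned end)
{
   if (range->start >= range->end) {
      range->start = start;
      range->end = end;
   } else {
      if (start < range->start) range->start = start;
      if (end > range->end) range->end = end;
   }
}

int main(void)
{
   struct util_range valid = { 0, 0 }; /* empty: nothing written yet */

   /* First subdata at [0, 64) misses the empty valid range: fast path. */
   printf("hit? %d\n", util_ranges_intersect(&valid, 0, 64));   /* 0 */
   util_range_add(&valid, 0, 64);

   /* Append at [64, 128): still no intersection, fast path again. */
   printf("hit? %d\n", util_ranges_intersect(&valid, 64, 128)); /* 0 */
   util_range_add(&valid, 64, 128);

   /* Rewrite of [32, 96) overlaps data possibly in flight: slow path. */
   printf("hit? %d\n", util_ranges_intersect(&valid, 32, 96));  /* 1 */
   return 0;
}
```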
In the real world, we hit the fast path almost exclusively because
of the valid_buffer_range check. In micro-benchmarks, the condition
should always be true; otherwise the benchmarks are not very
representative of meaningful workloads. I think this fix is justified.
The recent change to PIPE_TRANSFER_MAP_DIRECTLY usage disables the
fast path. This commit re-enables it as well.
Signed-off-by: Chia-I Wu <[email protected]>
Reviewed-by: Gurchetan Singh <[email protected]>
| -rw-r--r-- | src/gallium/drivers/virgl/virgl_resource.c | 77 |

1 file changed, 15 insertions, 62 deletions
```diff
diff --git a/src/gallium/drivers/virgl/virgl_resource.c b/src/gallium/drivers/virgl/virgl_resource.c
index dcc0bce717c..74da5ef968c 100644
--- a/src/gallium/drivers/virgl/virgl_resource.c
+++ b/src/gallium/drivers/virgl/virgl_resource.c
@@ -536,76 +536,29 @@ void virgl_init_screen_resource_functions(struct pipe_screen *screen)
    screen->resource_destroy = u_resource_destroy_vtbl;
 }
 
-static bool virgl_buffer_transfer_extend(struct pipe_context *ctx,
-                                         struct pipe_resource *resource,
-                                         unsigned usage,
-                                         const struct pipe_box *box,
-                                         const void *data)
-{
-   struct virgl_context *vctx = virgl_context(ctx);
-   struct virgl_resource *vbuf = virgl_resource(resource);
-   struct virgl_transfer dummy_trans = { 0 };
-   bool flush;
-
-   /*
-    * Attempts to short circuit the entire process of mapping and unmapping
-    * a resource if there is an existing transfer that can be extended.
-    * Pessimestically falls back if a flush is required.
-    */
-   dummy_trans.base.resource = resource;
-   dummy_trans.hw_res = vbuf->hw_res;
-   dummy_trans.base.usage = usage;
-   dummy_trans.base.box = *box;
-   dummy_trans.base.stride = vbuf->metadata.stride[0];
-   dummy_trans.base.layer_stride = vbuf->metadata.layer_stride[0];
-   dummy_trans.offset = box->x;
-
-   flush = virgl_res_needs_flush(vctx, &dummy_trans);
-   if (flush && util_ranges_intersect(&vbuf->valid_buffer_range,
-                                      box->x, box->x + box->width))
-      return false;
-
-   if (!virgl_transfer_queue_extend_buffer(&vctx->queue,
-                                           vbuf->hw_res, box->x, box->width, data))
-      return false;
-
-   util_range_add(&vbuf->valid_buffer_range, box->x, box->x + box->width);
-
-   return true;
-}
-
 static void virgl_buffer_subdata(struct pipe_context *pipe,
                                  struct pipe_resource *resource,
                                  unsigned usage, unsigned offset,
                                  unsigned size, const void *data)
 {
-   struct pipe_transfer *transfer;
-   uint8_t *map;
-   struct pipe_box box;
-
-   assert(!(usage & PIPE_TRANSFER_READ));
-
-   /* the write flag is implicit by the nature of buffer_subdata */
-   usage |= PIPE_TRANSFER_WRITE;
-
-   if (!(usage & PIPE_TRANSFER_MAP_DIRECTLY)) {
-      if (offset == 0 && size == resource->width0)
-         usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
-      else
-         usage |= PIPE_TRANSFER_DISCARD_RANGE;
-   }
-
-   u_box_1d(offset, size, &box);
+   struct virgl_context *vctx = virgl_context(pipe);
+   struct virgl_resource *vbuf = virgl_resource(resource);
 
-   if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
-       virgl_buffer_transfer_extend(pipe, resource, usage, &box, data))
+   /* We can try virgl_transfer_queue_extend_buffer when there is no
+    * flush/readback/wait required. Based on virgl_resource_transfer_prepare,
+    * the simplest way to make sure that is the case is to check the valid
+    * buffer range.
+    */
+   if (!util_ranges_intersect(&vbuf->valid_buffer_range,
+                              offset, offset + size) &&
+       likely(!(virgl_debug & VIRGL_DEBUG_XFER)) &&
+       virgl_transfer_queue_extend_buffer(&vctx->queue,
+                                          vbuf->hw_res, offset, size, data)) {
+      util_range_add(&vbuf->valid_buffer_range, offset, offset + size);
       return;
-
-   map = pipe->transfer_map(pipe, resource, 0, usage, &box, &transfer);
-   if (map) {
-      memcpy(map, data, size);
-      pipe_transfer_unmap(pipe, transfer);
    }
+
+   u_default_buffer_subdata(pipe, resource, usage, offset, size, data);
 }
 
 void virgl_init_context_resource_functions(struct pipe_context *ctx)
```
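For reference, the fallback now funnels through gallium's shared
u_default_buffer_subdata helper (src/gallium/auxiliary/util/u_transfer.c),
which behaves like the open-coded slow path deleted above. A sketch
reconstructed from those removed lines, with the Mesa-internal headers it
would need assumed to be available:

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_box.h"
#include "util/u_inlines.h"

/* Map/memcpy/unmap slow path, as removed from virgl_buffer_subdata. */
static void sketch_buffer_subdata(struct pipe_context *pipe,
                                  struct pipe_resource *resource,
                                  unsigned usage, unsigned offset,
                                  unsigned size, const void *data)
{
   struct pipe_transfer *transfer;
   struct pipe_box box;
   uint8_t *map;

   assert(!(usage & PIPE_TRANSFER_READ));

   /* the write flag is implicit by the nature of buffer_subdata */
   usage |= PIPE_TRANSFER_WRITE;

   if (!(usage & PIPE_TRANSFER_MAP_DIRECTLY)) {
      /* a whole-buffer write may discard everything; else just the range */
      if (offset == 0 && size == resource->width0)
         usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
      else
         usage |= PIPE_TRANSFER_DISCARD_RANGE;
   }

   u_box_1d(offset, size, &box);

   map = pipe->transfer_map(pipe, resource, 0, usage, &box, &transfer);
   if (map) {
      memcpy(map, data, size);
      pipe_transfer_unmap(pipe, transfer);
   }
}
```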