author:    Chia-I Wu <[email protected]>    2019-05-09 13:27:34 -0700
committer: Chia-I Wu <[email protected]>    2019-05-22 09:28:19 -0700
commit:    96c2851586e8d76397823624321d5d24b3d22b36 (patch)
tree:      b1b08df7750f827390394435799f18e99749c997 /src/gallium/drivers/virgl/virgl_encode.c
parent:    440982cdd6e50797853eb78b6edf9401ba5a93b8 (diff)
virgl: track valid buffer range for transfer sync
virgl_transfer_queue_is_queued was used to avoid flushing. That
fails when the resource is being accessed by previous cmdbufs but
not by the current one.
The new approach of tracking the valid buffer range does not apply
to textures, however. But hopefully that is fine, because the goal
is to avoid waiting in this scenario:
glBufferSubData(..., offset, size, data1);
glDrawArrays(...);
// append new vertex data
glBufferSubData(..., offset+size, size, data2);
glDrawArrays(...);
If glTex(Sub)Image* turns out to be an issue, we will need to track
valid level/layer ranges as well.
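To make the scenario above concrete, here is a minimal standalone sketch of the
valid-buffer-range idea (a hypothetical model for illustration only; the real
driver uses Mesa's util_range helpers such as util_range_add, not this code):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for Mesa's struct util_range: the union of all
 * byte ranges of a buffer that queued GPU commands may still be reading. */
struct valid_range {
   unsigned start; /* inclusive */
   unsigned end;   /* exclusive; start >= end means empty */
};

/* Grow the range to cover [start, end), like util_range_add. */
static void valid_range_add(struct valid_range *r, unsigned start, unsigned end)
{
   if (r->start >= r->end) {
      r->start = start;
      r->end = end;
   } else {
      if (start < r->start) r->start = start;
      if (end > r->end) r->end = end;
   }
}

/* A CPU write needs synchronization only if it overlaps the valid range. */
static bool write_needs_wait(const struct valid_range *r,
                             unsigned start, unsigned end)
{
   return start < r->end && end > r->start;
}

int main(void)
{
   struct valid_range r = {0, 0};
   unsigned size = 256;

   valid_range_add(&r, 0, size);       /* glBufferSubData(..., 0, size, data1) */
   printf("overwrite: wait=%d\n", write_needs_wait(&r, 0, size));        /* 1 */
   printf("append:    wait=%d\n", write_needs_wait(&r, size, 2 * size)); /* 0 */
   valid_range_add(&r, size, 2 * size); /* record the appended data */
   return 0;
}

With the queue-based check alone, the append at offset+size could still force a
wait whenever the buffer was referenced by an earlier cmdbuf; the range check
sees that the new bytes were never valid and skips the wait.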
v2: update virgl_buffer_transfer_extend as well
v3: do not remove virgl_transfer_queue_is_queued
Signed-off-by: Chia-I Wu <[email protected]>
Reviewed-by: Alexandros Frantzis <[email protected]> (v1)
Reviewed-by: Gurchetan Singh <[email protected]> (v2)
Diffstat (limited to 'src/gallium/drivers/virgl/virgl_encode.c')
-rw-r--r--  src/gallium/drivers/virgl/virgl_encode.c | 11
1 file changed, 11 insertions, 0 deletions
diff --git a/src/gallium/drivers/virgl/virgl_encode.c b/src/gallium/drivers/virgl/virgl_encode.c
index ee524c883d9..9c60b99bfb6 100644
--- a/src/gallium/drivers/virgl/virgl_encode.c
+++ b/src/gallium/drivers/virgl/virgl_encode.c
@@ -965,6 +965,9 @@ int virgl_encode_set_shader_buffers(struct virgl_context *ctx,
          virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
          virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
          virgl_encoder_write_res(ctx, res);
+
+         util_range_add(&res->valid_buffer_range, buffers[i].buffer_offset,
+                        buffers[i].buffer_offset + buffers[i].buffer_size);
          virgl_resource_dirty(res, 0);
       } else {
          virgl_encoder_write_dword(ctx->cbuf, 0);
@@ -989,6 +992,9 @@ int virgl_encode_set_hw_atomic_buffers(struct virgl_context *ctx,
          virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
          virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
          virgl_encoder_write_res(ctx, res);
+
+         util_range_add(&res->valid_buffer_range, buffers[i].buffer_offset,
+                        buffers[i].buffer_offset + buffers[i].buffer_size);
          virgl_resource_dirty(res, 0);
       } else {
          virgl_encoder_write_dword(ctx->cbuf, 0);
@@ -1017,6 +1023,11 @@ int virgl_encode_set_shader_images(struct virgl_context *ctx,
          virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.offset);
          virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.size);
          virgl_encoder_write_res(ctx, res);
+
+         if (res->u.b.target == PIPE_BUFFER) {
+            util_range_add(&res->valid_buffer_range, images[i].u.buf.offset,
+                           images[i].u.buf.offset + images[i].u.buf.size);
+         }
          virgl_resource_dirty(res, images[i].u.tex.level);
       } else {
          virgl_encoder_write_dword(ctx->cbuf, 0);
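The hunks above record, at encode time, which bytes of a buffer bound as an
SSBO, atomic buffer, or buffer image may be read by queued cmdbufs. On the
consuming side, a transfer could then skip the flush/wait when its write range
misses that record. A sketch under the same assumptions (the stub types below
stand in for the real virgl/Mesa ones; the actual check lives in the virgl
transfer code, and Mesa's helper for this test is util_ranges_intersect):

#include <stdbool.h>

/* Stub stand-ins for the Mesa/virgl types (assumption for illustration). */
struct util_range {
   unsigned start; /* inclusive */
   unsigned end;   /* exclusive */
};

struct virgl_resource {
   struct util_range valid_buffer_range;
};

/* Mirrors the semantics of Mesa's util_ranges_intersect(). */
static bool ranges_intersect(const struct util_range *range,
                             unsigned start, unsigned end)
{
   return start < range->end && end > range->start;
}

/* Sketch: a buffer write only needs to flush and wait on earlier cmdbufs
 * when it touches bytes those cmdbufs may still be reading. */
static bool transfer_needs_sync(const struct virgl_resource *res,
                                unsigned offset, unsigned size)
{
   return ranges_intersect(&res->valid_buffer_range, offset, offset + size);
}

int main(void)
{
   struct virgl_resource res = { { 0, 256 } };
   /* A write past the valid range (an append) needs no sync: returns 0. */
   return transfer_needs_sync(&res, 256, 256) ? 1 : 0;
}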