Diffstat (limited to 'src/gallium/drivers')
 src/gallium/drivers/virgl/Automake.inc      |  10
 src/gallium/drivers/virgl/Makefile.am       |  43
 src/gallium/drivers/virgl/virgl.h           |  51
 src/gallium/drivers/virgl/virgl_buffer.c    | 170
 src/gallium/drivers/virgl/virgl_context.c   | 962
 src/gallium/drivers/virgl/virgl_context.h   | 100
 src/gallium/drivers/virgl/virgl_encode.c    | 863
 src/gallium/drivers/virgl/virgl_encode.h    | 232
 src/gallium/drivers/virgl/virgl_protocol.h  | 468
 src/gallium/drivers/virgl/virgl_public.h    |  32
 src/gallium/drivers/virgl/virgl_query.c     | 169
 src/gallium/drivers/virgl/virgl_resource.c  |  89
 src/gallium/drivers/virgl/virgl_resource.h  | 134
 src/gallium/drivers/virgl/virgl_screen.c    | 557
 src/gallium/drivers/virgl/virgl_streamout.c |  87
 src/gallium/drivers/virgl/virgl_texture.c   | 349
 src/gallium/drivers/virgl/virgl_tgsi.c      |  66
 src/gallium/drivers/virgl/virgl_winsys.h    | 110
 18 files changed, 4492 insertions(+), 0 deletions(-)
diff --git a/src/gallium/drivers/virgl/Automake.inc b/src/gallium/drivers/virgl/Automake.inc
new file mode 100644
index 00000000000..457ca77b3a7
--- /dev/null
+++ b/src/gallium/drivers/virgl/Automake.inc
@@ -0,0 +1,10 @@
+if HAVE_GALLIUM_VIRGL
+
+TARGET_DRIVERS += virtio_gpu
+TARGET_CPPFLAGS += -DGALLIUM_VIRGL
+TARGET_LIB_DEPS += \
+ $(top_builddir)/src/gallium/drivers/virgl/libvirgl.la \
+ $(top_builddir)/src/gallium/winsys/virgl/drm/libvirgldrm.la \
+ $(LIBDRM_LIBS)
+
+endif
diff --git a/src/gallium/drivers/virgl/Makefile.am b/src/gallium/drivers/virgl/Makefile.am
new file mode 100644
index 00000000000..90a18ec3ffe
--- /dev/null
+++ b/src/gallium/drivers/virgl/Makefile.am
@@ -0,0 +1,43 @@
+# Copyright © 2014, 2015 Red Hat.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+include $(top_srcdir)/src/gallium/Automake.inc
+
+AM_CPPFLAGS = \
+ -I$(top_srcdir)/src/gallium/drivers \
+ -I$(top_srcdir)/src/gallium/winsys/virgl/drm \
+ -I$(top_srcdir)/include \
+ $(GALLIUM_CFLAGS) \
+ $(LIBDRM_CFLAGS)
+
+noinst_LTLIBRARIES = libvirgl.la
+
+libvirgl_la_SOURCES = \
+ virgl_screen.c \
+ virgl_resource.c \
+ virgl_buffer.c \
+ virgl_texture.c \
+ virgl_context.c \
+ virgl_encode.c \
+ virgl_query.c \
+ virgl_streamout.c \
+ virgl_tgsi.c
diff --git a/src/gallium/drivers/virgl/virgl.h b/src/gallium/drivers/virgl/virgl.h
new file mode 100644
index 00000000000..21e3bc0ea36
--- /dev/null
+++ b/src/gallium/drivers/virgl/virgl.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2014, 2015 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef VIRGL_H
+#define VIRGL_H
+
+#include "util/u_transfer.h"
+
+#include "../winsys/virgl/drm/virgl_hw.h"
+
+#include "virgl_winsys.h"
+#include "pipe/p_screen.h"
+struct virgl_screen {
+ struct pipe_screen base;
+ struct sw_winsys *winsys;
+ struct virgl_winsys *vws;
+
+ struct virgl_drm_caps caps;
+
+ uint32_t sub_ctx_id;
+};
+
+
+static inline struct virgl_screen *
+virgl_screen( struct pipe_screen *pipe )
+{
+ return (struct virgl_screen *)pipe;
+}
+
+#define VIRGL_MAP_BUFFER_ALIGNMENT 64
+
+#endif
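
The virgl_screen() inline above is the downcast that the driver files below rely on to reach the winsys vtable. A minimal sketch of that pattern follows; example_wait_idle is a hypothetical helper, not part of this patch, while resource_wait is the winsys hook used throughout the driver.

/* Sketch only: reaching the virgl winsys from a generic pipe_screen.
 * example_wait_idle is hypothetical; resource_wait comes from virgl_winsys. */
static void example_wait_idle(struct pipe_screen *pscreen,
                              struct virgl_hw_res *hw_res)
{
   struct virgl_screen *vs = virgl_screen(pscreen);

   vs->vws->resource_wait(vs->vws, hw_res);
}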
diff --git a/src/gallium/drivers/virgl/virgl_buffer.c b/src/gallium/drivers/virgl/virgl_buffer.c
new file mode 100644
index 00000000000..93fb29598a9
--- /dev/null
+++ b/src/gallium/drivers/virgl/virgl_buffer.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2014, 2015 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "util/u_memory.h"
+#include "virgl_context.h"
+#include "virgl_resource.h"
+
+static void virgl_buffer_destroy(struct pipe_screen *screen,
+ struct pipe_resource *buf)
+{
+ struct virgl_screen *vs = virgl_screen(screen);
+ struct virgl_buffer *vbuf = virgl_buffer(buf);
+
+ util_range_destroy(&vbuf->valid_buffer_range);
+ vs->vws->resource_unref(vs->vws, vbuf->base.hw_res);
+ FREE(vbuf);
+}
+
+static void *virgl_buffer_transfer_map(struct pipe_context *ctx,
+ struct pipe_resource *resource,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **transfer)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_screen *vs = virgl_screen(ctx->screen);
+ struct virgl_buffer *vbuf = virgl_buffer(resource);
+ struct virgl_transfer *trans;
+ void *ptr;
+ bool readback;
+ uint32_t offset;
+ bool doflushwait = false;
+
+ if ((usage & PIPE_TRANSFER_READ) && (vbuf->on_list == TRUE))
+ doflushwait = true;
+ else
+ doflushwait = virgl_res_needs_flush_wait(vctx, &vbuf->base, usage);
+
+ if (doflushwait)
+ ctx->flush(ctx, NULL, 0);
+
+ trans = util_slab_alloc(&vctx->texture_transfer_pool);
+ if (trans == NULL)
+ return NULL;
+
+ trans->base.resource = resource;
+ trans->base.level = level;
+ trans->base.usage = usage;
+ trans->base.box = *box;
+ trans->base.stride = 0;
+ trans->base.layer_stride = 0;
+
+ offset = box->x;
+
+ readback = virgl_res_needs_readback(vctx, &vbuf->base, usage);
+ if (readback)
+ vs->vws->transfer_get(vs->vws, vbuf->base.hw_res, box, trans->base.stride, trans->base.layer_stride, offset, level);
+
+ if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED))
+ doflushwait = true;
+
+ if (doflushwait || readback)
+ vs->vws->resource_wait(vs->vws, vbuf->base.hw_res);
+
+ ptr = vs->vws->resource_map(vs->vws, vbuf->base.hw_res);
+ if (!ptr) {
+ return NULL;
+ }
+
+ trans->offset = offset;
+ *transfer = &trans->base;
+
+ return ptr + trans->offset;
+}
+
+static void virgl_buffer_transfer_unmap(struct pipe_context *ctx,
+ struct pipe_transfer *transfer)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_transfer *trans = (struct virgl_transfer *)transfer;
+ struct virgl_buffer *vbuf = virgl_buffer(transfer->resource);
+
+ if (trans->base.usage & PIPE_TRANSFER_WRITE) {
+ if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
+ struct virgl_screen *vs = virgl_screen(ctx->screen);
+ vbuf->base.clean = FALSE;
+ vctx->num_transfers++;
+ vs->vws->transfer_put(vs->vws, vbuf->base.hw_res,
+ &transfer->box, trans->base.stride, trans->base.layer_stride, trans->offset, transfer->level);
+
+ }
+ }
+
+ util_slab_free(&vctx->texture_transfer_pool, trans);
+}
+
+static void virgl_buffer_transfer_flush_region(struct pipe_context *ctx,
+ struct pipe_transfer *transfer,
+ const struct pipe_box *box)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_buffer *vbuf = virgl_buffer(transfer->resource);
+
+ if (!vbuf->on_list) {
+ struct pipe_resource *res = NULL;
+
+ list_addtail(&vbuf->flush_list, &vctx->to_flush_bufs);
+ vbuf->on_list = TRUE;
+ pipe_resource_reference(&res, &vbuf->base.u.b);
+ }
+
+ util_range_add(&vbuf->valid_buffer_range, transfer->box.x + box->x,
+ transfer->box.x + box->x + box->width);
+
+ vbuf->base.clean = FALSE;
+}
+
+static const struct u_resource_vtbl virgl_buffer_vtbl =
+{
+ u_default_resource_get_handle, /* get_handle */
+ virgl_buffer_destroy, /* resource_destroy */
+ virgl_buffer_transfer_map, /* transfer_map */
+ virgl_buffer_transfer_flush_region, /* transfer_flush_region */
+ virgl_buffer_transfer_unmap, /* transfer_unmap */
+ virgl_transfer_inline_write /* transfer_inline_write */
+};
+
+struct pipe_resource *virgl_buffer_create(struct virgl_screen *vs,
+ const struct pipe_resource *template)
+{
+ struct virgl_buffer *buf;
+ uint32_t size;
+ uint32_t vbind;
+ buf = CALLOC_STRUCT(virgl_buffer);
+ buf->base.clean = TRUE;
+ buf->base.u.b = *template;
+ buf->base.u.b.screen = &vs->base;
+ buf->base.u.vtbl = &virgl_buffer_vtbl;
+ pipe_reference_init(&buf->base.u.b.reference, 1);
+ util_range_init(&buf->valid_buffer_range);
+
+ vbind = pipe_to_virgl_bind(template->bind);
+ size = template->width0;
+
+ buf->base.hw_res = vs->vws->resource_create(vs->vws, template->target, template->format, vbind, template->width0, 1, 1, 1, 0, 0, size);
+
+ util_range_set_empty(&buf->valid_buffer_range);
+ return &buf->base.u.b;
+}
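
For context, the map/flush_region/unmap callbacks above are reached through the generic Gallium transfer hooks on pipe_context. A minimal sketch of a caller doing an explicit-flush subrange write is shown below; the example_ name is hypothetical, while the pipe_context hooks, the PIPE_TRANSFER_* flags and u_box_1d() (util/u_box.h) are standard Gallium.

/* Sketch only: a state-tracker-style write that exercises
 * virgl_buffer_transfer_map/flush_region/unmap via the generic hooks. */
static void example_write_subrange(struct pipe_context *pipe,
                                   struct pipe_resource *buf,
                                   unsigned offset, unsigned size,
                                   const void *data)
{
   struct pipe_transfer *xfer;
   struct pipe_box box;
   void *map;

   u_box_1d(offset, size, &box);
   map = pipe->transfer_map(pipe, buf, 0,
                            PIPE_TRANSFER_WRITE |
                            PIPE_TRANSFER_FLUSH_EXPLICIT,
                            &box, &xfer);
   if (!map)
      return;

   memcpy(map, data, size);

   /* The flush box is relative to the mapped box; virgl records it in
    * valid_buffer_range and queues the buffer on to_flush_bufs. */
   u_box_1d(0, size, &box);
   pipe->transfer_flush_region(pipe, xfer, &box);
   pipe->transfer_unmap(pipe, xfer);
}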
diff --git a/src/gallium/drivers/virgl/virgl_context.c b/src/gallium/drivers/virgl/virgl_context.c
new file mode 100644
index 00000000000..37a631a2b26
--- /dev/null
+++ b/src/gallium/drivers/virgl/virgl_context.c
@@ -0,0 +1,962 @@
+/*
+ * Copyright 2014, 2015 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "pipe/p_shader_tokens.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_screen.h"
+#include "pipe/p_state.h"
+#include "util/u_inlines.h"
+#include "util/u_memory.h"
+#include "util/u_format.h"
+#include "util/u_transfer.h"
+#include "util/u_helpers.h"
+#include "util/u_slab.h"
+#include "util/u_upload_mgr.h"
+#include "util/u_blitter.h"
+#include "tgsi/tgsi_text.h"
+
+#include "pipebuffer/pb_buffer.h"
+#include "state_tracker/graw.h"
+#include "state_tracker/drm_driver.h"
+
+#include "virgl_encode.h"
+
+#include "virgl_context.h"
+
+#include "virgl_resource.h"
+#include "virgl.h"
+#include "state_tracker/sw_winsys.h"
+ struct pipe_screen encscreen;
+
+static uint32_t next_handle;
+uint32_t virgl_object_assign_handle(void)
+{
+ return ++next_handle;
+}
+
+static void virgl_buffer_flush(struct virgl_context *vctx,
+ struct virgl_buffer *vbuf)
+{
+ struct virgl_screen *rs = virgl_screen(vctx->base.screen);
+ struct pipe_box box;
+
+ assert(vbuf->on_list);
+
+ box.height = 1;
+ box.depth = 1;
+ box.y = 0;
+ box.z = 0;
+
+ box.x = vbuf->valid_buffer_range.start;
+ box.width = MIN2(vbuf->valid_buffer_range.end - vbuf->valid_buffer_range.start, vbuf->base.u.b.width0);
+
+ vctx->num_transfers++;
+ rs->vws->transfer_put(rs->vws, vbuf->base.hw_res,
+ &box, 0, 0, box.x, 0);
+
+ util_range_set_empty(&vbuf->valid_buffer_range);
+}
+
+static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
+{
+ struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+ struct pipe_surface *surf;
+ struct virgl_resource *res;
+ unsigned i;
+
+ surf = vctx->framebuffer.zsbuf;
+ if (surf) {
+ res = (struct virgl_resource *)surf->texture;
+ if (res)
+ vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
+ }
+ for (i = 0; i < vctx->framebuffer.nr_cbufs; i++) {
+ surf = vctx->framebuffer.cbufs[i];
+ if (surf) {
+ res = (struct virgl_resource *)surf->texture;
+ if (res)
+ vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
+ }
+ }
+}
+
+static void virgl_attach_res_sampler_views(struct virgl_context *vctx,
+ unsigned shader_type)
+{
+ struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+ struct virgl_textures_info *tinfo = &vctx->samplers[shader_type];
+ struct virgl_resource *res;
+ uint32_t remaining_mask = tinfo->enabled_mask;
+ unsigned i;
+ while (remaining_mask) {
+ i = u_bit_scan(&remaining_mask);
+ assert(tinfo->views[i]);
+
+ res = (struct virgl_resource *)tinfo->views[i]->base.texture;
+ if (res)
+ vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
+ }
+}
+
+static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
+{
+ struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+ struct virgl_resource *res;
+ unsigned i;
+
+ for (i = 0; i < vctx->num_vertex_buffers; i++) {
+ res = (struct virgl_resource *)vctx->vertex_buffer[i].buffer;
+ if (res)
+ vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
+ }
+}
+
+static void virgl_attach_res_index_buffer(struct virgl_context *vctx)
+{
+ struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+ struct virgl_resource *res;
+
+ res = (struct virgl_resource *)vctx->index_buffer.buffer;
+ if (res)
+ vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
+}
+
+static void virgl_attach_res_so_targets(struct virgl_context *vctx)
+{
+ struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+ struct virgl_resource *res;
+ unsigned i;
+
+ for (i = 0; i < vctx->num_so_targets; i++) {
+ res = (struct virgl_resource *)vctx->so_targets[i].base.buffer;
+ if (res)
+ vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
+ }
+}
+
+static void virgl_attach_res_uniform_buffers(struct virgl_context *vctx,
+ unsigned shader_type)
+{
+ struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+ struct virgl_resource *res;
+ unsigned i;
+ for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
+ res = (struct virgl_resource *)vctx->ubos[shader_type][i];
+ if (res) {
+ vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
+ }
+ }
+}
+
+/*
+ * after flushing, the hw context still has a bunch of
+ * resources bound, so we need to rebind those here.
+ */
+static void virgl_reemit_res(struct virgl_context *vctx)
+{
+ unsigned shader_type;
+
+ /* reattach any flushed resources */
+ /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
+ virgl_attach_res_framebuffer(vctx);
+
+ for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
+ virgl_attach_res_sampler_views(vctx, shader_type);
+ virgl_attach_res_uniform_buffers(vctx, shader_type);
+ }
+ virgl_attach_res_index_buffer(vctx);
+ virgl_attach_res_vertex_buffers(vctx);
+ virgl_attach_res_so_targets(vctx);
+}
+
+static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
+ struct pipe_resource *resource,
+ const struct pipe_surface *templ)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_surface *surf;
+ struct virgl_resource *res = virgl_resource(resource);
+ uint32_t handle;
+
+ surf = CALLOC_STRUCT(virgl_surface);
+ if (surf == NULL)
+ return NULL;
+
+ res->clean = FALSE;
+ handle = virgl_object_assign_handle();
+ pipe_reference_init(&surf->base.reference, 1);
+ pipe_resource_reference(&surf->base.texture, resource);
+ surf->base.context = ctx;
+ surf->base.format = templ->format;
+ if (resource->target != PIPE_BUFFER) {
+ surf->base.width = u_minify(resource->width0, templ->u.tex.level);
+ surf->base.height = u_minify(resource->height0, templ->u.tex.level);
+ surf->base.u.tex.level = templ->u.tex.level;
+ surf->base.u.tex.first_layer = templ->u.tex.first_layer;
+ surf->base.u.tex.last_layer = templ->u.tex.last_layer;
+ } else {
+ surf->base.width = templ->u.buf.last_element - templ->u.buf.first_element + 1;
+ surf->base.height = resource->height0;
+ surf->base.u.buf.first_element = templ->u.buf.first_element;
+ surf->base.u.buf.last_element = templ->u.buf.last_element;
+ }
+ virgl_encoder_create_surface(vctx, handle, res, &surf->base);
+ surf->handle = handle;
+ return &surf->base;
+}
+
+static void virgl_surface_destroy(struct pipe_context *ctx,
+ struct pipe_surface *psurf)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_surface *surf = (struct virgl_surface *)psurf;
+
+ pipe_resource_reference(&surf->base.texture, NULL);
+ virgl_encode_delete_object(vctx, surf->handle, VIRGL_OBJECT_SURFACE);
+ FREE(surf);
+}
+
+static void *virgl_create_blend_state(struct pipe_context *ctx,
+ const struct pipe_blend_state *blend_state)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ uint32_t handle;
+ handle = virgl_object_assign_handle();
+
+ virgl_encode_blend_state(vctx, handle, blend_state);
+ return (void *)(unsigned long)handle;
+
+}
+
+static void virgl_bind_blend_state(struct pipe_context *ctx,
+ void *blend_state)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ uint32_t handle = (unsigned long)blend_state;
+ virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_BLEND);
+}
+
+static void virgl_delete_blend_state(struct pipe_context *ctx,
+ void *blend_state)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ uint32_t handle = (unsigned long)blend_state;
+ virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_BLEND);
+}
+
+static void *virgl_create_depth_stencil_alpha_state(struct pipe_context *ctx,
+ const struct pipe_depth_stencil_alpha_state *blend_state)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ uint32_t handle;
+ handle = virgl_object_assign_handle();
+
+ virgl_encode_dsa_state(vctx, handle, blend_state);
+ return (void *)(unsigned long)handle;
+}
+
+static void virgl_bind_depth_stencil_alpha_state(struct pipe_context *ctx,
+ void *blend_state)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ uint32_t handle = (unsigned long)blend_state;
+ virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_DSA);
+}
+
+static void virgl_delete_depth_stencil_alpha_state(struct pipe_context *ctx,
+ void *dsa_state)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ uint32_t handle = (unsigned long)dsa_state;
+ virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_DSA);
+}
+
+static void *virgl_create_rasterizer_state(struct pipe_context *ctx,
+ const struct pipe_rasterizer_state *rs_state)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ uint32_t handle;
+ handle = virgl_object_assign_handle();
+
+ virgl_encode_rasterizer_state(vctx, handle, rs_state);
+ return (void *)(unsigned long)handle;
+}
+
+static void virgl_bind_rasterizer_state(struct pipe_context *ctx,
+ void *rs_state)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ uint32_t handle = (unsigned long)rs_state;
+
+ virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
+}
+
+static void virgl_delete_rasterizer_state(struct pipe_context *ctx,
+ void *rs_state)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ uint32_t handle = (unsigned long)rs_state;
+ virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
+}
+
+static void virgl_set_framebuffer_state(struct pipe_context *ctx,
+ const struct pipe_framebuffer_state *state)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+
+ vctx->framebuffer = *state;
+ virgl_encoder_set_framebuffer_state(vctx, state);
+ virgl_attach_res_framebuffer(vctx);
+}
+
+static void virgl_set_viewport_states(struct pipe_context *ctx,
+ unsigned start_slot,
+ unsigned num_viewports,
+ const struct pipe_viewport_state *state)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ virgl_encoder_set_viewport_states(vctx, start_slot, num_viewports, state);
+}
+
+static void *virgl_create_vertex_elements_state(struct pipe_context *ctx,
+ unsigned num_elements,
+ const struct pipe_vertex_element *elements)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ uint32_t handle = virgl_object_assign_handle();
+ virgl_encoder_create_vertex_elements(vctx, handle,
+ num_elements, elements);
+ return (void*)(unsigned long)handle;
+
+}
+
+static void virgl_delete_vertex_elements_state(struct pipe_context *ctx,
+ void *ve)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ uint32_t handle = (unsigned long)ve;
+
+ virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
+}
+
+static void virgl_bind_vertex_elements_state(struct pipe_context *ctx,
+ void *ve)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ uint32_t handle = (unsigned long)ve;
+ virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
+}
+
+static void virgl_set_vertex_buffers(struct pipe_context *ctx,
+ unsigned start_slot,
+ unsigned num_buffers,
+ const struct pipe_vertex_buffer *buffers)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+
+ util_set_vertex_buffers_count(vctx->vertex_buffer,
+ &vctx->num_vertex_buffers,
+ buffers, start_slot, num_buffers);
+
+ vctx->vertex_array_dirty = TRUE;
+}
+
+static void virgl_hw_set_vertex_buffers(struct pipe_context *ctx)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+
+ if (vctx->vertex_array_dirty) {
+ virgl_encoder_set_vertex_buffers(vctx, vctx->num_vertex_buffers, vctx->vertex_buffer);
+ virgl_attach_res_vertex_buffers(vctx);
+ }
+}
+
+static void virgl_set_stencil_ref(struct pipe_context *ctx,
+ const struct pipe_stencil_ref *ref)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ virgl_encoder_set_stencil_ref(vctx, ref);
+}
+
+static void virgl_set_blend_color(struct pipe_context *ctx,
+ const struct pipe_blend_color *color)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ virgl_encoder_set_blend_color(vctx, color);
+}
+
+static void virgl_set_index_buffer(struct pipe_context *ctx,
+ const struct pipe_index_buffer *ib)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+
+ if (ib) {
+ pipe_resource_reference(&vctx->index_buffer.buffer, ib->buffer);
+ memcpy(&vctx->index_buffer, ib, sizeof(*ib));
+ } else {
+ pipe_resource_reference(&vctx->index_buffer.buffer, NULL);
+ }
+}
+
+static void virgl_hw_set_index_buffer(struct pipe_context *ctx,
+ struct pipe_index_buffer *ib)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ virgl_encoder_set_index_buffer(vctx, ib);
+ virgl_attach_res_index_buffer(vctx);
+}
+
+static void virgl_set_constant_buffer(struct pipe_context *ctx,
+ uint shader, uint index,
+ struct pipe_constant_buffer *buf)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+
+ if (buf) {
+ if (!buf->user_buffer){
+ struct virgl_resource *res = (struct virgl_resource *)buf->buffer;
+ virgl_encoder_set_uniform_buffer(vctx, shader, index, buf->buffer_offset,
+ buf->buffer_size, res);
+ pipe_resource_reference(&vctx->ubos[shader][index], buf->buffer);
+ return;
+ }
+ pipe_resource_reference(&vctx->ubos[shader][index], NULL);
+ virgl_encoder_write_constant_buffer(vctx, shader, index, buf->buffer_size / 4, buf->user_buffer);
+ } else {
+ virgl_encoder_write_constant_buffer(vctx, shader, index, 0, NULL);
+ pipe_resource_reference(&vctx->ubos[shader][index], NULL);
+ }
+}
+
+void virgl_transfer_inline_write(struct pipe_context *ctx,
+ struct pipe_resource *res,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ const void *data,
+ unsigned stride,
+ unsigned layer_stride)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_screen *vs = virgl_screen(ctx->screen);
+ struct virgl_resource *grres = (struct virgl_resource *)res;
+ struct virgl_buffer *vbuf = virgl_buffer(res);
+
+ grres->clean = FALSE;
+
+ if (virgl_res_needs_flush_wait(vctx, &vbuf->base, usage)) {
+ ctx->flush(ctx, NULL, 0);
+
+ vs->vws->resource_wait(vs->vws, vbuf->base.hw_res);
+ }
+
+ virgl_encoder_inline_write(vctx, grres, level, usage,
+ box, data, stride, layer_stride);
+}
+
+static void *virgl_shader_encoder(struct pipe_context *ctx,
+ const struct pipe_shader_state *shader,
+ unsigned type)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ uint32_t handle;
+ struct tgsi_token *new_tokens;
+ int ret;
+
+ new_tokens = virgl_tgsi_transform(shader->tokens);
+ if (!new_tokens)
+ return NULL;
+
+ handle = virgl_object_assign_handle();
+ /* encode VS state */
+ ret = virgl_encode_shader_state(vctx, handle, type,
+ &shader->stream_output,
+ new_tokens);
+ if (ret) {
+ return NULL;
+ }
+
+ FREE(new_tokens);
+ return (void *)(unsigned long)handle;
+
+}
+static void *virgl_create_vs_state(struct pipe_context *ctx,
+ const struct pipe_shader_state *shader)
+{
+ return virgl_shader_encoder(ctx, shader, PIPE_SHADER_VERTEX);
+}
+
+static void *virgl_create_gs_state(struct pipe_context *ctx,
+ const struct pipe_shader_state *shader)
+{
+ return virgl_shader_encoder(ctx, shader, PIPE_SHADER_GEOMETRY);
+}
+
+static void *virgl_create_fs_state(struct pipe_context *ctx,
+ const struct pipe_shader_state *shader)
+{
+ return virgl_shader_encoder(ctx, shader, PIPE_SHADER_FRAGMENT);
+}
+
+static void
+virgl_delete_fs_state(struct pipe_context *ctx,
+ void *fs)
+{
+ uint32_t handle = (unsigned long)fs;
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+
+ virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
+}
+
+static void
+virgl_delete_gs_state(struct pipe_context *ctx,
+ void *gs)
+{
+ uint32_t handle = (unsigned long)gs;
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+
+ virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
+}
+
+static void
+virgl_delete_vs_state(struct pipe_context *ctx,
+ void *vs)
+{
+ uint32_t handle = (unsigned long)vs;
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+
+ virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
+}
+
+static void virgl_bind_vs_state(struct pipe_context *ctx,
+ void *vss)
+{
+ uint32_t handle = (unsigned long)vss;
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+
+ virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_VERTEX);
+}
+
+static void virgl_bind_gs_state(struct pipe_context *ctx,
+ void *vss)
+{
+ uint32_t handle = (unsigned long)vss;
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+
+ virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_GEOMETRY);
+}
+
+
+static void virgl_bind_fs_state(struct pipe_context *ctx,
+ void *vss)
+{
+ uint32_t handle = (unsigned long)vss;
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+
+ virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_FRAGMENT);
+}
+
+static void virgl_clear(struct pipe_context *ctx,
+ unsigned buffers,
+ const union pipe_color_union *color,
+ double depth, unsigned stencil)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+
+ virgl_encode_clear(vctx, buffers, color, depth, stencil);
+}
+
+static void virgl_draw_vbo(struct pipe_context *ctx,
+ const struct pipe_draw_info *dinfo)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_screen *rs = virgl_screen(ctx->screen);
+ struct pipe_index_buffer ib = {};
+ struct pipe_draw_info info = *dinfo;
+
+ if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
+ util_primconvert_save_index_buffer(vctx->primconvert, &vctx->index_buffer);
+ util_primconvert_draw_vbo(vctx->primconvert, dinfo);
+ return;
+ }
+ if (info.indexed) {
+ pipe_resource_reference(&ib.buffer, vctx->index_buffer.buffer);
+ ib.user_buffer = vctx->index_buffer.user_buffer;
+ ib.index_size = vctx->index_buffer.index_size;
+ ib.offset = vctx->index_buffer.offset + info.start * ib.index_size;
+
+ if (ib.user_buffer) {
+ u_upload_data(vctx->uploader, 0, info.count * ib.index_size,
+ ib.user_buffer, &ib.offset, &ib.buffer);
+ ib.user_buffer = NULL;
+ }
+ }
+
+ u_upload_unmap(vctx->uploader);
+
+ vctx->num_draws++;
+ virgl_hw_set_vertex_buffers(ctx);
+ if (info.indexed)
+ virgl_hw_set_index_buffer(ctx, &ib);
+
+ virgl_encoder_draw_vbo(vctx, &info);
+
+ pipe_resource_reference(&ib.buffer, NULL);
+
+}
+
+static void virgl_flush_eq(struct virgl_context *ctx, void *closure)
+{
+ struct virgl_screen *rs = virgl_screen(ctx->base.screen);
+
+ /* send the buffer to the remote side for decoding */
+ ctx->num_transfers = ctx->num_draws = 0;
+ rs->vws->submit_cmd(rs->vws, ctx->cbuf);
+
+ virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);
+
+ /* add back current framebuffer resources to reference list? */
+ virgl_reemit_res(ctx);
+}
+
+static void virgl_flush_from_st(struct pipe_context *ctx,
+ struct pipe_fence_handle **fence,
+ enum pipe_flush_flags flags)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_screen *rs = virgl_screen(ctx->screen);
+ struct virgl_buffer *buf, *tmp;
+
+ if (fence)
+ *fence = rs->vws->cs_create_fence(rs->vws);
+
+ LIST_FOR_EACH_ENTRY_SAFE(buf, tmp, &vctx->to_flush_bufs, flush_list) {
+ struct pipe_resource *res = &buf->base.u.b;
+ virgl_buffer_flush(vctx, buf);
+ list_del(&buf->flush_list);
+ buf->on_list = FALSE;
+ pipe_resource_reference(&res, NULL);
+
+ }
+ virgl_flush_eq(vctx, vctx);
+}
+
+static struct pipe_sampler_view *virgl_create_sampler_view(struct pipe_context *ctx,
+ struct pipe_resource *texture,
+ const struct pipe_sampler_view *state)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_sampler_view *grview = CALLOC_STRUCT(virgl_sampler_view);
+ uint32_t handle;
+ struct virgl_resource *res;
+
+ if (state == NULL)
+ return NULL;
+
+ res = (struct virgl_resource *)texture;
+ handle = virgl_object_assign_handle();
+ virgl_encode_sampler_view(vctx, handle, res, state);
+
+ grview->base = *state;
+ grview->base.reference.count = 1;
+
+ grview->base.texture = NULL;
+ grview->base.context = ctx;
+ pipe_resource_reference(&grview->base.texture, texture);
+ grview->handle = handle;
+ return &grview->base;
+}
+
+static void virgl_set_sampler_views(struct pipe_context *ctx,
+ unsigned shader_type,
+ unsigned start_slot,
+ unsigned num_views,
+ struct pipe_sampler_view **views)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ int i;
+ uint32_t disable_mask = ~((1ull << num_views) - 1);
+ struct virgl_textures_info *tinfo = &vctx->samplers[shader_type];
+ uint32_t new_mask = 0;
+ uint32_t remaining_mask;
+
+ remaining_mask = tinfo->enabled_mask & disable_mask;
+
+ while (remaining_mask) {
+ i = u_bit_scan(&remaining_mask);
+ assert(tinfo->views[i]);
+
+ pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], NULL);
+ }
+
+ for (i = 0; i < num_views; i++) {
+ struct virgl_sampler_view *grview = (struct virgl_sampler_view *)views[i];
+
+ if (views[i] == (struct pipe_sampler_view *)tinfo->views[i])
+ continue;
+
+ if (grview) {
+ new_mask |= 1 << i;
+ pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], views[i]);
+ } else {
+ pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], NULL);
+ disable_mask |= 1 << i;
+ }
+ }
+
+ tinfo->enabled_mask &= ~disable_mask;
+ tinfo->enabled_mask |= new_mask;
+ virgl_encode_set_sampler_views(vctx, shader_type, start_slot, num_views, tinfo->views);
+ virgl_attach_res_sampler_views(vctx, shader_type);
+}
+
+static void virgl_destroy_sampler_view(struct pipe_context *ctx,
+ struct pipe_sampler_view *view)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_sampler_view *grview = (struct virgl_sampler_view *)view;
+
+ virgl_encode_delete_object(vctx, grview->handle, VIRGL_OBJECT_SAMPLER_VIEW);
+ pipe_resource_reference(&view->texture, NULL);
+ FREE(view);
+}
+
+static void *virgl_create_sampler_state(struct pipe_context *ctx,
+ const struct pipe_sampler_state *state)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ uint32_t handle;
+
+ handle = virgl_object_assign_handle();
+
+ virgl_encode_sampler_state(vctx, handle, state);
+ return (void *)(unsigned long)handle;
+}
+
+static void virgl_delete_sampler_state(struct pipe_context *ctx,
+ void *ss)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ uint32_t handle = (unsigned long)ss;
+
+ virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SAMPLER_STATE);
+}
+
+static void virgl_bind_sampler_states(struct pipe_context *ctx,
+ unsigned shader, unsigned start_slot,
+ unsigned num_samplers,
+ void **samplers)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ uint32_t handles[32];
+ int i;
+ for (i = 0; i < num_samplers; i++) {
+ handles[i] = (unsigned long)(samplers[i]);
+ }
+ virgl_encode_bind_sampler_states(vctx, shader, start_slot, num_samplers, handles);
+}
+
+static void virgl_set_polygon_stipple(struct pipe_context *ctx,
+ const struct pipe_poly_stipple *ps)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ virgl_encoder_set_polygon_stipple(vctx, ps);
+}
+
+static void virgl_set_scissor_states(struct pipe_context *ctx,
+ unsigned start_slot,
+ unsigned num_scissor,
+ const struct pipe_scissor_state *ss)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ virgl_encoder_set_scissor_state(vctx, start_slot, num_scissor, ss);
+}
+
+static void virgl_set_sample_mask(struct pipe_context *ctx,
+ unsigned sample_mask)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ virgl_encoder_set_sample_mask(vctx, sample_mask);
+}
+
+static void virgl_set_clip_state(struct pipe_context *ctx,
+ const struct pipe_clip_state *clip)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ virgl_encoder_set_clip_state(vctx, clip);
+}
+
+static void virgl_resource_copy_region(struct pipe_context *ctx,
+ struct pipe_resource *dst,
+ unsigned dst_level,
+ unsigned dstx, unsigned dsty, unsigned dstz,
+ struct pipe_resource *src,
+ unsigned src_level,
+ const struct pipe_box *src_box)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_resource *dres = (struct virgl_resource *)dst;
+ struct virgl_resource *sres = (struct virgl_resource *)src;
+
+ dres->clean = FALSE;
+ virgl_encode_resource_copy_region(vctx, dres,
+ dst_level, dstx, dsty, dstz,
+ sres, src_level,
+ src_box);
+}
+
+static void
+virgl_flush_resource(struct pipe_context *pipe,
+ struct pipe_resource *resource)
+{
+}
+
+static void virgl_blit(struct pipe_context *ctx,
+ const struct pipe_blit_info *blit)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_resource *dres = (struct virgl_resource *)blit->dst.resource;
+ struct virgl_resource *sres = (struct virgl_resource *)blit->src.resource;
+
+ dres->clean = FALSE;
+ virgl_encode_blit(vctx, dres, sres,
+ blit);
+}
+
+static void
+virgl_context_destroy( struct pipe_context *ctx )
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_screen *rs = virgl_screen(ctx->screen);
+
+ vctx->framebuffer.zsbuf = NULL;
+ vctx->framebuffer.nr_cbufs = 0;
+ virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
+ virgl_flush_eq(vctx, vctx);
+
+ rs->vws->cmd_buf_destroy(vctx->cbuf);
+ if (vctx->uploader)
+ u_upload_destroy(vctx->uploader);
+ util_primconvert_destroy(vctx->primconvert);
+
+ util_slab_destroy(&vctx->texture_transfer_pool);
+ FREE(vctx);
+}
+
+struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
+ void *priv,
+ unsigned flags)
+{
+ struct virgl_context *vctx;
+ struct virgl_screen *rs = virgl_screen(pscreen);
+ vctx = CALLOC_STRUCT(virgl_context);
+
+ vctx->cbuf = rs->vws->cmd_buf_create(rs->vws);
+ if (!vctx->cbuf) {
+ FREE(vctx);
+ return NULL;
+ }
+
+ vctx->base.destroy = virgl_context_destroy;
+ vctx->base.create_surface = virgl_create_surface;
+ vctx->base.surface_destroy = virgl_surface_destroy;
+ vctx->base.set_framebuffer_state = virgl_set_framebuffer_state;
+ vctx->base.create_blend_state = virgl_create_blend_state;
+ vctx->base.bind_blend_state = virgl_bind_blend_state;
+ vctx->base.delete_blend_state = virgl_delete_blend_state;
+ vctx->base.create_depth_stencil_alpha_state = virgl_create_depth_stencil_alpha_state;
+ vctx->base.bind_depth_stencil_alpha_state = virgl_bind_depth_stencil_alpha_state;
+ vctx->base.delete_depth_stencil_alpha_state = virgl_delete_depth_stencil_alpha_state;
+ vctx->base.create_rasterizer_state = virgl_create_rasterizer_state;
+ vctx->base.bind_rasterizer_state = virgl_bind_rasterizer_state;
+ vctx->base.delete_rasterizer_state = virgl_delete_rasterizer_state;
+
+ vctx->base.set_viewport_states = virgl_set_viewport_states;
+ vctx->base.create_vertex_elements_state = virgl_create_vertex_elements_state;
+ vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
+ vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
+ vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
+ vctx->base.set_index_buffer = virgl_set_index_buffer;
+ vctx->base.set_constant_buffer = virgl_set_constant_buffer;
+ vctx->base.transfer_inline_write = virgl_transfer_inline_write;
+
+ vctx->base.create_vs_state = virgl_create_vs_state;
+ vctx->base.create_gs_state = virgl_create_gs_state;
+ vctx->base.create_fs_state = virgl_create_fs_state;
+
+ vctx->base.bind_vs_state = virgl_bind_vs_state;
+ vctx->base.bind_gs_state = virgl_bind_gs_state;
+ vctx->base.bind_fs_state = virgl_bind_fs_state;
+
+ vctx->base.delete_vs_state = virgl_delete_vs_state;
+ vctx->base.delete_gs_state = virgl_delete_gs_state;
+ vctx->base.delete_fs_state = virgl_delete_fs_state;
+
+ vctx->base.clear = virgl_clear;
+ vctx->base.draw_vbo = virgl_draw_vbo;
+ vctx->base.flush = virgl_flush_from_st;
+ vctx->base.screen = pscreen;
+ vctx->base.create_sampler_view = virgl_create_sampler_view;
+ vctx->base.sampler_view_destroy = virgl_destroy_sampler_view;
+ vctx->base.set_sampler_views = virgl_set_sampler_views;
+
+ vctx->base.create_sampler_state = virgl_create_sampler_state;
+ vctx->base.delete_sampler_state = virgl_delete_sampler_state;
+ vctx->base.bind_sampler_states = virgl_bind_sampler_states;
+
+ vctx->base.set_polygon_stipple = virgl_set_polygon_stipple;
+ vctx->base.set_scissor_states = virgl_set_scissor_states;
+ vctx->base.set_sample_mask = virgl_set_sample_mask;
+ vctx->base.set_stencil_ref = virgl_set_stencil_ref;
+ vctx->base.set_clip_state = virgl_set_clip_state;
+
+ vctx->base.set_blend_color = virgl_set_blend_color;
+
+ vctx->base.resource_copy_region = virgl_resource_copy_region;
+ vctx->base.flush_resource = virgl_flush_resource;
+ vctx->base.blit = virgl_blit;
+
+ virgl_init_context_resource_functions(&vctx->base);
+ virgl_init_query_functions(vctx);
+ virgl_init_so_functions(vctx);
+
+ list_inithead(&vctx->to_flush_bufs);
+ util_slab_create(&vctx->texture_transfer_pool, sizeof(struct virgl_transfer),
+ 16, UTIL_SLAB_SINGLETHREADED);
+
+ vctx->primconvert = util_primconvert_create(&vctx->base, rs->caps.caps.v1.prim_mask);
+ vctx->uploader = u_upload_create(&vctx->base, 1024 * 1024, 256,
+ PIPE_BIND_INDEX_BUFFER);
+ if (!vctx->uploader)
+ goto fail;
+
+ vctx->hw_sub_ctx_id = rs->sub_ctx_id++;
+ virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);
+
+ virgl_encoder_set_sub_ctx(vctx, vctx->hw_sub_ctx_id);
+ return &vctx->base;
+fail:
+ return NULL;
+}
diff --git a/src/gallium/drivers/virgl/virgl_context.h b/src/gallium/drivers/virgl/virgl_context.h
new file mode 100644
index 00000000000..878d7862dbd
--- /dev/null
+++ b/src/gallium/drivers/virgl/virgl_context.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2014, 2015 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef VIRGL_CONTEXT_H
+#define VIRGL_CONTEXT_H
+
+#include "pipe/p_state.h"
+#include "pipe/p_context.h"
+#include "virgl_protocol.h"
+
+#include "virgl.h"
+#include "util/u_slab.h"
+#include "util/list.h"
+#include "indices/u_primconvert.h"
+
+struct virgl_resource;
+struct virgl_buffer;
+
+struct virgl_sampler_view {
+ struct pipe_sampler_view base;
+ uint32_t handle;
+};
+
+struct virgl_so_target {
+ struct pipe_stream_output_target base;
+ uint32_t handle;
+};
+
+struct virgl_textures_info {
+ struct virgl_sampler_view *views[16];
+ uint32_t enabled_mask;
+};
+
+struct virgl_context {
+ struct pipe_context base;
+ struct virgl_cmd_buf *cbuf;
+
+ struct virgl_textures_info samplers[PIPE_SHADER_TYPES];
+
+ struct pipe_framebuffer_state framebuffer;
+
+ struct util_slab_mempool texture_transfer_pool;
+
+ struct pipe_index_buffer index_buffer;
+ struct u_upload_mgr *uploader;
+
+ struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
+ unsigned num_vertex_buffers;
+ boolean vertex_array_dirty;
+
+ struct virgl_so_target so_targets[PIPE_MAX_SO_BUFFERS];
+ unsigned num_so_targets;
+
+ struct pipe_resource *ubos[PIPE_SHADER_TYPES][PIPE_MAX_CONSTANT_BUFFERS];
+ int num_transfers;
+ int num_draws;
+ struct list_head to_flush_bufs;
+
+ struct primconvert_context *primconvert;
+ uint32_t hw_sub_ctx_id;
+};
+
+struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
+ void *priv, unsigned flags);
+
+void virgl_init_blit_functions(struct virgl_context *vctx);
+void virgl_init_query_functions(struct virgl_context *vctx);
+void virgl_init_so_functions(struct virgl_context *vctx);
+
+void virgl_transfer_inline_write(struct pipe_context *ctx,
+ struct pipe_resource *res,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ const void *data,
+ unsigned stride,
+ unsigned layer_stride);
+
+struct tgsi_token *virgl_tgsi_transform(const struct tgsi_token *tokens_in);
+
+#endif
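
virgl_context_create() declared above is reached through the standard screen vtable; a minimal sketch follows, assuming virgl_screen.c (not part of this hunk) points pipe_screen::context_create at it.

/* Sketch only: obtaining a virgl pipe_context through the generic hook.
 * Assumes the screen's context_create was wired up to virgl_context_create. */
static struct pipe_context *example_create_context(struct pipe_screen *pscreen)
{
   return pscreen->context_create(pscreen, NULL /* priv */, 0 /* flags */);
}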
diff --git a/src/gallium/drivers/virgl/virgl_encode.c b/src/gallium/drivers/virgl/virgl_encode.c
new file mode 100644
index 00000000000..b4b49a21494
--- /dev/null
+++ b/src/gallium/drivers/virgl/virgl_encode.c
@@ -0,0 +1,863 @@
+/*
+ * Copyright 2014, 2015 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <stdint.h>
+
+#include "util/u_memory.h"
+#include "util/u_math.h"
+#include "pipe/p_state.h"
+#include "virgl_encode.h"
+#include "virgl_resource.h"
+#include "tgsi/tgsi_dump.h"
+#include "tgsi/tgsi_parse.h"
+
+static int virgl_encoder_write_cmd_dword(struct virgl_context *ctx,
+ uint32_t dword)
+{
+ int len = (dword >> 16);
+
+ if ((ctx->cbuf->cdw + len + 1) > VIRGL_MAX_CMDBUF_DWORDS)
+ ctx->base.flush(&ctx->base, NULL, 0);
+
+ virgl_encoder_write_dword(ctx->cbuf, dword);
+ return 0;
+}
+
+static void virgl_encoder_write_res(struct virgl_context *ctx,
+ struct virgl_resource *res)
+{
+ struct virgl_winsys *vws = virgl_screen(ctx->base.screen)->vws;
+
+ if (res && res->hw_res)
+ vws->emit_res(vws, ctx->cbuf, res->hw_res, TRUE);
+ else {
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ }
+}
+
+int virgl_encode_bind_object(struct virgl_context *ctx,
+ uint32_t handle, uint32_t object)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_OBJECT, object, 1));
+ virgl_encoder_write_dword(ctx->cbuf, handle);
+ return 0;
+}
+
+int virgl_encode_delete_object(struct virgl_context *ctx,
+ uint32_t handle, uint32_t object)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_OBJECT, object, 1));
+ virgl_encoder_write_dword(ctx->cbuf, handle);
+ return 0;
+}
+
+int virgl_encode_blend_state(struct virgl_context *ctx,
+ uint32_t handle,
+ const struct pipe_blend_state *blend_state)
+{
+ uint32_t tmp;
+ int i;
+
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_BLEND, VIRGL_OBJ_BLEND_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, handle);
+
+ tmp =
+ VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(blend_state->independent_blend_enable) |
+ VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(blend_state->logicop_enable) |
+ VIRGL_OBJ_BLEND_S0_DITHER(blend_state->dither) |
+ VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(blend_state->alpha_to_coverage) |
+ VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(blend_state->alpha_to_one);
+
+ virgl_encoder_write_dword(ctx->cbuf, tmp);
+
+ tmp = VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(blend_state->logicop_func);
+ virgl_encoder_write_dword(ctx->cbuf, tmp);
+
+ for (i = 0; i < VIRGL_MAX_COLOR_BUFS; i++) {
+ tmp =
+ VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(blend_state->rt[i].blend_enable) |
+ VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(blend_state->rt[i].rgb_func) |
+ VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(blend_state->rt[i].rgb_src_factor) |
+ VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(blend_state->rt[i].rgb_dst_factor)|
+ VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(blend_state->rt[i].alpha_func) |
+ VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(blend_state->rt[i].alpha_src_factor) |
+ VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(blend_state->rt[i].alpha_dst_factor) |
+ VIRGL_OBJ_BLEND_S2_RT_COLORMASK(blend_state->rt[i].colormask);
+ virgl_encoder_write_dword(ctx->cbuf, tmp);
+ }
+ return 0;
+}
+
+int virgl_encode_dsa_state(struct virgl_context *ctx,
+ uint32_t handle,
+ const struct pipe_depth_stencil_alpha_state *dsa_state)
+{
+ uint32_t tmp;
+ int i;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_DSA, VIRGL_OBJ_DSA_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, handle);
+
+ tmp = VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(dsa_state->depth.enabled) |
+ VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(dsa_state->depth.writemask) |
+ VIRGL_OBJ_DSA_S0_DEPTH_FUNC(dsa_state->depth.func) |
+ VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(dsa_state->alpha.enabled) |
+ VIRGL_OBJ_DSA_S0_ALPHA_FUNC(dsa_state->alpha.func);
+ virgl_encoder_write_dword(ctx->cbuf, tmp);
+
+ for (i = 0; i < 2; i++) {
+ tmp = VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(dsa_state->stencil[i].enabled) |
+ VIRGL_OBJ_DSA_S1_STENCIL_FUNC(dsa_state->stencil[i].func) |
+ VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(dsa_state->stencil[i].fail_op) |
+ VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(dsa_state->stencil[i].zpass_op) |
+ VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(dsa_state->stencil[i].zfail_op) |
+ VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(dsa_state->stencil[i].valuemask) |
+ VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(dsa_state->stencil[i].writemask);
+ virgl_encoder_write_dword(ctx->cbuf, tmp);
+ }
+
+ virgl_encoder_write_dword(ctx->cbuf, fui(dsa_state->alpha.ref_value));
+ return 0;
+}
+int virgl_encode_rasterizer_state(struct virgl_context *ctx,
+ uint32_t handle,
+ const struct pipe_rasterizer_state *state)
+{
+ uint32_t tmp;
+
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_RASTERIZER, VIRGL_OBJ_RS_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, handle);
+
+ tmp = VIRGL_OBJ_RS_S0_FLATSHADE(state->flatshade) |
+ VIRGL_OBJ_RS_S0_DEPTH_CLIP(state->depth_clip) |
+ VIRGL_OBJ_RS_S0_CLIP_HALFZ(state->clip_halfz) |
+ VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state->rasterizer_discard) |
+ VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state->flatshade_first) |
+ VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(state->light_twoside) |
+ VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(state->sprite_coord_mode) |
+ VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(state->point_quad_rasterization) |
+ VIRGL_OBJ_RS_S0_CULL_FACE(state->cull_face) |
+ VIRGL_OBJ_RS_S0_FILL_FRONT(state->fill_front) |
+ VIRGL_OBJ_RS_S0_FILL_BACK(state->fill_back) |
+ VIRGL_OBJ_RS_S0_SCISSOR(state->scissor) |
+ VIRGL_OBJ_RS_S0_FRONT_CCW(state->front_ccw) |
+ VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(state->clamp_vertex_color) |
+ VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(state->clamp_fragment_color) |
+ VIRGL_OBJ_RS_S0_OFFSET_LINE(state->offset_line) |
+ VIRGL_OBJ_RS_S0_OFFSET_POINT(state->offset_point) |
+ VIRGL_OBJ_RS_S0_OFFSET_TRI(state->offset_tri) |
+ VIRGL_OBJ_RS_S0_POLY_SMOOTH(state->poly_smooth) |
+ VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(state->poly_stipple_enable) |
+ VIRGL_OBJ_RS_S0_POINT_SMOOTH(state->point_smooth) |
+ VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(state->point_size_per_vertex) |
+ VIRGL_OBJ_RS_S0_MULTISAMPLE(state->multisample) |
+ VIRGL_OBJ_RS_S0_LINE_SMOOTH(state->line_smooth) |
+ VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
+ VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(state->line_last_pixel) |
+ VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(state->half_pixel_center) |
+ VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state->bottom_edge_rule);
+
+ virgl_encoder_write_dword(ctx->cbuf, tmp); /* S0 */
+ virgl_encoder_write_dword(ctx->cbuf, fui(state->point_size)); /* S1 */
+ virgl_encoder_write_dword(ctx->cbuf, state->sprite_coord_enable); /* S2 */
+ tmp = VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(state->line_stipple_pattern) |
+ VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(state->line_stipple_factor) |
+ VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(state->clip_plane_enable);
+ virgl_encoder_write_dword(ctx->cbuf, tmp); /* S3 */
+ virgl_encoder_write_dword(ctx->cbuf, fui(state->line_width)); /* S4 */
+ virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_units)); /* S5 */
+ virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_scale)); /* S6 */
+ virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_clamp)); /* S7 */
+ return 0;
+}
+
+static void virgl_emit_shader_header(struct virgl_context *ctx,
+ uint32_t handle, uint32_t len,
+ uint32_t type, uint32_t offlen,
+ uint32_t num_tokens)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SHADER, len));
+ virgl_encoder_write_dword(ctx->cbuf, handle);
+ virgl_encoder_write_dword(ctx->cbuf, type);
+ virgl_encoder_write_dword(ctx->cbuf, offlen);
+ virgl_encoder_write_dword(ctx->cbuf, num_tokens);
+}
+
+static void virgl_emit_shader_streamout(struct virgl_context *ctx,
+ const struct pipe_stream_output_info *so_info)
+{
+ int num_outputs = 0;
+ int i;
+ uint32_t tmp;
+
+ if (so_info)
+ num_outputs = so_info->num_outputs;
+
+ virgl_encoder_write_dword(ctx->cbuf, num_outputs);
+ if (num_outputs) {
+ for (i = 0; i < 4; i++)
+ virgl_encoder_write_dword(ctx->cbuf, so_info->stride[i]);
+
+ for (i = 0; i < so_info->num_outputs; i++) {
+ tmp =
+ VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(so_info->output[i].register_index) |
+ VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(so_info->output[i].start_component) |
+ VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(so_info->output[i].num_components) |
+ VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(so_info->output[i].output_buffer) |
+ VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(so_info->output[i].dst_offset);
+ virgl_encoder_write_dword(ctx->cbuf, tmp);
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ }
+ }
+}
+
+int virgl_encode_shader_state(struct virgl_context *ctx,
+ uint32_t handle,
+ uint32_t type,
+ const struct pipe_stream_output_info *so_info,
+ const struct tgsi_token *tokens)
+{
+ char *str, *sptr;
+ uint32_t shader_len, len;
+ bool bret;
+ int num_tokens = tgsi_num_tokens(tokens);
+ int str_total_size = 65536;
+ int retry_size = 1;
+ uint32_t left_bytes, base_hdr_size, strm_hdr_size, thispass;
+ bool first_pass;
+ str = CALLOC(1, str_total_size);
+ if (!str)
+ return -1;
+
+ do {
+ int old_size;
+
+ bret = tgsi_dump_str(tokens, TGSI_DUMP_FLOAT_AS_HEX, str, str_total_size);
+ if (bret == false) {
+ fprintf(stderr, "Failed to translate shader in available space - trying again\n");
+ old_size = str_total_size;
+ str_total_size = 65536 * ++retry_size;
+ str = REALLOC(str, old_size, str_total_size);
+ if (!str)
+ return -1;
+ }
+ } while (bret == false && retry_size < 10);
+
+ if (bret == false)
+ return -1;
+
+ shader_len = strlen(str) + 1;
+
+ left_bytes = shader_len;
+
+ base_hdr_size = 5;
+ strm_hdr_size = so_info->num_outputs ? so_info->num_outputs * 2 + 4 : 0;
+ first_pass = true;
+ sptr = str;
+ while (left_bytes) {
+ uint32_t length, offlen;
+ int hdr_len = base_hdr_size + (first_pass ? strm_hdr_size : 0);
+ if (ctx->cbuf->cdw + hdr_len + 1 > VIRGL_MAX_CMDBUF_DWORDS)
+ ctx->base.flush(&ctx->base, NULL, 0);
+
+ thispass = (VIRGL_MAX_CMDBUF_DWORDS - ctx->cbuf->cdw - hdr_len - 1) * 4;
+
+ length = MIN2(thispass, left_bytes);
+ len = ((length + 3) / 4) + hdr_len;
+
+ if (first_pass)
+ offlen = VIRGL_OBJ_SHADER_OFFSET_VAL(shader_len);
+ else
+ offlen = VIRGL_OBJ_SHADER_OFFSET_VAL((uintptr_t)sptr - (uintptr_t)str) | VIRGL_OBJ_SHADER_OFFSET_CONT;
+
+ virgl_emit_shader_header(ctx, handle, len, type, offlen, num_tokens);
+
+ virgl_emit_shader_streamout(ctx, first_pass ? so_info : NULL);
+
+ virgl_encoder_write_block(ctx->cbuf, (uint8_t *)sptr, length);
+
+ sptr += length;
+ first_pass = false;
+ left_bytes -= length;
+ }
+
+ FREE(str);
+ return 0;
+}
+
+
+int virgl_encode_clear(struct virgl_context *ctx,
+ unsigned buffers,
+ const union pipe_color_union *color,
+ double depth, unsigned stencil)
+{
+ int i;
+
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR, 0, VIRGL_OBJ_CLEAR_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, buffers);
+ for (i = 0; i < 4; i++)
+ virgl_encoder_write_dword(ctx->cbuf, color->ui[i]);
+ virgl_encoder_write_qword(ctx->cbuf, *(uint64_t *)&depth);
+ virgl_encoder_write_dword(ctx->cbuf, stencil);
+ return 0;
+}
+
+int virgl_encoder_set_framebuffer_state(struct virgl_context *ctx,
+ const struct pipe_framebuffer_state *state)
+{
+ struct virgl_surface *zsurf = (struct virgl_surface *)state->zsbuf;
+ int i;
+
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE, 0, VIRGL_SET_FRAMEBUFFER_STATE_SIZE(state->nr_cbufs)));
+ virgl_encoder_write_dword(ctx->cbuf, state->nr_cbufs);
+ virgl_encoder_write_dword(ctx->cbuf, zsurf ? zsurf->handle : 0);
+ for (i = 0; i < state->nr_cbufs; i++) {
+ struct virgl_surface *surf = (struct virgl_surface *)state->cbufs[i];
+ virgl_encoder_write_dword(ctx->cbuf, surf ? surf->handle : 0);
+ }
+
+ return 0;
+}
+
+int virgl_encoder_set_viewport_states(struct virgl_context *ctx,
+ int start_slot,
+ int num_viewports,
+ const struct pipe_viewport_state *states)
+{
+ int i,v;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VIEWPORT_STATE, 0, VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports)));
+ virgl_encoder_write_dword(ctx->cbuf, start_slot);
+ for (v = 0; v < num_viewports; v++) {
+ for (i = 0; i < 3; i++)
+ virgl_encoder_write_dword(ctx->cbuf, fui(states[v].scale[i]));
+ for (i = 0; i < 3; i++)
+ virgl_encoder_write_dword(ctx->cbuf, fui(states[v].translate[i]));
+ }
+ return 0;
+}
+
+int virgl_encoder_create_vertex_elements(struct virgl_context *ctx,
+ uint32_t handle,
+ unsigned num_elements,
+ const struct pipe_vertex_element *element)
+{
+ int i;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_VERTEX_ELEMENTS, VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements)));
+ virgl_encoder_write_dword(ctx->cbuf, handle);
+ for (i = 0; i < num_elements; i++) {
+ virgl_encoder_write_dword(ctx->cbuf, element[i].src_offset);
+ virgl_encoder_write_dword(ctx->cbuf, element[i].instance_divisor);
+ virgl_encoder_write_dword(ctx->cbuf, element[i].vertex_buffer_index);
+ virgl_encoder_write_dword(ctx->cbuf, element[i].src_format);
+ }
+ return 0;
+}
+
+int virgl_encoder_set_vertex_buffers(struct virgl_context *ctx,
+ unsigned num_buffers,
+ const struct pipe_vertex_buffer *buffers)
+{
+ int i;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers)));
+ for (i = 0; i < num_buffers; i++) {
+ struct virgl_resource *res = (struct virgl_resource *)buffers[i].buffer;
+ virgl_encoder_write_dword(ctx->cbuf, buffers[i].stride);
+ virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
+ virgl_encoder_write_res(ctx, res);
+ }
+ return 0;
+}
+
+int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
+ const struct pipe_index_buffer *ib)
+{
+ int length = VIRGL_SET_INDEX_BUFFER_SIZE(ib);
+ struct virgl_resource *res = NULL;
+ if (ib)
+ res = (struct virgl_resource *)ib->buffer;
+
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_INDEX_BUFFER, 0, length));
+ virgl_encoder_write_res(ctx, res);
+ if (ib) {
+ virgl_encoder_write_dword(ctx->cbuf, ib->index_size);
+ virgl_encoder_write_dword(ctx->cbuf, ib->offset);
+ }
+ return 0;
+}
+
+int virgl_encoder_draw_vbo(struct virgl_context *ctx,
+ const struct pipe_draw_info *info)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO, 0, VIRGL_DRAW_VBO_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, info->start);
+ virgl_encoder_write_dword(ctx->cbuf, info->count);
+ virgl_encoder_write_dword(ctx->cbuf, info->mode);
+ virgl_encoder_write_dword(ctx->cbuf, info->indexed);
+ virgl_encoder_write_dword(ctx->cbuf, info->instance_count);
+ virgl_encoder_write_dword(ctx->cbuf, info->index_bias);
+ virgl_encoder_write_dword(ctx->cbuf, info->start_instance);
+ virgl_encoder_write_dword(ctx->cbuf, info->primitive_restart);
+ virgl_encoder_write_dword(ctx->cbuf, info->restart_index);
+ virgl_encoder_write_dword(ctx->cbuf, info->min_index);
+ virgl_encoder_write_dword(ctx->cbuf, info->max_index);
+ if (info->count_from_stream_output)
+ virgl_encoder_write_dword(ctx->cbuf, info->count_from_stream_output->buffer_size);
+ else
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ return 0;
+}
+
+int virgl_encoder_create_surface(struct virgl_context *ctx,
+ uint32_t handle,
+ struct virgl_resource *res,
+ const struct pipe_surface *templat)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SURFACE, VIRGL_OBJ_SURFACE_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, handle);
+ virgl_encoder_write_res(ctx, res);
+ virgl_encoder_write_dword(ctx->cbuf, templat->format);
+ if (templat->texture->target == PIPE_BUFFER) {
+ virgl_encoder_write_dword(ctx->cbuf, templat->u.buf.first_element);
+ virgl_encoder_write_dword(ctx->cbuf, templat->u.buf.last_element);
+ } else {
+ virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.level);
+ virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.first_layer | (templat->u.tex.last_layer << 16));
+ }
+ return 0;
+}
+
+int virgl_encoder_create_so_target(struct virgl_context *ctx,
+ uint32_t handle,
+ struct virgl_resource *res,
+ unsigned buffer_offset,
+ unsigned buffer_size)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_STREAMOUT_TARGET, VIRGL_OBJ_STREAMOUT_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, handle);
+ virgl_encoder_write_res(ctx, res);
+ virgl_encoder_write_dword(ctx->cbuf, buffer_offset);
+ virgl_encoder_write_dword(ctx->cbuf, buffer_size);
+ return 0;
+}
+
+static void virgl_encoder_iw_emit_header_1d(struct virgl_context *ctx,
+ struct virgl_resource *res,
+ unsigned level, unsigned usage,
+ const struct pipe_box *box,
+ unsigned stride, unsigned layer_stride)
+{
+ virgl_encoder_write_res(ctx, res);
+ virgl_encoder_write_dword(ctx->cbuf, level);
+ virgl_encoder_write_dword(ctx->cbuf, usage);
+ virgl_encoder_write_dword(ctx->cbuf, stride);
+ virgl_encoder_write_dword(ctx->cbuf, layer_stride);
+ virgl_encoder_write_dword(ctx->cbuf, box->x);
+ virgl_encoder_write_dword(ctx->cbuf, box->y);
+ virgl_encoder_write_dword(ctx->cbuf, box->z);
+ virgl_encoder_write_dword(ctx->cbuf, box->width);
+ virgl_encoder_write_dword(ctx->cbuf, box->height);
+ virgl_encoder_write_dword(ctx->cbuf, box->depth);
+}
+
+int virgl_encoder_inline_write(struct virgl_context *ctx,
+ struct virgl_resource *res,
+ unsigned level, unsigned usage,
+ const struct pipe_box *box,
+ const void *data, unsigned stride,
+ unsigned layer_stride)
+{
+ uint32_t size = (stride ? stride : box->width) * box->height;
+ uint32_t length, thispass, left_bytes;
+ struct pipe_box mybox = *box;
+
+ length = 11 + (size + 3) / 4;
+ if ((ctx->cbuf->cdw + length + 1) > VIRGL_MAX_CMDBUF_DWORDS) {
+ if (box->height > 1 || box->depth > 1) {
+ debug_printf("inline transfer failed: multi-dimensional box too large for the command buffer\n");
+ assert(0);
+ }
+ }
+
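+ /* Split the data into chunks that fit into the remaining command buffer
+  * space, flushing in between; only 1D boxes can be split this way, which
+  * is what the assert above guards against.
+  */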
+ left_bytes = size;
+ while (left_bytes) {
+ if (ctx->cbuf->cdw + 12 > VIRGL_MAX_CMDBUF_DWORDS)
+ ctx->base.flush(&ctx->base, NULL, 0);
+
+ thispass = (VIRGL_MAX_CMDBUF_DWORDS - ctx->cbuf->cdw - 12) * 4;
+
+ length = MIN2(thispass, left_bytes);
+
+ mybox.width = length;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_INLINE_WRITE, 0, ((length + 3) / 4) + 11));
+ virgl_encoder_iw_emit_header_1d(ctx, res, level, usage, &mybox, stride, layer_stride);
+ virgl_encoder_write_block(ctx->cbuf, data, length);
+ left_bytes -= length;
+ mybox.x += length;
+ data = (const char *)data + length;
+ }
+ return 0;
+}
+
+int virgl_encoder_flush_frontbuffer(struct virgl_context *ctx,
+ struct virgl_resource *res)
+{
+// virgl_encoder_write_dword(ctx->cbuf, VIRGL_CMD0(VIRGL_CCMD_FLUSH_FRONTBUFFER, 0, 1));
+// virgl_encoder_write_dword(ctx->cbuf, res_handle);
+ return 0;
+}
+
+int virgl_encode_sampler_state(struct virgl_context *ctx,
+ uint32_t handle,
+ const struct pipe_sampler_state *state)
+{
+ uint32_t tmp;
+ int i;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_STATE, VIRGL_OBJ_SAMPLER_STATE_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, handle);
+
+ tmp = VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(state->wrap_s) |
+ VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(state->wrap_t) |
+ VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(state->wrap_r) |
+ VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(state->min_img_filter) |
+ VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(state->min_mip_filter) |
+ VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(state->mag_img_filter) |
+ VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(state->compare_mode) |
+ VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state->compare_func);
+
+ virgl_encoder_write_dword(ctx->cbuf, tmp);
+ virgl_encoder_write_dword(ctx->cbuf, fui(state->lod_bias));
+ virgl_encoder_write_dword(ctx->cbuf, fui(state->min_lod));
+ virgl_encoder_write_dword(ctx->cbuf, fui(state->max_lod));
+ for (i = 0; i < 4; i++)
+ virgl_encoder_write_dword(ctx->cbuf, state->border_color.ui[i]);
+ return 0;
+}
+
+
+int virgl_encode_sampler_view(struct virgl_context *ctx,
+ uint32_t handle,
+ struct virgl_resource *res,
+ const struct pipe_sampler_view *state)
+{
+ uint32_t tmp;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_VIEW, VIRGL_OBJ_SAMPLER_VIEW_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, handle);
+ virgl_encoder_write_res(ctx, res);
+ virgl_encoder_write_dword(ctx->cbuf, state->format);
+ if (res->u.b.target == PIPE_BUFFER) {
+ virgl_encoder_write_dword(ctx->cbuf, state->u.buf.first_element);
+ virgl_encoder_write_dword(ctx->cbuf, state->u.buf.last_element);
+ } else {
+ virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_layer | state->u.tex.last_layer << 16);
+ virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_level | state->u.tex.last_level << 8);
+ }
+ tmp = VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(state->swizzle_r) |
+ VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(state->swizzle_g) |
+ VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(state->swizzle_b) |
+ VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(state->swizzle_a);
+ virgl_encoder_write_dword(ctx->cbuf, tmp);
+ return 0;
+}
+
+int virgl_encode_set_sampler_views(struct virgl_context *ctx,
+ uint32_t shader_type,
+ uint32_t start_slot,
+ uint32_t num_views,
+ struct virgl_sampler_view **views)
+{
+ int i;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLER_VIEWS, 0, VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views)));
+ virgl_encoder_write_dword(ctx->cbuf, shader_type);
+ virgl_encoder_write_dword(ctx->cbuf, start_slot);
+ for (i = 0; i < num_views; i++) {
+ uint32_t handle = views[i] ? views[i]->handle : 0;
+ virgl_encoder_write_dword(ctx->cbuf, handle);
+ }
+ return 0;
+}
+
+int virgl_encode_bind_sampler_states(struct virgl_context *ctx,
+ uint32_t shader_type,
+ uint32_t start_slot,
+ uint32_t num_handles,
+ uint32_t *handles)
+{
+ int i;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SAMPLER_STATES, 0, VIRGL_BIND_SAMPLER_STATES(num_handles)));
+ virgl_encoder_write_dword(ctx->cbuf, shader_type);
+ virgl_encoder_write_dword(ctx->cbuf, start_slot);
+ for (i = 0; i < num_handles; i++)
+ virgl_encoder_write_dword(ctx->cbuf, handles[i]);
+ return 0;
+}
+
+int virgl_encoder_write_constant_buffer(struct virgl_context *ctx,
+ uint32_t shader,
+ uint32_t index,
+ uint32_t size,
+ const void *data)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CONSTANT_BUFFER, 0, size + 2));
+ virgl_encoder_write_dword(ctx->cbuf, shader);
+ virgl_encoder_write_dword(ctx->cbuf, index);
+ if (data)
+ virgl_encoder_write_block(ctx->cbuf, data, size * 4);
+ return 0;
+}
+
+int virgl_encoder_set_uniform_buffer(struct virgl_context *ctx,
+ uint32_t shader,
+ uint32_t index,
+ uint32_t offset,
+ uint32_t length,
+ struct virgl_resource *res)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_UNIFORM_BUFFER, 0, VIRGL_SET_UNIFORM_BUFFER_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, shader);
+ virgl_encoder_write_dword(ctx->cbuf, index);
+ virgl_encoder_write_dword(ctx->cbuf, offset);
+ virgl_encoder_write_dword(ctx->cbuf, length);
+ virgl_encoder_write_res(ctx, res);
+ return 0;
+}
+
+
+int virgl_encoder_set_stencil_ref(struct virgl_context *ctx,
+ const struct pipe_stencil_ref *ref)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STENCIL_REF, 0, VIRGL_SET_STENCIL_REF_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, VIRGL_STENCIL_REF_VAL(ref->ref_value[0], ref->ref_value[1]));
+ return 0;
+}
+
+int virgl_encoder_set_blend_color(struct virgl_context *ctx,
+ const struct pipe_blend_color *color)
+{
+ int i;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_BLEND_COLOR, 0, VIRGL_SET_BLEND_COLOR_SIZE));
+ for (i = 0; i < 4; i++)
+ virgl_encoder_write_dword(ctx->cbuf, fui(color->color[i]));
+ return 0;
+}
+
+int virgl_encoder_set_scissor_state(struct virgl_context *ctx,
+ unsigned start_slot,
+ int num_scissors,
+ const struct pipe_scissor_state *ss)
+{
+ int i;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SCISSOR_STATE, 0, VIRGL_SET_SCISSOR_STATE_SIZE(num_scissors)));
+ virgl_encoder_write_dword(ctx->cbuf, start_slot);
+ for (i = 0; i < num_scissors; i++) {
+ virgl_encoder_write_dword(ctx->cbuf, (ss[i].minx | ss[i].miny << 16));
+ virgl_encoder_write_dword(ctx->cbuf, (ss[i].maxx | ss[i].maxy << 16));
+ }
+ return 0;
+}
+
+void virgl_encoder_set_polygon_stipple(struct virgl_context *ctx,
+ const struct pipe_poly_stipple *ps)
+{
+ int i;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_POLYGON_STIPPLE, 0, VIRGL_POLYGON_STIPPLE_SIZE));
+ for (i = 0; i < VIRGL_POLYGON_STIPPLE_SIZE; i++) {
+ virgl_encoder_write_dword(ctx->cbuf, ps->stipple[i]);
+ }
+}
+
+void virgl_encoder_set_sample_mask(struct virgl_context *ctx,
+ unsigned sample_mask)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLE_MASK, 0, VIRGL_SET_SAMPLE_MASK_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, sample_mask);
+}
+
+void virgl_encoder_set_clip_state(struct virgl_context *ctx,
+ const struct pipe_clip_state *clip)
+{
+ int i, j;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CLIP_STATE, 0, VIRGL_SET_CLIP_STATE_SIZE));
+ for (i = 0; i < VIRGL_MAX_CLIP_PLANES; i++) {
+ for (j = 0; j < 4; j++) {
+ virgl_encoder_write_dword(ctx->cbuf, fui(clip->ucp[i][j]));
+ }
+ }
+}
+
+int virgl_encode_resource_copy_region(struct virgl_context *ctx,
+ struct virgl_resource *dst_res,
+ unsigned dst_level,
+ unsigned dstx, unsigned dsty, unsigned dstz,
+ struct virgl_resource *src_res,
+ unsigned src_level,
+ const struct pipe_box *src_box)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_COPY_REGION, 0, VIRGL_CMD_RESOURCE_COPY_REGION_SIZE));
+ virgl_encoder_write_res(ctx, dst_res);
+ virgl_encoder_write_dword(ctx->cbuf, dst_level);
+ virgl_encoder_write_dword(ctx->cbuf, dstx);
+ virgl_encoder_write_dword(ctx->cbuf, dsty);
+ virgl_encoder_write_dword(ctx->cbuf, dstz);
+ virgl_encoder_write_res(ctx, src_res);
+ virgl_encoder_write_dword(ctx->cbuf, src_level);
+ virgl_encoder_write_dword(ctx->cbuf, src_box->x);
+ virgl_encoder_write_dword(ctx->cbuf, src_box->y);
+ virgl_encoder_write_dword(ctx->cbuf, src_box->z);
+ virgl_encoder_write_dword(ctx->cbuf, src_box->width);
+ virgl_encoder_write_dword(ctx->cbuf, src_box->height);
+ virgl_encoder_write_dword(ctx->cbuf, src_box->depth);
+ return 0;
+}
+
+int virgl_encode_blit(struct virgl_context *ctx,
+ struct virgl_resource *dst_res,
+ struct virgl_resource *src_res,
+ const struct pipe_blit_info *blit)
+{
+ uint32_t tmp;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BLIT, 0, VIRGL_CMD_BLIT_SIZE));
+ tmp = VIRGL_CMD_BLIT_S0_MASK(blit->mask) |
+ VIRGL_CMD_BLIT_S0_FILTER(blit->filter) |
+ VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(blit->scissor_enable);
+ virgl_encoder_write_dword(ctx->cbuf, tmp);
+ virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.minx | blit->scissor.miny << 16));
+ virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.maxx | blit->scissor.maxy << 16));
+
+ virgl_encoder_write_res(ctx, dst_res);
+ virgl_encoder_write_dword(ctx->cbuf, blit->dst.level);
+ virgl_encoder_write_dword(ctx->cbuf, blit->dst.format);
+ virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.x);
+ virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.y);
+ virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.z);
+ virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.width);
+ virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.height);
+ virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.depth);
+
+ virgl_encoder_write_res(ctx, src_res);
+ virgl_encoder_write_dword(ctx->cbuf, blit->src.level);
+ virgl_encoder_write_dword(ctx->cbuf, blit->src.format);
+ virgl_encoder_write_dword(ctx->cbuf, blit->src.box.x);
+ virgl_encoder_write_dword(ctx->cbuf, blit->src.box.y);
+ virgl_encoder_write_dword(ctx->cbuf, blit->src.box.z);
+ virgl_encoder_write_dword(ctx->cbuf, blit->src.box.width);
+ virgl_encoder_write_dword(ctx->cbuf, blit->src.box.height);
+ virgl_encoder_write_dword(ctx->cbuf, blit->src.box.depth);
+ return 0;
+}
+
+int virgl_encoder_create_query(struct virgl_context *ctx,
+ uint32_t handle,
+ uint query_type,
+ uint query_index,
+ struct virgl_resource *res,
+ uint32_t offset)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_QUERY, VIRGL_OBJ_QUERY_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, handle);
+ virgl_encoder_write_dword(ctx->cbuf, ((query_type & 0xffff) | (query_index << 16)));
+ virgl_encoder_write_dword(ctx->cbuf, offset);
+ virgl_encoder_write_res(ctx, res);
+ return 0;
+}
+
+int virgl_encoder_begin_query(struct virgl_context *ctx,
+ uint32_t handle)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BEGIN_QUERY, 0, 1));
+ virgl_encoder_write_dword(ctx->cbuf, handle);
+ return 0;
+}
+
+int virgl_encoder_end_query(struct virgl_context *ctx,
+ uint32_t handle)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_END_QUERY, 0, 1));
+ virgl_encoder_write_dword(ctx->cbuf, handle);
+ return 0;
+}
+
+int virgl_encoder_get_query_result(struct virgl_context *ctx,
+ uint32_t handle, boolean wait)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT, 0, 2));
+ virgl_encoder_write_dword(ctx->cbuf, handle);
+ virgl_encoder_write_dword(ctx->cbuf, wait ? 1 : 0);
+ return 0;
+}
+
+int virgl_encoder_render_condition(struct virgl_context *ctx,
+ uint32_t handle, boolean condition,
+ uint mode)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_RENDER_CONDITION, 0, VIRGL_RENDER_CONDITION_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, handle);
+ virgl_encoder_write_dword(ctx->cbuf, condition);
+ virgl_encoder_write_dword(ctx->cbuf, mode);
+ return 0;
+}
+
+int virgl_encoder_set_so_targets(struct virgl_context *ctx,
+ unsigned num_targets,
+ struct pipe_stream_output_target **targets,
+ unsigned append_bitmask)
+{
+ int i;
+
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STREAMOUT_TARGETS, 0, num_targets + 1));
+ virgl_encoder_write_dword(ctx->cbuf, append_bitmask);
+ for (i = 0; i < num_targets; i++) {
+ struct virgl_so_target *tg = (struct virgl_so_target *)targets[i];
+ virgl_encoder_write_dword(ctx->cbuf, tg->handle);
+ }
+ return 0;
+}
+
+
+int virgl_encoder_set_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SUB_CTX, 0, 1));
+ virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
+ return 0;
+}
+
+int virgl_encoder_create_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_SUB_CTX, 0, 1));
+ virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
+ return 0;
+}
+
+int virgl_encoder_destroy_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_SUB_CTX, 0, 1));
+ virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
+ return 0;
+}
+
+int virgl_encode_bind_shader(struct virgl_context *ctx,
+ uint32_t handle, uint32_t type)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SHADER, 0, 2));
+ virgl_encoder_write_dword(ctx->cbuf, handle);
+ virgl_encoder_write_dword(ctx->cbuf, type);
+ return 0;
+}
diff --git a/src/gallium/drivers/virgl/virgl_encode.h b/src/gallium/drivers/virgl/virgl_encode.h
new file mode 100644
index 00000000000..eabc421f861
--- /dev/null
+++ b/src/gallium/drivers/virgl/virgl_encode.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2014, 2015 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef VIRGL_ENCODE_H
+#define VIRGL_ENCODE_H
+
+#include "virgl_context.h"
+struct virgl_surface {
+ struct pipe_surface base;
+ uint32_t handle;
+};
+
+static inline void virgl_encoder_write_dword(struct virgl_cmd_buf *state,
+ uint32_t dword)
+{
+ state->buf[state->cdw++] = dword;
+}
+
+static inline void virgl_encoder_write_qword(struct virgl_cmd_buf *state,
+ uint64_t qword)
+{
+ memcpy(state->buf + state->cdw, &qword, sizeof(uint64_t));
+ state->cdw += 2;
+}
+
+static inline void virgl_encoder_write_block(struct virgl_cmd_buf *state,
+ const uint8_t *ptr, uint32_t len)
+{
+ int x;
+ memcpy(state->buf + state->cdw, ptr, len);
+ x = (len % 4);
+// fprintf(stderr, "[%d] block %d x is %d\n", state->cdw, len, x);
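+ /* zero the unused tail of the final dword so no uninitialized bytes are encoded */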
+ if (x) {
+ uint8_t *mp = (uint8_t *)(state->buf + state->cdw);
+ mp += len;
+ memset(mp, 0, 4 - x);
+ }
+ state->cdw += (len + 3) / 4;
+}
+
+extern int virgl_encode_blend_state(struct virgl_context *ctx,
+ uint32_t handle,
+ const struct pipe_blend_state *blend_state);
+extern int virgl_encode_rasterizer_state(struct virgl_context *ctx,
+ uint32_t handle,
+ const struct pipe_rasterizer_state *state);
+
+extern int virgl_encode_shader_state(struct virgl_context *ctx,
+ uint32_t handle,
+ uint32_t type,
+ const struct pipe_stream_output_info *so_info,
+ const struct tgsi_token *tokens);
+
+int virgl_encode_stream_output_info(struct virgl_context *ctx,
+ uint32_t handle,
+ uint32_t type,
+ const struct pipe_shader_state *shader);
+
+int virgl_encoder_set_so_targets(struct virgl_context *ctx,
+ unsigned num_targets,
+ struct pipe_stream_output_target **targets,
+ unsigned append_bitmask);
+
+int virgl_encoder_create_so_target(struct virgl_context *ctx,
+ uint32_t handle,
+ struct virgl_resource *res,
+ unsigned buffer_offset,
+ unsigned buffer_size);
+
+int virgl_encode_clear(struct virgl_context *ctx,
+ unsigned buffers,
+ const union pipe_color_union *color,
+ double depth, unsigned stencil);
+
+int virgl_encode_bind_object(struct virgl_context *ctx,
+ uint32_t handle, uint32_t object);
+int virgl_encode_delete_object(struct virgl_context *ctx,
+ uint32_t handle, uint32_t object);
+
+int virgl_encoder_set_framebuffer_state(struct virgl_context *ctx,
+ const struct pipe_framebuffer_state *state);
+int virgl_encoder_set_viewport_states(struct virgl_context *ctx,
+ int start_slot,
+ int num_viewports,
+ const struct pipe_viewport_state *states);
+
+int virgl_encoder_draw_vbo(struct virgl_context *ctx,
+ const struct pipe_draw_info *info);
+
+
+int virgl_encoder_create_surface(struct virgl_context *ctx,
+ uint32_t handle,
+ struct virgl_resource *res,
+ const struct pipe_surface *templat);
+
+int virgl_encoder_flush_frontbuffer(struct virgl_context *ctx,
+ struct virgl_resource *res);
+
+int virgl_encoder_create_vertex_elements(struct virgl_context *ctx,
+ uint32_t handle,
+ unsigned num_elements,
+ const struct pipe_vertex_element *element);
+
+int virgl_encoder_set_vertex_buffers(struct virgl_context *ctx,
+ unsigned num_buffers,
+ const struct pipe_vertex_buffer *buffers);
+
+
+int virgl_encoder_inline_write(struct virgl_context *ctx,
+ struct virgl_resource *res,
+ unsigned level, unsigned usage,
+ const struct pipe_box *box,
+ const void *data, unsigned stride,
+ unsigned layer_stride);
+int virgl_encode_sampler_state(struct virgl_context *ctx,
+ uint32_t handle,
+ const struct pipe_sampler_state *state);
+int virgl_encode_sampler_view(struct virgl_context *ctx,
+ uint32_t handle,
+ struct virgl_resource *res,
+ const struct pipe_sampler_view *state);
+
+int virgl_encode_set_sampler_views(struct virgl_context *ctx,
+ uint32_t shader_type,
+ uint32_t start_slot,
+ uint32_t num_views,
+ struct virgl_sampler_view **views);
+
+int virgl_encode_bind_sampler_states(struct virgl_context *ctx,
+ uint32_t shader_type,
+ uint32_t start_slot,
+ uint32_t num_handles,
+ uint32_t *handles);
+
+int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
+ const struct pipe_index_buffer *ib);
+
+uint32_t virgl_object_assign_handle(void);
+
+int virgl_encoder_write_constant_buffer(struct virgl_context *ctx,
+ uint32_t shader,
+ uint32_t index,
+ uint32_t size,
+ const void *data);
+
+int virgl_encoder_set_uniform_buffer(struct virgl_context *ctx,
+ uint32_t shader,
+ uint32_t index,
+ uint32_t offset,
+ uint32_t length,
+ struct virgl_resource *res);
+int virgl_encode_dsa_state(struct virgl_context *ctx,
+ uint32_t handle,
+ const struct pipe_depth_stencil_alpha_state *dsa_state);
+
+int virgl_encoder_set_stencil_ref(struct virgl_context *ctx,
+ const struct pipe_stencil_ref *ref);
+
+int virgl_encoder_set_blend_color(struct virgl_context *ctx,
+ const struct pipe_blend_color *color);
+
+int virgl_encoder_set_scissor_state(struct virgl_context *ctx,
+ unsigned start_slot,
+ int num_scissors,
+ const struct pipe_scissor_state *ss);
+
+void virgl_encoder_set_polygon_stipple(struct virgl_context *ctx,
+ const struct pipe_poly_stipple *ps);
+
+void virgl_encoder_set_sample_mask(struct virgl_context *ctx,
+ unsigned sample_mask);
+
+void virgl_encoder_set_clip_state(struct virgl_context *ctx,
+ const struct pipe_clip_state *clip);
+
+int virgl_encode_resource_copy_region(struct virgl_context *ctx,
+ struct virgl_resource *dst_res,
+ unsigned dst_level,
+ unsigned dstx, unsigned dsty, unsigned dstz,
+ struct virgl_resource *src_res,
+ unsigned src_level,
+ const struct pipe_box *src_box);
+
+int virgl_encode_blit(struct virgl_context *ctx,
+ struct virgl_resource *dst_res,
+ struct virgl_resource *src_res,
+ const struct pipe_blit_info *blit);
+
+int virgl_encoder_create_query(struct virgl_context *ctx,
+ uint32_t handle,
+ uint query_type,
+ uint query_index,
+ struct virgl_resource *res,
+ uint32_t offset);
+
+int virgl_encoder_begin_query(struct virgl_context *ctx,
+ uint32_t handle);
+int virgl_encoder_end_query(struct virgl_context *ctx,
+ uint32_t handle);
+int virgl_encoder_get_query_result(struct virgl_context *ctx,
+ uint32_t handle, boolean wait);
+
+int virgl_encoder_render_condition(struct virgl_context *ctx,
+ uint32_t handle, boolean condition,
+ uint mode);
+
+int virgl_encoder_set_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id);
+int virgl_encoder_create_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id);
+int virgl_encoder_destroy_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id);
+
+int virgl_encode_bind_shader(struct virgl_context *ctx,
+ uint32_t handle, uint32_t type);
+#endif
diff --git a/src/gallium/drivers/virgl/virgl_protocol.h b/src/gallium/drivers/virgl/virgl_protocol.h
new file mode 100644
index 00000000000..ca3142f5f72
--- /dev/null
+++ b/src/gallium/drivers/virgl/virgl_protocol.h
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2014, 2015 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef VIRGL_PROTOCOL_H
+#define VIRGL_PROTOCOL_H
+
+#define VIRGL_QUERY_STATE_NEW 0
+#define VIRGL_QUERY_STATE_DONE 1
+#define VIRGL_QUERY_STATE_WAIT_HOST 2
+
+struct virgl_host_query_state {
+ uint32_t query_state;
+ uint32_t result_size;
+ uint64_t result;
+};
+
+enum virgl_object_type {
+ VIRGL_OBJECT_NULL,
+ VIRGL_OBJECT_BLEND,
+ VIRGL_OBJECT_RASTERIZER,
+ VIRGL_OBJECT_DSA,
+ VIRGL_OBJECT_SHADER,
+ VIRGL_OBJECT_VERTEX_ELEMENTS,
+ VIRGL_OBJECT_SAMPLER_VIEW,
+ VIRGL_OBJECT_SAMPLER_STATE,
+ VIRGL_OBJECT_SURFACE,
+ VIRGL_OBJECT_QUERY,
+ VIRGL_OBJECT_STREAMOUT_TARGET,
+ VIRGL_MAX_OBJECTS,
+};
+
+/* context cmds to be encoded in the command stream */
+enum virgl_context_cmd {
+ VIRGL_CCMD_NOP = 0,
+ VIRGL_CCMD_CREATE_OBJECT = 1,
+ VIRGL_CCMD_BIND_OBJECT,
+ VIRGL_CCMD_DESTROY_OBJECT,
+ VIRGL_CCMD_SET_VIEWPORT_STATE,
+ VIRGL_CCMD_SET_FRAMEBUFFER_STATE,
+ VIRGL_CCMD_SET_VERTEX_BUFFERS,
+ VIRGL_CCMD_CLEAR,
+ VIRGL_CCMD_DRAW_VBO,
+ VIRGL_CCMD_RESOURCE_INLINE_WRITE,
+ VIRGL_CCMD_SET_SAMPLER_VIEWS,
+ VIRGL_CCMD_SET_INDEX_BUFFER,
+ VIRGL_CCMD_SET_CONSTANT_BUFFER,
+ VIRGL_CCMD_SET_STENCIL_REF,
+ VIRGL_CCMD_SET_BLEND_COLOR,
+ VIRGL_CCMD_SET_SCISSOR_STATE,
+ VIRGL_CCMD_BLIT,
+ VIRGL_CCMD_RESOURCE_COPY_REGION,
+ VIRGL_CCMD_BIND_SAMPLER_STATES,
+ VIRGL_CCMD_BEGIN_QUERY,
+ VIRGL_CCMD_END_QUERY,
+ VIRGL_CCMD_GET_QUERY_RESULT,
+ VIRGL_CCMD_SET_POLYGON_STIPPLE,
+ VIRGL_CCMD_SET_CLIP_STATE,
+ VIRGL_CCMD_SET_SAMPLE_MASK,
+ VIRGL_CCMD_SET_STREAMOUT_TARGETS,
+ VIRGL_CCMD_SET_RENDER_CONDITION,
+ VIRGL_CCMD_SET_UNIFORM_BUFFER,
+
+ VIRGL_CCMD_SET_SUB_CTX,
+ VIRGL_CCMD_CREATE_SUB_CTX,
+ VIRGL_CCMD_DESTROY_SUB_CTX,
+ VIRGL_CCMD_BIND_SHADER,
+};
+
+/*
+ 8-bit cmd headers
+ 8-bit object type
+ 16-bit length
+*/
+
+#define VIRGL_CMD0(cmd, obj, len) ((cmd) | ((obj) << 8) | ((len) << 16))
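+/* e.g. VIRGL_CMD0(VIRGL_CCMD_CLEAR, 0, VIRGL_OBJ_CLEAR_SIZE) places the clear
+   command in bits 0-7, object type 0 in bits 8-15 and the 8 dword payload
+   length in bits 16-31. */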
+
+/* hw specification */
+#define VIRGL_MAX_COLOR_BUFS 8
+#define VIRGL_MAX_CLIP_PLANES 8
+
+#define VIRGL_OBJ_CREATE_HEADER 0
+#define VIRGL_OBJ_CREATE_HANDLE 1
+
+#define VIRGL_OBJ_BIND_HEADER 0
+#define VIRGL_OBJ_BIND_HANDLE 1
+
+#define VIRGL_OBJ_DESTROY_HANDLE 1
+
+/* some of these defines are a specification - not used in the code */
+/* bit offsets for blend state object */
+#define VIRGL_OBJ_BLEND_SIZE (VIRGL_MAX_COLOR_BUFS + 3)
+#define VIRGL_OBJ_BLEND_HANDLE 1
+#define VIRGL_OBJ_BLEND_S0 2
+#define VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(x) (((x) & 0x1) << 0)
+#define VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(x) (((x) & 0x1) << 1)
+#define VIRGL_OBJ_BLEND_S0_DITHER(x) (((x) & 0x1) << 2)
+#define VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(x) (((x) & 0x1) << 3)
+#define VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(x) (((x) & 0x1) << 4)
+#define VIRGL_OBJ_BLEND_S1 3
+#define VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(x) (((x) & 0xf) << 0)
+/* repeated once per number of cbufs */
+
+#define VIRGL_OBJ_BLEND_S2(cbuf) (4 + (cbuf))
+#define VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(x) (((x) & 0x1) << 0)
+#define VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(x) (((x) & 0x7) << 1)
+#define VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(x) (((x) & 0x1f) << 4)
+#define VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(x) (((x) & 0x1f) << 9)
+#define VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(x) (((x) & 0x7) << 14)
+#define VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(x) (((x) & 0x1f) << 17)
+#define VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(x) (((x) & 0x1f) << 22)
+#define VIRGL_OBJ_BLEND_S2_RT_COLORMASK(x) (((x) & 0xf) << 27)
+
+/* bit offsets for DSA state */
+#define VIRGL_OBJ_DSA_SIZE 5
+#define VIRGL_OBJ_DSA_HANDLE 1
+#define VIRGL_OBJ_DSA_S0 2
+#define VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(x) (((x) & 0x1) << 0)
+#define VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(x) (((x) & 0x1) << 1)
+#define VIRGL_OBJ_DSA_S0_DEPTH_FUNC(x) (((x) & 0x7) << 2)
+#define VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(x) (((x) & 0x1) << 8)
+#define VIRGL_OBJ_DSA_S0_ALPHA_FUNC(x) (((x) & 0x7) << 9)
+#define VIRGL_OBJ_DSA_S1 3
+#define VIRGL_OBJ_DSA_S2 4
+#define VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(x) (((x) & 0x1) << 0)
+#define VIRGL_OBJ_DSA_S1_STENCIL_FUNC(x) (((x) & 0x7) << 1)
+#define VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(x) (((x) & 0x7) << 4)
+#define VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(x) (((x) & 0x7) << 7)
+#define VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(x) (((x) & 0x7) << 10)
+#define VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(x) (((x) & 0xff) << 13)
+#define VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(x) (((x) & 0xff) << 21)
+#define VIRGL_OBJ_DSA_ALPHA_REF 5
+
+/* offsets for rasterizer state */
+#define VIRGL_OBJ_RS_SIZE 9
+#define VIRGL_OBJ_RS_HANDLE 1
+#define VIRGL_OBJ_RS_S0 2
+#define VIRGL_OBJ_RS_S0_FLATSHADE(x) (((x) & 0x1) << 0)
+#define VIRGL_OBJ_RS_S0_DEPTH_CLIP(x) (((x) & 0x1) << 1)
+#define VIRGL_OBJ_RS_S0_CLIP_HALFZ(x) (((x) & 0x1) << 2)
+#define VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(x) (((x) & 0x1) << 3)
+#define VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(x) (((x) & 0x1) << 4)
+#define VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(x) (((x) & 0x1) << 5)
+#define VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(x) (((x) & 0x1) << 6)
+#define VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(x) (((x) & 0x1) << 7)
+#define VIRGL_OBJ_RS_S0_CULL_FACE(x) (((x) & 0x3) << 8)
+#define VIRGL_OBJ_RS_S0_FILL_FRONT(x) (((x) & 0x3) << 10)
+#define VIRGL_OBJ_RS_S0_FILL_BACK(x) (((x) & 0x3) << 12)
+#define VIRGL_OBJ_RS_S0_SCISSOR(x) (((x) & 0x1) << 14)
+#define VIRGL_OBJ_RS_S0_FRONT_CCW(x) (((x) & 0x1) << 15)
+#define VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(x) (((x) & 0x1) << 16)
+#define VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(x) (((x) & 0x1) << 17)
+#define VIRGL_OBJ_RS_S0_OFFSET_LINE(x) (((x) & 0x1) << 18)
+#define VIRGL_OBJ_RS_S0_OFFSET_POINT(x) (((x) & 0x1) << 19)
+#define VIRGL_OBJ_RS_S0_OFFSET_TRI(x) (((x) & 0x1) << 20)
+#define VIRGL_OBJ_RS_S0_POLY_SMOOTH(x) (((x) & 0x1) << 21)
+#define VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(x) (((x) & 0x1) << 22)
+#define VIRGL_OBJ_RS_S0_POINT_SMOOTH(x) (((x) & 0x1) << 23)
+#define VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(x) (((x) & 0x1) << 24)
+#define VIRGL_OBJ_RS_S0_MULTISAMPLE(x) (((x) & 0x1) << 25)
+#define VIRGL_OBJ_RS_S0_LINE_SMOOTH(x) (((x) & 0x1) << 26)
+#define VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(x) (((x) & 0x1) << 27)
+#define VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(x) (((x) & 0x1) << 28)
+#define VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(x) (((x) & 0x1) << 29)
+#define VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(x) (((x) & 0x1) << 30)
+
+#define VIRGL_OBJ_RS_POINT_SIZE 3
+#define VIRGL_OBJ_RS_SPRITE_COORD_ENABLE 4
+#define VIRGL_OBJ_RS_S3 5
+
+#define VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(x) (((x) & 0xffff) << 0)
+#define VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(x) (((x) & 0xff) << 16)
+#define VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(x) (((x) & 0xff) << 24)
+#define VIRGL_OBJ_RS_LINE_WIDTH 6
+#define VIRGL_OBJ_RS_OFFSET_UNITS 7
+#define VIRGL_OBJ_RS_OFFSET_SCALE 8
+#define VIRGL_OBJ_RS_OFFSET_CLAMP 9
+
+#define VIRGL_OBJ_CLEAR_SIZE 8
+#define VIRGL_OBJ_CLEAR_BUFFERS 1
+#define VIRGL_OBJ_CLEAR_COLOR_0 2 /* color is 4 * u32/f32/i32 */
+#define VIRGL_OBJ_CLEAR_COLOR_1 3
+#define VIRGL_OBJ_CLEAR_COLOR_2 4
+#define VIRGL_OBJ_CLEAR_COLOR_3 5
+#define VIRGL_OBJ_CLEAR_DEPTH_0 6 /* depth is a double precision float */
+#define VIRGL_OBJ_CLEAR_DEPTH_1 7
+#define VIRGL_OBJ_CLEAR_STENCIL 8
+
+/* shader object */
+#define VIRGL_OBJ_SHADER_HDR_SIZE(nso) (5 + ((nso) ? (2 * nso) + 4 : 0))
+#define VIRGL_OBJ_SHADER_HANDLE 1
+#define VIRGL_OBJ_SHADER_TYPE 2
+#define VIRGL_OBJ_SHADER_OFFSET 3
+#define VIRGL_OBJ_SHADER_OFFSET_VAL(x) (((x) & 0x7fffffff) << 0)
+/* start contains full length in VAL - also implies continuations */
+/* continuation contains offset in VAL */
+#define VIRGL_OBJ_SHADER_OFFSET_CONT (0x1 << 31)
+#define VIRGL_OBJ_SHADER_NUM_TOKENS 4
+#define VIRGL_OBJ_SHADER_SO_NUM_OUTPUTS 5
+#define VIRGL_OBJ_SHADER_SO_STRIDE(x) (6 + (x))
+#define VIRGL_OBJ_SHADER_SO_OUTPUT0(x) (10 + (x * 2))
+#define VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(x) (((x) & 0xff) << 0)
+#define VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(x) (((x) & 0x3) << 8)
+#define VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(x) (((x) & 0x7) << 10)
+#define VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(x) (((x) & 0x7) << 13)
+#define VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(x) (((x) & 0xffff) << 16)
+#define VIRGL_OBJ_SHADER_SO_OUTPUT0_SO(x) (11 + (x * 2))
+#define VIRGL_OBJ_SHADER_SO_OUTPUT_STREAM(x) (((x) & 0x03) << 0)
+
+/* viewport state */
+#define VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports) ((6 * num_viewports) + 1)
+#define VIRGL_SET_VIEWPORT_START_SLOT 1
+#define VIRGL_SET_VIEWPORT_STATE_SCALE_0(x) (2 + (x * 6))
+#define VIRGL_SET_VIEWPORT_STATE_SCALE_1(x) (3 + (x * 6))
+#define VIRGL_SET_VIEWPORT_STATE_SCALE_2(x) (4 + (x * 6))
+#define VIRGL_SET_VIEWPORT_STATE_TRANSLATE_0(x) (5 + (x * 6))
+#define VIRGL_SET_VIEWPORT_STATE_TRANSLATE_1(x) (6 + (x * 6))
+#define VIRGL_SET_VIEWPORT_STATE_TRANSLATE_2(x) (7 + (x * 6))
+
+/* framebuffer state */
+#define VIRGL_SET_FRAMEBUFFER_STATE_SIZE(nr_cbufs) (nr_cbufs + 2)
+#define VIRGL_SET_FRAMEBUFFER_STATE_NR_CBUFS 1
+#define VIRGL_SET_FRAMEBUFFER_STATE_NR_ZSURF_HANDLE 2
+#define VIRGL_SET_FRAMEBUFFER_STATE_CBUF_HANDLE(x) ((x) + 3)
+
+/* vertex elements object */
+#define VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements) (((num_elements) * 4) + 1)
+#define VIRGL_OBJ_VERTEX_ELEMENTS_HANDLE 1
+#define VIRGL_OBJ_VERTEX_ELEMENTS_V0_SRC_OFFSET(x) (((x) * 4) + 2) /* repeated per VE */
+#define VIRGL_OBJ_VERTEX_ELEMENTS_V0_INSTANCE_DIVISOR(x) (((x) * 4) + 3)
+#define VIRGL_OBJ_VERTEX_ELEMENTS_V0_VERTEX_BUFFER_INDEX(x) (((x) * 4) + 4)
+#define VIRGL_OBJ_VERTEX_ELEMENTS_V0_SRC_FORMAT(x) (((x) * 4) + 5)
+
+/* vertex buffers */
+#define VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers) ((num_buffers) * 3)
+#define VIRGL_SET_VERTEX_BUFFER_STRIDE(x) (((x) * 3) + 1)
+#define VIRGL_SET_VERTEX_BUFFER_OFFSET(x) (((x) * 3) + 2)
+#define VIRGL_SET_VERTEX_BUFFER_HANDLE(x) (((x) * 3) + 3)
+
+/* index buffer */
+#define VIRGL_SET_INDEX_BUFFER_SIZE(ib) (((ib) ? 2 : 0) + 1)
+#define VIRGL_SET_INDEX_BUFFER_HANDLE 1
+#define VIRGL_SET_INDEX_BUFFER_INDEX_SIZE 2 /* only if sending an IB handle */
+#define VIRGL_SET_INDEX_BUFFER_OFFSET 3 /* only if sending an IB handle */
+
+/* constant buffer */
+#define VIRGL_SET_CONSTANT_BUFFER_SHADER_TYPE 1
+#define VIRGL_SET_CONSTANT_BUFFER_INDEX 2
+#define VIRGL_SET_CONSTANT_BUFFER_DATA_START 3
+
+#define VIRGL_SET_UNIFORM_BUFFER_SIZE 5
+#define VIRGL_SET_UNIFORM_BUFFER_SHADER_TYPE 1
+#define VIRGL_SET_UNIFORM_BUFFER_INDEX 2
+#define VIRGL_SET_UNIFORM_BUFFER_OFFSET 3
+#define VIRGL_SET_UNIFORM_BUFFER_LENGTH 4
+#define VIRGL_SET_UNIFORM_BUFFER_RES_HANDLE 5
+
+/* draw VBO */
+#define VIRGL_DRAW_VBO_SIZE 12
+#define VIRGL_DRAW_VBO_START 1
+#define VIRGL_DRAW_VBO_COUNT 2
+#define VIRGL_DRAW_VBO_MODE 3
+#define VIRGL_DRAW_VBO_INDEXED 4
+#define VIRGL_DRAW_VBO_INSTANCE_COUNT 5
+#define VIRGL_DRAW_VBO_INDEX_BIAS 6
+#define VIRGL_DRAW_VBO_START_INSTANCE 7
+#define VIRGL_DRAW_VBO_PRIMITIVE_RESTART 8
+#define VIRGL_DRAW_VBO_RESTART_INDEX 9
+#define VIRGL_DRAW_VBO_MIN_INDEX 10
+#define VIRGL_DRAW_VBO_MAX_INDEX 11
+#define VIRGL_DRAW_VBO_COUNT_FROM_SO 12
+
+/* create surface */
+#define VIRGL_OBJ_SURFACE_SIZE 5
+#define VIRGL_OBJ_SURFACE_HANDLE 1
+#define VIRGL_OBJ_SURFACE_RES_HANDLE 2
+#define VIRGL_OBJ_SURFACE_FORMAT 3
+#define VIRGL_OBJ_SURFACE_BUFFER_FIRST_ELEMENT 4
+#define VIRGL_OBJ_SURFACE_BUFFER_LAST_ELEMENT 5
+#define VIRGL_OBJ_SURFACE_TEXTURE_LEVEL 4
+#define VIRGL_OBJ_SURFACE_TEXTURE_LAYERS 5
+
+/* create streamout target */
+#define VIRGL_OBJ_STREAMOUT_SIZE 4
+#define VIRGL_OBJ_STREAMOUT_HANDLE 1
+#define VIRGL_OBJ_STREAMOUT_RES_HANDLE 2
+#define VIRGL_OBJ_STREAMOUT_BUFFER_OFFSET 3
+#define VIRGL_OBJ_STREAMOUT_BUFFER_SIZE 4
+
+/* sampler state */
+#define VIRGL_OBJ_SAMPLER_STATE_SIZE 9
+#define VIRGL_OBJ_SAMPLER_STATE_HANDLE 1
+#define VIRGL_OBJ_SAMPLER_STATE_S0 2
+#define VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(x) (((x) & 0x7) << 0)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(x) (((x) & 0x7) << 3)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(x) (((x) & 0x7) << 6)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(x) (((x) & 0x3) << 9)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(x) (((x) & 0x3) << 11)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(x) (((x) & 0x3) << 13)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(x) (((x) & 0x1) << 15)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(x) (((x) & 0x7) << 16)
+
+#define VIRGL_OBJ_SAMPLER_STATE_LOD_BIAS 3
+#define VIRGL_OBJ_SAMPLER_STATE_MIN_LOD 4
+#define VIRGL_OBJ_SAMPLER_STATE_MAX_LOD 5
+#define VIRGL_OBJ_SAMPLER_STATE_BORDER_COLOR(x) ((x) + 6) /* 6 - 9 */
+
+
+/* sampler view */
+#define VIRGL_OBJ_SAMPLER_VIEW_SIZE 6
+#define VIRGL_OBJ_SAMPLER_VIEW_HANDLE 1
+#define VIRGL_OBJ_SAMPLER_VIEW_RES_HANDLE 2
+#define VIRGL_OBJ_SAMPLER_VIEW_FORMAT 3
+#define VIRGL_OBJ_SAMPLER_VIEW_BUFFER_FIRST_ELEMENT 4
+#define VIRGL_OBJ_SAMPLER_VIEW_BUFFER_LAST_ELEMENT 5
+#define VIRGL_OBJ_SAMPLER_VIEW_TEXTURE_LAYER 4
+#define VIRGL_OBJ_SAMPLER_VIEW_TEXTURE_LEVEL 5
+#define VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE 6
+#define VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(x) (((x) & 0x7) << 0)
+#define VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(x) (((x) & 0x7) << 3)
+#define VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(x) (((x) & 0x7) << 6)
+#define VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(x) (((x) & 0x7) << 9)
+
+/* set sampler views */
+#define VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views) ((num_views) + 2)
+#define VIRGL_SET_SAMPLER_VIEWS_SHADER_TYPE 1
+#define VIRGL_SET_SAMPLER_VIEWS_START_SLOT 2
+#define VIRGL_SET_SAMPLER_VIEWS_V0_HANDLE 3
+
+/* bind sampler states */
+#define VIRGL_BIND_SAMPLER_STATES(num_states) ((num_states) + 2)
+#define VIRGL_BIND_SAMPLER_STATES_SHADER_TYPE 1
+#define VIRGL_BIND_SAMPLER_STATES_START_SLOT 2
+#define VIRGL_BIND_SAMPLER_STATES_S0_HANDLE 3
+
+/* set stencil reference */
+#define VIRGL_SET_STENCIL_REF_SIZE 1
+#define VIRGL_SET_STENCIL_REF 1
+#define VIRGL_STENCIL_REF_VAL(f, s) (((f) & 0xff) | (((s) & 0xff) << 8))
+
+/* set blend color */
+#define VIRGL_SET_BLEND_COLOR_SIZE 4
+#define VIRGL_SET_BLEND_COLOR(x) ((x) + 1)
+
+/* set scissor state */
+#define VIRGL_SET_SCISSOR_STATE_SIZE(x) (1 + 2 * (x))
+#define VIRGL_SET_SCISSOR_START_SLOT 1
+#define VIRGL_SET_SCISSOR_MINX_MINY(x) (2 + (x * 2))
+#define VIRGL_SET_SCISSOR_MAXX_MAXY(x) (3 + (x * 2))
+
+/* resource copy region */
+#define VIRGL_CMD_RESOURCE_COPY_REGION_SIZE 13
+#define VIRGL_CMD_RCR_DST_RES_HANDLE 1
+#define VIRGL_CMD_RCR_DST_LEVEL 2
+#define VIRGL_CMD_RCR_DST_X 3
+#define VIRGL_CMD_RCR_DST_Y 4
+#define VIRGL_CMD_RCR_DST_Z 5
+#define VIRGL_CMD_RCR_SRC_RES_HANDLE 6
+#define VIRGL_CMD_RCR_SRC_LEVEL 7
+#define VIRGL_CMD_RCR_SRC_X 8
+#define VIRGL_CMD_RCR_SRC_Y 9
+#define VIRGL_CMD_RCR_SRC_Z 10
+#define VIRGL_CMD_RCR_SRC_W 11
+#define VIRGL_CMD_RCR_SRC_H 12
+#define VIRGL_CMD_RCR_SRC_D 13
+
+/* blit */
+#define VIRGL_CMD_BLIT_SIZE 21
+#define VIRGL_CMD_BLIT_S0 1
+#define VIRGL_CMD_BLIT_S0_MASK(x) (((x) & 0xff) << 0)
+#define VIRGL_CMD_BLIT_S0_FILTER(x) (((x) & 0x3) << 8)
+#define VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(x) (((x) & 0x1) << 10)
+#define VIRGL_CMD_BLIT_SCISSOR_MINX_MINY 2
+#define VIRGL_CMD_BLIT_SCISSOR_MAXX_MAXY 3
+#define VIRGL_CMD_BLIT_DST_RES_HANDLE 4
+#define VIRGL_CMD_BLIT_DST_LEVEL 5
+#define VIRGL_CMD_BLIT_DST_FORMAT 6
+#define VIRGL_CMD_BLIT_DST_X 7
+#define VIRGL_CMD_BLIT_DST_Y 8
+#define VIRGL_CMD_BLIT_DST_Z 9
+#define VIRGL_CMD_BLIT_DST_W 10
+#define VIRGL_CMD_BLIT_DST_H 11
+#define VIRGL_CMD_BLIT_DST_D 12
+#define VIRGL_CMD_BLIT_SRC_RES_HANDLE 13
+#define VIRGL_CMD_BLIT_SRC_LEVEL 14
+#define VIRGL_CMD_BLIT_SRC_FORMAT 15
+#define VIRGL_CMD_BLIT_SRC_X 16
+#define VIRGL_CMD_BLIT_SRC_Y 17
+#define VIRGL_CMD_BLIT_SRC_Z 18
+#define VIRGL_CMD_BLIT_SRC_W 19
+#define VIRGL_CMD_BLIT_SRC_H 20
+#define VIRGL_CMD_BLIT_SRC_D 21
+
+/* query object */
+#define VIRGL_OBJ_QUERY_SIZE 4
+#define VIRGL_OBJ_QUERY_HANDLE 1
+#define VIRGL_OBJ_QUERY_TYPE_INDEX 2
+#define VIRGL_OBJ_QUERY_TYPE(x) ((x) & 0xffff)
+#define VIRGL_OBJ_QUERY_INDEX(x) (((x) & 0xffff) << 16)
+#define VIRGL_OBJ_QUERY_OFFSET 3
+#define VIRGL_OBJ_QUERY_RES_HANDLE 4
+
+#define VIRGL_QUERY_BEGIN_HANDLE 1
+
+#define VIRGL_QUERY_END_HANDLE 1
+
+#define VIRGL_QUERY_RESULT_HANDLE 1
+#define VIRGL_QUERY_RESULT_WAIT 2
+
+/* render condition */
+#define VIRGL_RENDER_CONDITION_SIZE 3
+#define VIRGL_RENDER_CONDITION_HANDLE 1
+#define VIRGL_RENDER_CONDITION_CONDITION 2
+#define VIRGL_RENDER_CONDITION_MODE 3
+
+/* resource inline write */
+#define VIRGL_RESOURCE_IW_RES_HANDLE 1
+#define VIRGL_RESOURCE_IW_LEVEL 2
+#define VIRGL_RESOURCE_IW_USAGE 3
+#define VIRGL_RESOURCE_IW_STRIDE 4
+#define VIRGL_RESOURCE_IW_LAYER_STRIDE 5
+#define VIRGL_RESOURCE_IW_X 6
+#define VIRGL_RESOURCE_IW_Y 7
+#define VIRGL_RESOURCE_IW_Z 8
+#define VIRGL_RESOURCE_IW_W 9
+#define VIRGL_RESOURCE_IW_H 10
+#define VIRGL_RESOURCE_IW_D 11
+#define VIRGL_RESOURCE_IW_DATA_START 12
+
+/* set streamout targets */
+#define VIRGL_SET_STREAMOUT_TARGETS_APPEND_BITMASK 1
+#define VIRGL_SET_STREAMOUT_TARGETS_H0 2
+
+/* set sample mask */
+#define VIRGL_SET_SAMPLE_MASK_SIZE 1
+#define VIRGL_SET_SAMPLE_MASK_MASK 1
+
+/* set clip state */
+#define VIRGL_SET_CLIP_STATE_SIZE 32
+#define VIRGL_SET_CLIP_STATE_C0 1
+
+/* polygon stipple */
+#define VIRGL_POLYGON_STIPPLE_SIZE 32
+#define VIRGL_POLYGON_STIPPLE_P0 1
+
+#define VIRGL_BIND_SHADER_SIZE 2
+#define VIRGL_BIND_SHADER_HANDLE 1
+#define VIRGL_BIND_SHADER_TYPE 2
+
+#endif
diff --git a/src/gallium/drivers/virgl/virgl_public.h b/src/gallium/drivers/virgl/virgl_public.h
new file mode 100644
index 00000000000..6a2c11be320
--- /dev/null
+++ b/src/gallium/drivers/virgl/virgl_public.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2014, 2015 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef VIRGL_PUBLIC_H
+#define VIRGL_PUBLIC_H
+
+struct pipe_screen;
+struct sw_winsys;
+struct virgl_winsys;
+
+struct pipe_screen *
+virgl_create_screen(struct virgl_winsys *vws);
+#endif
diff --git a/src/gallium/drivers/virgl/virgl_query.c b/src/gallium/drivers/virgl/virgl_query.c
new file mode 100644
index 00000000000..f79be5f2f09
--- /dev/null
+++ b/src/gallium/drivers/virgl/virgl_query.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2014, 2015 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "util/u_memory.h"
+#include "util/u_inlines.h"
+#include "virgl_resource.h"
+#include "virgl_context.h"
+#include "virgl_encode.h"
+
+struct virgl_query {
+ uint32_t handle;
+ struct virgl_resource *buf;
+
+ unsigned index;
+ unsigned type;
+ unsigned result_size;
+ unsigned result_gotten_sent;
+};
+
+static void virgl_render_condition(struct pipe_context *ctx,
+ struct pipe_query *q,
+ boolean condition,
+ uint mode)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_query *query = (struct virgl_query *)q;
+ uint32_t handle = 0;
+ if (q)
+ handle = query->handle;
+ virgl_encoder_render_condition(vctx, handle, condition, mode);
+}
+
+static struct pipe_query *virgl_create_query(struct pipe_context *ctx,
+ unsigned query_type, unsigned index)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_query *query;
+ uint32_t handle;
+
+ query = CALLOC_STRUCT(virgl_query);
+ if (!query)
+ return NULL;
+
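+ /* staging buffer the host fills with a struct virgl_host_query_state once the result is ready */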
+ query->buf = (struct virgl_resource *)pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM,
+ PIPE_USAGE_STAGING, sizeof(struct virgl_host_query_state));
+ if (!query->buf) {
+ FREE(query);
+ return NULL;
+ }
+
+ handle = virgl_object_assign_handle();
+ query->type = query_type;
+ query->index = index;
+ query->handle = handle;
+ query->buf->clean = FALSE;
+ virgl_encoder_create_query(vctx, handle, query_type, index, query->buf, 0);
+
+ return (struct pipe_query *)query;
+}
+
+static void virgl_destroy_query(struct pipe_context *ctx,
+ struct pipe_query *q)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_query *query = (struct virgl_query *)q;
+
+ virgl_encode_delete_object(vctx, query->handle, VIRGL_OBJECT_QUERY);
+
+ pipe_resource_reference((struct pipe_resource **)&query->buf, NULL);
+ FREE(query);
+}
+
+static boolean virgl_begin_query(struct pipe_context *ctx,
+ struct pipe_query *q)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_query *query = (struct virgl_query *)q;
+
+ query->buf->clean = FALSE;
+ virgl_encoder_begin_query(vctx, query->handle);
+ return true;
+}
+
+static void virgl_end_query(struct pipe_context *ctx,
+ struct pipe_query *q)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_query *query = (struct virgl_query *)q;
+ struct pipe_box box;
+
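+ /* mark the query buffer as waiting on the host; the host flips this to
+  * VIRGL_QUERY_STATE_DONE when the result has been written back */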
+ uint32_t qs = VIRGL_QUERY_STATE_WAIT_HOST;
+ u_box_1d(0, 4, &box);
+ virgl_transfer_inline_write(ctx, &query->buf->u.b, 0, PIPE_TRANSFER_WRITE,
+ &box, &qs, 0, 0);
+
+ virgl_encoder_end_query(vctx, query->handle);
+}
+
+static boolean virgl_get_query_result(struct pipe_context *ctx,
+ struct pipe_query *q,
+ boolean wait,
+ union pipe_query_result *result)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_query *query = (struct virgl_query *)q;
+ struct pipe_transfer *transfer;
+ struct virgl_host_query_state *host_state;
+
+ /* ask host for query result */
+ if (!query->result_gotten_sent) {
+ query->result_gotten_sent = 1;
+ virgl_encoder_get_query_result(vctx, query->handle, 0);
+ ctx->flush(ctx, NULL, 0);
+ }
+
+ /* map the result buffer and poll the query state; the host sets it to
+  * VIRGL_QUERY_STATE_DONE once the result has been written */
+ remap:
+ host_state = pipe_buffer_map(ctx, &query->buf->u.b,
+ PIPE_TRANSFER_READ, &transfer);
+
+ if (host_state->query_state != VIRGL_QUERY_STATE_DONE) {
+ pipe_buffer_unmap(ctx, transfer);
+ if (wait)
+ goto remap;
+ else
+ return FALSE;
+ }
+
+ if (query->type == PIPE_QUERY_TIMESTAMP || query->type == PIPE_QUERY_TIME_ELAPSED)
+ result->u64 = host_state->result;
+ else
+ result->u64 = (uint32_t)host_state->result;
+
+ pipe_buffer_unmap(ctx, transfer);
+ query->result_gotten_sent = 0;
+ return TRUE;
+}
+
+void virgl_init_query_functions(struct virgl_context *vctx)
+{
+ vctx->base.render_condition = virgl_render_condition;
+ vctx->base.create_query = virgl_create_query;
+ vctx->base.destroy_query = virgl_destroy_query;
+ vctx->base.begin_query = virgl_begin_query;
+ vctx->base.end_query = virgl_end_query;
+ vctx->base.get_query_result = virgl_get_query_result;
+}
diff --git a/src/gallium/drivers/virgl/virgl_resource.c b/src/gallium/drivers/virgl/virgl_resource.c
new file mode 100644
index 00000000000..758dd6b3e4b
--- /dev/null
+++ b/src/gallium/drivers/virgl/virgl_resource.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2014, 2015 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "util/u_inlines.h"
+#include "virgl_resource.h"
+#include "virgl_context.h"
+
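+/* A transfer must wait for a flush when the resource is still referenced by
+ * commands in the current command buffer, unless the caller asked for an
+ * unsynchronized mapping. */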
+bool virgl_res_needs_flush_wait(struct virgl_context *vctx,
+ struct virgl_resource *res,
+ unsigned usage)
+{
+ struct virgl_screen *vs = virgl_screen(vctx->base.screen);
+
+ if ((!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) && vs->vws->res_is_referenced(vs->vws, vctx->cbuf, res->hw_res)) {
+ return true;
+ }
+ return false;
+}
+
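+/* A readback from the host can be skipped when the resource is still clean,
+ * the mapped range is being discarded, or the caller writes with explicit
+ * flushes only. */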
+bool virgl_res_needs_readback(struct virgl_context *vctx,
+ struct virgl_resource *res,
+ unsigned usage)
+{
+ bool readback = true;
+ if (res->clean)
+ readback = false;
+ else if (usage & PIPE_TRANSFER_DISCARD_RANGE)
+ readback = false;
+ else if ((usage & (PIPE_TRANSFER_WRITE | PIPE_TRANSFER_FLUSH_EXPLICIT)) ==
+ (PIPE_TRANSFER_WRITE | PIPE_TRANSFER_FLUSH_EXPLICIT))
+ readback = false;
+ return readback;
+}
+
+static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
+ const struct pipe_resource *templ)
+{
+ struct virgl_screen *vs = (struct virgl_screen *)screen;
+ if (templ->target == PIPE_BUFFER)
+ return virgl_buffer_create(vs, templ);
+ else
+ return virgl_texture_create(vs, templ);
+}
+
+static struct pipe_resource *virgl_resource_from_handle(struct pipe_screen *screen,
+ const struct pipe_resource *templ,
+ struct winsys_handle *whandle)
+{
+ struct virgl_screen *vs = (struct virgl_screen *)screen;
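+ /* only textures can be created from a winsys handle; buffer import is not supported */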
+ if (templ->target == PIPE_BUFFER)
+ return NULL;
+ else
+ return virgl_texture_from_handle(vs, templ, whandle);
+}
+
+void virgl_init_screen_resource_functions(struct pipe_screen *screen)
+{
+ screen->resource_create = virgl_resource_create;
+ screen->resource_from_handle = virgl_resource_from_handle;
+ screen->resource_get_handle = u_resource_get_handle_vtbl;
+ screen->resource_destroy = u_resource_destroy_vtbl;
+}
+
+void virgl_init_context_resource_functions(struct pipe_context *ctx)
+{
+ ctx->transfer_map = u_transfer_map_vtbl;
+ ctx->transfer_flush_region = u_transfer_flush_region_vtbl;
+ ctx->transfer_unmap = u_transfer_unmap_vtbl;
+ ctx->transfer_inline_write = u_transfer_inline_write_vtbl;
+}
diff --git a/src/gallium/drivers/virgl/virgl_resource.h b/src/gallium/drivers/virgl/virgl_resource.h
new file mode 100644
index 00000000000..2d0bd8b6400
--- /dev/null
+++ b/src/gallium/drivers/virgl/virgl_resource.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2014, 2015 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef VIRGL_RESOURCE_H
+#define VIRGL_RESOURCE_H
+
+#include "util/u_inlines.h"
+#include "util/u_range.h"
+#include "util/list.h"
+#include "util/u_transfer.h"
+
+#include "virgl_hw.h"
+#define VR_MAX_TEXTURE_2D_LEVELS 15
+
+struct virgl_screen;
+struct virgl_context;
+struct virgl_resource {
+ struct u_resource u;
+ struct virgl_hw_res *hw_res;
+ boolean clean;
+};
+
+struct virgl_buffer {
+ struct virgl_resource base;
+
+ struct list_head flush_list;
+ boolean on_list;
+
+ /* The buffer range which is initialized (with a write transfer,
+ * streamout, DMA, or as a random access target). The rest of
+ * the buffer is considered invalid and can be mapped unsynchronized.
+ *
+    * This allows unsynchronized mapping of a buffer range which hasn't
+ * been used yet. It's for applications which forget to use
+ * the unsynchronized map flag and expect the driver to figure it out.
+ */
+ struct util_range valid_buffer_range;
+};
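+
+/* Sketch of how a buffer transfer path could consult valid_buffer_range:
+ * a map that does not overlap the initialized range cannot alias work the
+ * GPU may still be doing, so it can be mapped unsynchronized. The helper
+ * below is illustrative only; its name is not part of the driver.
+ */
+static inline bool virgl_buffer_range_busy(struct virgl_buffer *vbuf,
+                                           unsigned start, unsigned end)
+{
+   return util_ranges_intersect(&vbuf->valid_buffer_range, start, end);
+}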
+
+struct virgl_texture {
+ struct virgl_resource base;
+
+ unsigned long level_offset[VR_MAX_TEXTURE_2D_LEVELS];
+ unsigned stride[VR_MAX_TEXTURE_2D_LEVELS];
+};
+
+struct virgl_transfer {
+ struct pipe_transfer base;
+ uint32_t offset;
+ struct virgl_resource *resolve_tmp;
+};
+
+void virgl_resource_destroy(struct pipe_screen *screen,
+ struct pipe_resource *resource);
+
+void virgl_init_screen_resource_functions(struct pipe_screen *screen);
+
+void virgl_init_context_resource_functions(struct pipe_context *ctx);
+
+struct pipe_resource *virgl_texture_create(struct virgl_screen *vs,
+ const struct pipe_resource *templ);
+
+struct pipe_resource *virgl_texture_from_handle(struct virgl_screen *vs,
+ const struct pipe_resource *templ,
+ struct winsys_handle *whandle);
+
+static inline struct virgl_resource *virgl_resource(struct pipe_resource *r)
+{
+ return (struct virgl_resource *)r;
+}
+
+static inline struct virgl_buffer *virgl_buffer(struct pipe_resource *r)
+{
+ return (struct virgl_buffer *)r;
+}
+
+struct pipe_resource *virgl_buffer_create(struct virgl_screen *vs,
+ const struct pipe_resource *templ);
+
+static inline unsigned pipe_to_virgl_bind(unsigned pbind)
+{
+ unsigned outbind = 0;
+ if (pbind & PIPE_BIND_DEPTH_STENCIL)
+ outbind |= VIRGL_BIND_DEPTH_STENCIL;
+ if (pbind & PIPE_BIND_RENDER_TARGET)
+ outbind |= VIRGL_BIND_RENDER_TARGET;
+ if (pbind & PIPE_BIND_SAMPLER_VIEW)
+ outbind |= VIRGL_BIND_SAMPLER_VIEW;
+ if (pbind & PIPE_BIND_VERTEX_BUFFER)
+ outbind |= VIRGL_BIND_VERTEX_BUFFER;
+ if (pbind & PIPE_BIND_INDEX_BUFFER)
+ outbind |= VIRGL_BIND_INDEX_BUFFER;
+ if (pbind & PIPE_BIND_CONSTANT_BUFFER)
+ outbind |= VIRGL_BIND_CONSTANT_BUFFER;
+ if (pbind & PIPE_BIND_DISPLAY_TARGET)
+ outbind |= VIRGL_BIND_DISPLAY_TARGET;
+ if (pbind & PIPE_BIND_STREAM_OUTPUT)
+ outbind |= VIRGL_BIND_STREAM_OUTPUT;
+ if (pbind & PIPE_BIND_CURSOR)
+ outbind |= VIRGL_BIND_CURSOR;
+ if (pbind & PIPE_BIND_CUSTOM)
+ outbind |= VIRGL_BIND_CUSTOM;
+ if (pbind & PIPE_BIND_SCANOUT)
+ outbind |= VIRGL_BIND_SCANOUT;
+ return outbind;
+}
+
+bool virgl_res_needs_flush_wait(struct virgl_context *vctx,
+ struct virgl_resource *res,
+ unsigned usage);
+bool virgl_res_needs_readback(struct virgl_context *vctx,
+ struct virgl_resource *res,
+ unsigned usage);
+#endif
diff --git a/src/gallium/drivers/virgl/virgl_screen.c b/src/gallium/drivers/virgl/virgl_screen.c
new file mode 100644
index 00000000000..2bc7f879487
--- /dev/null
+++ b/src/gallium/drivers/virgl/virgl_screen.c
@@ -0,0 +1,557 @@
+/*
+ * Copyright 2014, 2015 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "util/u_memory.h"
+#include "util/u_format.h"
+#include "util/u_format_s3tc.h"
+#include "util/u_video.h"
+#include "os/os_time.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_screen.h"
+#include "draw/draw_context.h"
+#include "vl/vl_decoder.h"
+#include "vl/vl_video_buffer.h"
+
+#include "state_tracker/sw_winsys.h"
+#include "tgsi/tgsi_exec.h"
+
+#include "virgl.h"
+#include "virgl_resource.h"
+#include "virgl_public.h"
+#include "virgl_context.h"
+
+#define SP_MAX_TEXTURE_2D_LEVELS 15 /* 16K x 16K */
+#define SP_MAX_TEXTURE_3D_LEVELS 9 /* 512 x 512 x 512 */
+#define SP_MAX_TEXTURE_CUBE_LEVELS 13 /* 4K x 4K */
+
+static const char *
+virgl_get_vendor(struct pipe_screen *screen)
+{
+ return "Red Hat";
+}
+
+
+static const char *
+virgl_get_name(struct pipe_screen *screen)
+{
+ return "virgl";
+}
+
+static int
+virgl_get_param(struct pipe_screen *screen, enum pipe_cap param)
+{
+ struct virgl_screen *vscreen = virgl_screen(screen);
+ switch (param) {
+ case PIPE_CAP_NPOT_TEXTURES:
+ return 1;
+ case PIPE_CAP_TWO_SIDED_STENCIL:
+ return 1;
+ case PIPE_CAP_SM3:
+ return 1;
+ case PIPE_CAP_ANISOTROPIC_FILTER:
+ return 1;
+ case PIPE_CAP_POINT_SPRITE:
+ return 1;
+ case PIPE_CAP_MAX_RENDER_TARGETS:
+ return vscreen->caps.caps.v1.max_render_targets;
+ case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
+ return vscreen->caps.caps.v1.max_dual_source_render_targets;
+ case PIPE_CAP_OCCLUSION_QUERY:
+ return vscreen->caps.caps.v1.bset.occlusion_query;
+ case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
+ return vscreen->caps.caps.v1.bset.mirror_clamp;
+ case PIPE_CAP_TEXTURE_SHADOW_MAP:
+ return 1;
+ case PIPE_CAP_TEXTURE_SWIZZLE:
+ return 1;
+ case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
+ return SP_MAX_TEXTURE_2D_LEVELS;
+ case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
+ return SP_MAX_TEXTURE_3D_LEVELS;
+ case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
+ return SP_MAX_TEXTURE_CUBE_LEVELS;
+ case PIPE_CAP_BLEND_EQUATION_SEPARATE:
+ return 1;
+ case PIPE_CAP_INDEP_BLEND_ENABLE:
+ return vscreen->caps.caps.v1.bset.indep_blend_enable;
+ case PIPE_CAP_INDEP_BLEND_FUNC:
+ return vscreen->caps.caps.v1.bset.indep_blend_func;
+ case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
+ case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
+ case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
+ case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
+ return vscreen->caps.caps.v1.bset.fragment_coord_conventions;
+ case PIPE_CAP_DEPTH_CLIP_DISABLE:
+ return vscreen->caps.caps.v1.bset.depth_clip_disable;
+ case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
+ return vscreen->caps.caps.v1.max_streamout_buffers;
+ case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
+ case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
+ return 16*4;
+ case PIPE_CAP_PRIMITIVE_RESTART:
+ return vscreen->caps.caps.v1.bset.primitive_restart;
+ case PIPE_CAP_SHADER_STENCIL_EXPORT:
+ return vscreen->caps.caps.v1.bset.shader_stencil_export;
+ case PIPE_CAP_TGSI_INSTANCEID:
+ case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
+ return 1;
+ case PIPE_CAP_SEAMLESS_CUBE_MAP:
+ return vscreen->caps.caps.v1.bset.seamless_cube_map;
+ case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
+ return vscreen->caps.caps.v1.bset.seamless_cube_map_per_texture;
+ case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
+ return vscreen->caps.caps.v1.max_texture_array_layers;
+ case PIPE_CAP_MIN_TEXEL_OFFSET:
+ case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:
+ return -8;
+ case PIPE_CAP_MAX_TEXEL_OFFSET:
+ case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET:
+ return 7;
+ case PIPE_CAP_CONDITIONAL_RENDER:
+ return vscreen->caps.caps.v1.bset.conditional_render;
+ case PIPE_CAP_TEXTURE_BARRIER:
+ return 0;
+ case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
+ return 1;
+ case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
+ case PIPE_CAP_VERTEX_COLOR_CLAMPED:
+ return vscreen->caps.caps.v1.bset.color_clamping;
+ case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
+ return 1;
+ case PIPE_CAP_GLSL_FEATURE_LEVEL:
+ return vscreen->caps.caps.v1.glsl_level;
+ case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
+ return 0;
+ case PIPE_CAP_COMPUTE:
+ return 0;
+ case PIPE_CAP_USER_VERTEX_BUFFERS:
+ return 0;
+ case PIPE_CAP_USER_INDEX_BUFFERS:
+ case PIPE_CAP_USER_CONSTANT_BUFFERS:
+ return 1;
+ case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
+ return 16;
+ case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
+ return vscreen->caps.caps.v1.bset.streamout_pause_resume;
+ case PIPE_CAP_START_INSTANCE:
+ return vscreen->caps.caps.v1.bset.start_instance;
+ case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
+ case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
+ case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
+ case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
+ case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
+ return 0;
+ case PIPE_CAP_QUERY_TIMESTAMP:
+ return 1;
+ case PIPE_CAP_QUERY_TIME_ELAPSED:
+ return 0;
+ case PIPE_CAP_TGSI_TEXCOORD:
+ return 0;
+ case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
+ return VIRGL_MAP_BUFFER_ALIGNMENT;
+ case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
+ return vscreen->caps.caps.v1.max_tbo_size > 0;
+ case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
+ return 0;
+ case PIPE_CAP_CUBE_MAP_ARRAY:
+ return vscreen->caps.caps.v1.bset.cube_map_array;
+ case PIPE_CAP_TEXTURE_MULTISAMPLE:
+ return vscreen->caps.caps.v1.bset.texture_multisample;
+ case PIPE_CAP_MAX_VIEWPORTS:
+ return vscreen->caps.caps.v1.max_viewports;
+ case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
+ return vscreen->caps.caps.v1.max_tbo_size;
+ case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
+ case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
+ case PIPE_CAP_ENDIANNESS:
+ return 0;
+ case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
+ return 1;
+ case PIPE_CAP_TGSI_VS_LAYER_VIEWPORT:
+ return 0;
+ case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
+ return 1024;
+ case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
+ return 16384;
+ case PIPE_CAP_TEXTURE_QUERY_LOD:
+ return vscreen->caps.caps.v1.bset.texture_query_lod;
+ case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
+ return vscreen->caps.caps.v1.max_texture_gather_components;
+ case PIPE_CAP_TEXTURE_GATHER_SM5:
+ case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
+ case PIPE_CAP_SAMPLE_SHADING:
+ case PIPE_CAP_FAKE_SW_MSAA:
+ case PIPE_CAP_TEXTURE_GATHER_OFFSETS:
+ case PIPE_CAP_TGSI_VS_WINDOW_SPACE_POSITION:
+ case PIPE_CAP_MAX_VERTEX_STREAMS:
+ case PIPE_CAP_DRAW_INDIRECT:
+ case PIPE_CAP_TGSI_FS_FINE_DERIVATIVE:
+ case PIPE_CAP_CONDITIONAL_RENDER_INVERTED:
+ case PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE:
+ case PIPE_CAP_SAMPLER_VIEW_TARGET:
+ case PIPE_CAP_CLIP_HALFZ:
+ case PIPE_CAP_VERTEXID_NOBASE:
+ case PIPE_CAP_POLYGON_OFFSET_CLAMP:
+ case PIPE_CAP_MULTISAMPLE_Z_RESOLVE:
+ case PIPE_CAP_RESOURCE_FROM_USER_MEMORY:
+ case PIPE_CAP_DEVICE_RESET_STATUS_QUERY:
+ case PIPE_CAP_MAX_SHADER_PATCH_VARYINGS:
+ case PIPE_CAP_TEXTURE_FLOAT_LINEAR:
+ case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR:
+ case PIPE_CAP_DEPTH_BOUNDS_TEST:
+ case PIPE_CAP_TGSI_TXQS:
+ case PIPE_CAP_FORCE_PERSAMPLE_INTERP:
+ case PIPE_CAP_SHAREABLE_SHADERS:
+ return 0;
+ case PIPE_CAP_VENDOR_ID:
+ return 0x1af4;
+ case PIPE_CAP_DEVICE_ID:
+ return 0x1010;
+ case PIPE_CAP_ACCELERATED:
+ return 1;
+ case PIPE_CAP_UMA:
+ case PIPE_CAP_VIDEO_MEMORY:
+ return 0;
+ }
+ /* should only get here on unhandled cases */
+ debug_printf("Unexpected PIPE_CAP %d query\n", param);
+ return 0;
+}
+
+static int
+virgl_get_shader_param(struct pipe_screen *screen, unsigned shader, enum pipe_shader_cap param)
+{
+ struct virgl_screen *vscreen = virgl_screen(screen);
+ switch(shader)
+ {
+ case PIPE_SHADER_FRAGMENT:
+ case PIPE_SHADER_VERTEX:
+ case PIPE_SHADER_GEOMETRY:
+ switch (param) {
+ case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
+ case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
+ case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
+ case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
+ return INT_MAX;
+ case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
+ case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
+ case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
+ return 1;
+ case PIPE_SHADER_CAP_MAX_INPUTS:
+ if (vscreen->caps.caps.v1.glsl_level < 150)
+ return 16;
+ return shader == PIPE_SHADER_VERTEX ? 16 : 32;
+ case PIPE_SHADER_CAP_MAX_OUTPUTS:
+ return 128;
+ // case PIPE_SHADER_CAP_MAX_CONSTS:
+ // return 4096;
+ case PIPE_SHADER_CAP_MAX_TEMPS:
+ return 256;
+ case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
+ return vscreen->caps.caps.v1.max_uniform_blocks;
+ // case PIPE_SHADER_CAP_MAX_ADDRS:
+ // return 1;
+ case PIPE_SHADER_CAP_MAX_PREDS:
+ return 0;
+ case PIPE_SHADER_CAP_SUBROUTINES:
+ return 1;
+ case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
+ return 16;
+ case PIPE_SHADER_CAP_INTEGERS:
+ return vscreen->caps.caps.v1.glsl_level >= 130;
+ case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
+ return 32;
+ case PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE:
+ return 4096 * sizeof(float[4]);
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static float
+virgl_get_paramf(struct pipe_screen *screen, enum pipe_capf param)
+{
+ switch (param) {
+ case PIPE_CAPF_MAX_LINE_WIDTH:
+ /* fall-through */
+ case PIPE_CAPF_MAX_LINE_WIDTH_AA:
+ return 255.0; /* arbitrary */
+ case PIPE_CAPF_MAX_POINT_WIDTH:
+ /* fall-through */
+ case PIPE_CAPF_MAX_POINT_WIDTH_AA:
+ return 255.0; /* arbitrary */
+ case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
+ return 16.0;
+ case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
+ return 16.0; /* arbitrary */
+ case PIPE_CAPF_GUARD_BAND_LEFT:
+ case PIPE_CAPF_GUARD_BAND_TOP:
+ case PIPE_CAPF_GUARD_BAND_RIGHT:
+ case PIPE_CAPF_GUARD_BAND_BOTTOM:
+ return 0.0;
+ }
+ /* should only get here on unhandled cases */
+ debug_printf("Unexpected PIPE_CAPF %d query\n", param);
+ return 0.0;
+}
+
+static boolean
+virgl_is_vertex_format_supported(struct pipe_screen *screen,
+ enum pipe_format format)
+{
+ struct virgl_screen *vscreen = virgl_screen(screen);
+ const struct util_format_description *format_desc;
+ int i;
+
+ format_desc = util_format_description(format);
+ if (!format_desc)
+ return FALSE;
+
+ if (format == PIPE_FORMAT_R11G11B10_FLOAT) {
+ int vformat = VIRGL_FORMAT_R11G11B10_FLOAT;
+ int big = vformat / 32;
+ int small = vformat % 32;
+ if (!(vscreen->caps.caps.v1.vertexbuffer.bitmask[big] & (1 << small)))
+ return FALSE;
+ return TRUE;
+ }
+
+ /* Find the first non-VOID channel. */
+ for (i = 0; i < 4; i++) {
+ if (format_desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
+ break;
+ }
+ }
+
+ if (i == 4)
+ return FALSE;
+
+ if (format_desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
+ return FALSE;
+
+ if (format_desc->channel[i].type == UTIL_FORMAT_TYPE_FIXED)
+ return FALSE;
+ return TRUE;
+}
+
+/**
+ * Query format support for creating a texture, drawing surface, etc.
+ * \param format  the format to test
+ * \param target  one of PIPE_BUFFER or the PIPE_TEXTURE_* targets
+ * \param bind    bitmask of PIPE_BIND_* usage flags being requested
+ */
+static boolean
+virgl_is_format_supported( struct pipe_screen *screen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned sample_count,
+ unsigned bind)
+{
+ struct virgl_screen *vscreen = virgl_screen(screen);
+ const struct util_format_description *format_desc;
+ int i;
+
+ assert(target == PIPE_BUFFER ||
+ target == PIPE_TEXTURE_1D ||
+ target == PIPE_TEXTURE_1D_ARRAY ||
+ target == PIPE_TEXTURE_2D ||
+ target == PIPE_TEXTURE_2D_ARRAY ||
+ target == PIPE_TEXTURE_RECT ||
+ target == PIPE_TEXTURE_3D ||
+ target == PIPE_TEXTURE_CUBE ||
+ target == PIPE_TEXTURE_CUBE_ARRAY);
+
+ format_desc = util_format_description(format);
+ if (!format_desc)
+ return FALSE;
+
+ if (util_format_is_intensity(format))
+ return FALSE;
+
+ if (sample_count > 1) {
+ if (!vscreen->caps.caps.v1.bset.texture_multisample)
+ return FALSE;
+ if (sample_count > vscreen->caps.caps.v1.max_samples)
+ return FALSE;
+ }
+
+ if (bind & PIPE_BIND_VERTEX_BUFFER) {
+ return virgl_is_vertex_format_supported(screen, format);
+ }
+
+ if (bind & PIPE_BIND_RENDER_TARGET) {
+ if (format_desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS)
+ return FALSE;
+
+ /*
+ * Although possible, it is unnatural to render into compressed or YUV
+ * surfaces. So disable these here to avoid going into weird paths
+ * inside the state trackers.
+ */
+ if (format_desc->block.width != 1 ||
+ format_desc->block.height != 1)
+ return FALSE;
+
+ {
+ int big = format / 32;
+ int small = format % 32;
+ if (!(vscreen->caps.caps.v1.render.bitmask[big] & (1 << small)))
+ return FALSE;
+ }
+ }
+
+ if (bind & PIPE_BIND_DEPTH_STENCIL) {
+ if (format_desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS)
+ return FALSE;
+ }
+
+ /*
+ * All other operations (sampling, transfer, etc).
+ */
+
+ if (format_desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
+ if (util_format_s3tc_enabled)
+ goto out_lookup;
+ return FALSE;
+ }
+ if (format_desc->layout == UTIL_FORMAT_LAYOUT_RGTC) {
+ goto out_lookup;
+ }
+
+ if (format == PIPE_FORMAT_R11G11B10_FLOAT) {
+ goto out_lookup;
+ } else if (format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
+ goto out_lookup;
+ }
+
+ /* Find the first non-VOID channel. */
+ for (i = 0; i < 4; i++) {
+ if (format_desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
+ break;
+ }
+ }
+
+ if (i == 4)
+ return FALSE;
+
+ /* no L4A4 */
+ if (format_desc->nr_channels < 4 && format_desc->channel[i].size == 4)
+ return FALSE;
+
+ out_lookup:
+ {
+ int big = format / 32;
+ int small = format % 32;
+ if (!(vscreen->caps.caps.v1.sampler.bitmask[big] & (1 << small)))
+ return FALSE;
+ }
+   /*
+    * The host capability bitmask reports sampler support for this format.
+    */
+   return TRUE;
+}
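+
+/* Caller-side usage sketch (illustrative, not part of this file): a state
+ * tracker checking whether BGRA8 works as a 2D render target would ask
+ *
+ *    screen->is_format_supported(screen, PIPE_FORMAT_B8G8R8A8_UNORM,
+ *                                PIPE_TEXTURE_2D, 0, PIPE_BIND_RENDER_TARGET);
+ *
+ * which ends up in the capability-bitmask lookups above (format / 32 picks
+ * the dword, format % 32 the bit within it).
+ */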
+
+static void virgl_flush_frontbuffer(struct pipe_screen *screen,
+ struct pipe_resource *res,
+ unsigned level, unsigned layer,
+ void *winsys_drawable_handle, struct pipe_box *sub_box)
+{
+ struct virgl_screen *vscreen = virgl_screen(screen);
+ struct virgl_winsys *vws = vscreen->vws;
+ struct virgl_resource *vres = (struct virgl_resource *)res;
+
+ if (vws->flush_frontbuffer)
+ vws->flush_frontbuffer(vws, vres->hw_res, level, layer, winsys_drawable_handle,
+ sub_box);
+}
+
+static void virgl_fence_reference(struct pipe_screen *screen,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence)
+{
+ struct virgl_screen *vscreen = virgl_screen(screen);
+ struct virgl_winsys *vws = vscreen->vws;
+
+ vws->fence_reference(vws, ptr, fence);
+}
+
+static boolean virgl_fence_finish(struct pipe_screen *screen,
+ struct pipe_fence_handle *fence,
+ uint64_t timeout)
+{
+ struct virgl_screen *vscreen = virgl_screen(screen);
+ struct virgl_winsys *vws = vscreen->vws;
+
+ return vws->fence_wait(vws, fence, timeout);
+}
+
+static uint64_t
+virgl_get_timestamp(struct pipe_screen *_screen)
+{
+ return os_time_get_nano();
+}
+
+static void
+virgl_destroy_screen(struct pipe_screen *screen)
+{
+ struct virgl_screen *vscreen = virgl_screen(screen);
+ struct virgl_winsys *vws = vscreen->vws;
+
+ if (vws)
+ vws->destroy(vws);
+ FREE(vscreen);
+}
+
+struct pipe_screen *
+virgl_create_screen(struct virgl_winsys *vws)
+{
+ struct virgl_screen *screen = CALLOC_STRUCT(virgl_screen);
+
+ if (!screen)
+ return NULL;
+
+ screen->vws = vws;
+ screen->winsys = NULL;
+ screen->base.get_name = virgl_get_name;
+ screen->base.get_vendor = virgl_get_vendor;
+ screen->base.get_param = virgl_get_param;
+ screen->base.get_shader_param = virgl_get_shader_param;
+ screen->base.get_paramf = virgl_get_paramf;
+ screen->base.is_format_supported = virgl_is_format_supported;
+ screen->base.destroy = virgl_destroy_screen;
+ screen->base.context_create = virgl_context_create;
+ screen->base.flush_frontbuffer = virgl_flush_frontbuffer;
+ screen->base.get_timestamp = virgl_get_timestamp;
+ screen->base.fence_reference = virgl_fence_reference;
+ //screen->base.fence_signalled = virgl_fence_signalled;
+ screen->base.fence_finish = virgl_fence_finish;
+
+ virgl_init_screen_resource_functions(&screen->base);
+
+ vws->get_caps(vws, &screen->caps);
+
+ util_format_s3tc_init();
+ return &screen->base;
+}
diff --git a/src/gallium/drivers/virgl/virgl_streamout.c b/src/gallium/drivers/virgl/virgl_streamout.c
new file mode 100644
index 00000000000..95420f688d3
--- /dev/null
+++ b/src/gallium/drivers/virgl/virgl_streamout.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2014, 2015 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "util/u_memory.h"
+#include "util/u_inlines.h"
+#include "virgl_context.h"
+#include "virgl_encode.h"
+#include "virgl_resource.h"
+
+static struct pipe_stream_output_target *virgl_create_so_target(
+ struct pipe_context *ctx,
+ struct pipe_resource *buffer,
+ unsigned buffer_offset,
+ unsigned buffer_size)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_resource *res = (struct virgl_resource *)buffer;
+ struct virgl_so_target *t = CALLOC_STRUCT(virgl_so_target);
+ uint32_t handle;
+
+ if (!t)
+ return NULL;
+ handle = virgl_object_assign_handle();
+
+ t->base.reference.count = 1;
+ t->base.context = ctx;
+ pipe_resource_reference(&t->base.buffer, buffer);
+ t->base.buffer_offset = buffer_offset;
+ t->base.buffer_size = buffer_size;
+ t->handle = handle;
+ res->clean = FALSE;
+ virgl_encoder_create_so_target(vctx, handle, res, buffer_offset, buffer_size);
+ return &t->base;
+}
+
+static void virgl_destroy_so_target(struct pipe_context *ctx,
+ struct pipe_stream_output_target *target)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_so_target *t = (struct virgl_so_target *)target;
+
+ pipe_resource_reference(&t->base.buffer, NULL);
+ virgl_encode_delete_object(vctx, t->handle, VIRGL_OBJECT_STREAMOUT_TARGET);
+ FREE(t);
+}
+
+static void virgl_set_so_targets(struct pipe_context *ctx,
+ unsigned num_targets,
+ struct pipe_stream_output_target **targets,
+ const unsigned *offset)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ int i;
+ for (i = 0; i < num_targets; i++) {
+ pipe_resource_reference(&vctx->so_targets[i].base.buffer, targets[i]->buffer);
+ }
+ for (i = num_targets; i < vctx->num_so_targets; i++)
+ pipe_resource_reference(&vctx->so_targets[i].base.buffer, NULL);
+ vctx->num_so_targets = num_targets;
+   virgl_encoder_set_so_targets(vctx, num_targets, targets, 0 /* append_bitmask */);
+}
+
+void virgl_init_so_functions(struct virgl_context *vctx)
+{
+ vctx->base.create_stream_output_target = virgl_create_so_target;
+ vctx->base.stream_output_target_destroy = virgl_destroy_so_target;
+ vctx->base.set_stream_output_targets = virgl_set_so_targets;
+}
diff --git a/src/gallium/drivers/virgl/virgl_texture.c b/src/gallium/drivers/virgl/virgl_texture.c
new file mode 100644
index 00000000000..97d02eba567
--- /dev/null
+++ b/src/gallium/drivers/virgl/virgl_texture.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright 2014, 2015 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "util/u_memory.h"
+#include "util/u_format.h"
+#include "virgl.h"
+#include "virgl_resource.h"
+#include "virgl_context.h"
+
+static void virgl_copy_region_with_blit(struct pipe_context *pipe,
+ struct pipe_resource *dst,
+ unsigned dst_level,
+ unsigned dstx, unsigned dsty, unsigned dstz,
+ struct pipe_resource *src,
+ unsigned src_level,
+ const struct pipe_box *src_box)
+{
+ struct pipe_blit_info blit;
+
+ memset(&blit, 0, sizeof(blit));
+ blit.src.resource = src;
+ blit.src.format = src->format;
+ blit.src.level = src_level;
+ blit.src.box = *src_box;
+ blit.dst.resource = dst;
+ blit.dst.format = dst->format;
+ blit.dst.level = dst_level;
+ blit.dst.box.x = dstx;
+ blit.dst.box.y = dsty;
+ blit.dst.box.z = dstz;
+ blit.dst.box.width = src_box->width;
+ blit.dst.box.height = src_box->height;
+ blit.dst.box.depth = src_box->depth;
+ blit.mask = util_format_get_mask(src->format) &
+ util_format_get_mask(dst->format);
+ blit.filter = PIPE_TEX_FILTER_NEAREST;
+
+ if (blit.mask) {
+ pipe->blit(pipe, &blit);
+ }
+}
+
+static void virgl_init_temp_resource_from_box(struct pipe_resource *res,
+ struct pipe_resource *orig,
+ const struct pipe_box *box,
+ unsigned level, unsigned flags)
+{
+ memset(res, 0, sizeof(*res));
+ res->format = orig->format;
+ res->width0 = box->width;
+ res->height0 = box->height;
+ res->depth0 = 1;
+ res->array_size = 1;
+ res->usage = PIPE_USAGE_STAGING;
+ res->flags = flags;
+
+ /* We must set the correct texture target and dimensions for a 3D box. */
+ if (box->depth > 1 && util_max_layer(orig, level) > 0)
+ res->target = orig->target;
+ else
+ res->target = PIPE_TEXTURE_2D;
+
+ switch (res->target) {
+ case PIPE_TEXTURE_1D_ARRAY:
+ case PIPE_TEXTURE_2D_ARRAY:
+ case PIPE_TEXTURE_CUBE_ARRAY:
+ res->array_size = box->depth;
+ break;
+ case PIPE_TEXTURE_3D:
+ res->depth0 = box->depth;
+ break;
+ default:
+ break;
+ }
+}
+
+static unsigned
+vrend_get_tex_image_offset(const struct virgl_texture *res,
+ unsigned level, unsigned layer)
+{
+ const struct pipe_resource *pres = &res->base.u.b;
+ const unsigned hgt = u_minify(pres->height0, level);
+ const unsigned nblocksy = util_format_get_nblocksy(pres->format, hgt);
+ unsigned offset = res->level_offset[level];
+
+ if (pres->target == PIPE_TEXTURE_CUBE ||
+ pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
+ pres->target == PIPE_TEXTURE_3D ||
+ pres->target == PIPE_TEXTURE_2D_ARRAY) {
+ offset += layer * nblocksy * res->stride[level];
+ }
+ else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
+ offset += layer * res->stride[level];
+ }
+ else {
+ assert(layer == 0);
+ }
+
+ return offset;
+}
+
+static void *virgl_texture_transfer_map(struct pipe_context *ctx,
+ struct pipe_resource *resource,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **transfer)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_screen *vs = virgl_screen(ctx->screen);
+ struct virgl_texture *vtex = (struct virgl_texture *)resource;
+ enum pipe_format format = resource->format;
+ struct virgl_transfer *trans;
+ void *ptr;
+ boolean readback = TRUE;
+ uint32_t offset;
+ struct virgl_hw_res *hw_res;
+ const unsigned h = u_minify(vtex->base.u.b.height0, level);
+ const unsigned nblocksy = util_format_get_nblocksy(format, h);
+ bool is_depth = util_format_has_depth(util_format_description(resource->format));
+ uint32_t l_stride;
+ bool doflushwait;
+
+ doflushwait = virgl_res_needs_flush_wait(vctx, &vtex->base, usage);
+ if (doflushwait)
+ ctx->flush(ctx, NULL, 0);
+
+ trans = util_slab_alloc(&vctx->texture_transfer_pool);
+ if (trans == NULL)
+ return NULL;
+
+ trans->base.resource = resource;
+ trans->base.level = level;
+ trans->base.usage = usage;
+ trans->base.box = *box;
+ trans->base.stride = vtex->stride[level];
+ trans->base.layer_stride = trans->base.stride * nblocksy;
+
+ if (resource->target != PIPE_TEXTURE_3D &&
+ resource->target != PIPE_TEXTURE_CUBE &&
+ resource->target != PIPE_TEXTURE_1D_ARRAY &&
+ resource->target != PIPE_TEXTURE_2D_ARRAY &&
+ resource->target != PIPE_TEXTURE_CUBE_ARRAY)
+ l_stride = 0;
+ else
+ l_stride = trans->base.layer_stride;
+
+ if (is_depth && resource->nr_samples > 1) {
+ struct pipe_resource tmp_resource;
+ virgl_init_temp_resource_from_box(&tmp_resource, resource, box,
+ level, 0);
+
+      /* resolve the MSAA depth into a temporary and map that instead */
+      trans->resolve_tmp = (struct virgl_resource *)
+         ctx->screen->resource_create(ctx->screen, &tmp_resource);
+      if (!trans->resolve_tmp) {
+         util_slab_free(&vctx->texture_transfer_pool, trans);
+         return NULL;
+      }
+
+      virgl_copy_region_with_blit(ctx, &trans->resolve_tmp->u.b, 0, 0, 0, 0,
+                                  resource, level, box);
+      ctx->flush(ctx, NULL, 0);
+      hw_res = trans->resolve_tmp->hw_res;
+      offset = 0;
+ } else {
+ offset = vrend_get_tex_image_offset(vtex, level, box->z);
+
+ offset += box->y / util_format_get_blockheight(format) * trans->base.stride +
+ box->x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
+ hw_res = vtex->base.hw_res;
+ trans->resolve_tmp = NULL;
+ }
+
+ readback = virgl_res_needs_readback(vctx, &vtex->base, usage);
+ if (readback)
+ vs->vws->transfer_get(vs->vws, hw_res, box, trans->base.stride, l_stride, offset, level);
+
+ if (doflushwait || readback)
+ vs->vws->resource_wait(vs->vws, vtex->base.hw_res);
+
+   ptr = vs->vws->resource_map(vs->vws, hw_res);
+   if (!ptr) {
+      /* don't leak the transfer or the temporary resolve resource */
+      if (trans->resolve_tmp)
+         pipe_resource_reference((struct pipe_resource **)&trans->resolve_tmp, NULL);
+      util_slab_free(&vctx->texture_transfer_pool, trans);
+      return NULL;
+   }
+
+ trans->offset = offset;
+ *transfer = &trans->base;
+
+ return ptr + trans->offset;
+}
+
+static void virgl_texture_transfer_unmap(struct pipe_context *ctx,
+ struct pipe_transfer *transfer)
+{
+ struct virgl_context *vctx = (struct virgl_context *)ctx;
+ struct virgl_transfer *trans = (struct virgl_transfer *)transfer;
+ struct virgl_texture *vtex = (struct virgl_texture *)transfer->resource;
+ uint32_t l_stride;
+
+ if (transfer->resource->target != PIPE_TEXTURE_3D &&
+ transfer->resource->target != PIPE_TEXTURE_CUBE &&
+ transfer->resource->target != PIPE_TEXTURE_1D_ARRAY &&
+ transfer->resource->target != PIPE_TEXTURE_2D_ARRAY &&
+ transfer->resource->target != PIPE_TEXTURE_CUBE_ARRAY)
+ l_stride = 0;
+ else
+ l_stride = trans->base.layer_stride;
+
+ if (trans->base.usage & PIPE_TRANSFER_WRITE) {
+ if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
+ struct virgl_screen *vs = virgl_screen(ctx->screen);
+ vtex->base.clean = FALSE;
+ vctx->num_transfers++;
+ vs->vws->transfer_put(vs->vws, vtex->base.hw_res,
+ &transfer->box, trans->base.stride, l_stride, trans->offset, transfer->level);
+
+ }
+ }
+
+ if (trans->resolve_tmp)
+ pipe_resource_reference((struct pipe_resource **)&trans->resolve_tmp, NULL);
+
+ util_slab_free(&vctx->texture_transfer_pool, trans);
+}
+
+
+static boolean
+vrend_resource_layout(struct virgl_texture *res,
+ uint32_t *total_size)
+{
+ struct pipe_resource *pt = &res->base.u.b;
+ unsigned level;
+ unsigned width = pt->width0;
+ unsigned height = pt->height0;
+ unsigned depth = pt->depth0;
+ unsigned buffer_size = 0;
+
+ for (level = 0; level <= pt->last_level; level++) {
+ unsigned slices;
+
+ if (pt->target == PIPE_TEXTURE_CUBE)
+ slices = 6;
+ else if (pt->target == PIPE_TEXTURE_3D)
+ slices = depth;
+ else
+ slices = pt->array_size;
+
+ res->stride[level] = util_format_get_stride(pt->format, width);
+ res->level_offset[level] = buffer_size;
+
+ buffer_size += (util_format_get_nblocksy(pt->format, height) *
+ slices * res->stride[level]);
+
+ width = u_minify(width, 1);
+ height = u_minify(height, 1);
+ depth = u_minify(depth, 1);
+ }
+
+ if (pt->nr_samples <= 1)
+ *total_size = buffer_size;
+ else /* don't create guest backing store for MSAA */
+ *total_size = 0;
+ return TRUE;
+}
+
+static boolean virgl_texture_get_handle(struct pipe_screen *screen,
+ struct pipe_resource *ptex,
+ struct winsys_handle *whandle)
+{
+ struct virgl_screen *vs = virgl_screen(screen);
+ struct virgl_texture *vtex = (struct virgl_texture *)ptex;
+
+ return vs->vws->resource_get_handle(vs->vws, vtex->base.hw_res, vtex->stride[0], whandle);
+}
+
+static void virgl_texture_destroy(struct pipe_screen *screen,
+ struct pipe_resource *res)
+{
+ struct virgl_screen *vs = virgl_screen(screen);
+ struct virgl_texture *vtex = (struct virgl_texture *)res;
+ vs->vws->resource_unref(vs->vws, vtex->base.hw_res);
+ FREE(vtex);
+}
+
+static const struct u_resource_vtbl virgl_texture_vtbl =
+{
+ virgl_texture_get_handle, /* get_handle */
+ virgl_texture_destroy, /* resource_destroy */
+ virgl_texture_transfer_map, /* transfer_map */
+ NULL, /* transfer_flush_region */
+ virgl_texture_transfer_unmap, /* transfer_unmap */
+ NULL /* transfer_inline_write */
+};
+
+struct pipe_resource *
+virgl_texture_from_handle(struct virgl_screen *vs,
+ const struct pipe_resource *template,
+ struct winsys_handle *whandle)
+{
+ struct virgl_texture *tex;
+ uint32_t size;
+
+   tex = CALLOC_STRUCT(virgl_texture);
+   if (!tex)
+      return NULL;
+ tex->base.u.b = *template;
+ tex->base.u.b.screen = &vs->base;
+ pipe_reference_init(&tex->base.u.b.reference, 1);
+ tex->base.u.vtbl = &virgl_texture_vtbl;
+ vrend_resource_layout(tex, &size);
+
+ tex->base.hw_res = vs->vws->resource_create_from_handle(vs->vws, whandle);
+ return &tex->base.u.b;
+}
+
+struct pipe_resource *virgl_texture_create(struct virgl_screen *vs,
+ const struct pipe_resource *template)
+{
+ struct virgl_texture *tex;
+ uint32_t size;
+ unsigned vbind;
+
+   tex = CALLOC_STRUCT(virgl_texture);
+   if (!tex)
+      return NULL;
+ tex->base.clean = TRUE;
+ tex->base.u.b = *template;
+ tex->base.u.b.screen = &vs->base;
+ pipe_reference_init(&tex->base.u.b.reference, 1);
+ tex->base.u.vtbl = &virgl_texture_vtbl;
+ vrend_resource_layout(tex, &size);
+
+ vbind = pipe_to_virgl_bind(template->bind);
+   tex->base.hw_res = vs->vws->resource_create(vs->vws, template->target,
+                                                template->format, vbind,
+                                                template->width0,
+                                                template->height0,
+                                                template->depth0,
+                                                template->array_size,
+                                                template->last_level,
+                                                template->nr_samples, size);
+ if (!tex->base.hw_res) {
+ FREE(tex);
+ return NULL;
+ }
+ return &tex->base.u.b;
+}
diff --git a/src/gallium/drivers/virgl/virgl_tgsi.c b/src/gallium/drivers/virgl/virgl_tgsi.c
new file mode 100644
index 00000000000..641b0b3e3b5
--- /dev/null
+++ b/src/gallium/drivers/virgl/virgl_tgsi.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2014, 2015 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* The TGSI that the virgl host accepts and what current Gallium produces
+   will diverge over time, so add a transform stage to strip out anything
+   we don't want to send unless the receiver supports it.
+*/
+#include "tgsi/tgsi_transform.h"
+#include "virgl_context.h"
+struct virgl_transform_context {
+ struct tgsi_transform_context base;
+};
+
+/* For now just strip out the newer properties that the remote side doesn't
+   understand yet. */
+static void
+virgl_tgsi_transform_property(struct tgsi_transform_context *ctx,
+ struct tgsi_full_property *prop)
+{
+ switch (prop->Property.PropertyName) {
+ case TGSI_PROPERTY_NUM_CLIPDIST_ENABLED:
+ case TGSI_PROPERTY_NUM_CULLDIST_ENABLED:
+ break;
+ default:
+ ctx->emit_property(ctx, prop);
+ break;
+ }
+}
+
+struct tgsi_token *virgl_tgsi_transform(const struct tgsi_token *tokens_in)
+{
+ struct virgl_transform_context transform;
+ const uint newLen = tgsi_num_tokens(tokens_in);
+ struct tgsi_token *new_tokens;
+
+ new_tokens = tgsi_alloc_tokens(newLen);
+ if (!new_tokens)
+ return NULL;
+
+ memset(&transform, 0, sizeof(transform));
+ transform.base.transform_property = virgl_tgsi_transform_property;
+ tgsi_transform_shader(tokens_in, new_tokens, newLen, &transform.base);
+
+ return new_tokens;
+}
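+
+/* Usage sketch (the caller shown is assumed, not defined in this file):
+ * a shader-create hook would filter the application's tokens through the
+ * transform before encoding them for the host, then free the copy:
+ *
+ *    struct tgsi_token *new_tokens = virgl_tgsi_transform(state->tokens);
+ *    if (!new_tokens)
+ *       return NULL;
+ *    ...encode new_tokens and hand them to the host...
+ *    FREE(new_tokens);
+ */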
diff --git a/src/gallium/drivers/virgl/virgl_winsys.h b/src/gallium/drivers/virgl/virgl_winsys.h
new file mode 100644
index 00000000000..76d401b5c12
--- /dev/null
+++ b/src/gallium/drivers/virgl/virgl_winsys.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2014, 2015 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef VIRGL_WINSYS_H
+#define VIRGL_WINSYS_H
+
+#include "pipe/p_compiler.h"
+
+struct winsys_handle;
+struct virgl_hw_res;
+
+#define VIRGL_MAX_CMDBUF_DWORDS (16*1024)
+
+struct virgl_drm_caps {
+ union virgl_caps caps;
+};
+
+struct virgl_cmd_buf {
+ unsigned cdw;
+ uint32_t *buf;
+};
+
+struct virgl_winsys {
+ unsigned pci_id;
+
+ void (*destroy)(struct virgl_winsys *vws);
+
+ int (*transfer_put)(struct virgl_winsys *vws,
+ struct virgl_hw_res *res,
+ const struct pipe_box *box,
+ uint32_t stride, uint32_t layer_stride,
+ uint32_t buf_offset, uint32_t level);
+
+ int (*transfer_get)(struct virgl_winsys *vws,
+ struct virgl_hw_res *res,
+ const struct pipe_box *box,
+ uint32_t stride, uint32_t layer_stride,
+ uint32_t buf_offset, uint32_t level);
+
+ struct virgl_hw_res *(*resource_create)(struct virgl_winsys *vws,
+ enum pipe_texture_target target,
+ uint32_t format, uint32_t bind,
+ uint32_t width, uint32_t height,
+ uint32_t depth, uint32_t array_size,
+ uint32_t last_level, uint32_t nr_samples,
+ uint32_t size);
+
+ void (*resource_unref)(struct virgl_winsys *vws, struct virgl_hw_res *res);
+
+ void *(*resource_map)(struct virgl_winsys *vws, struct virgl_hw_res *res);
+ void (*resource_wait)(struct virgl_winsys *vws, struct virgl_hw_res *res);
+
+ struct virgl_hw_res *(*resource_create_from_handle)(struct virgl_winsys *vws,
+ struct winsys_handle *whandle);
+ boolean (*resource_get_handle)(struct virgl_winsys *vws,
+ struct virgl_hw_res *res,
+ uint32_t stride,
+ struct winsys_handle *whandle);
+
+ struct virgl_cmd_buf *(*cmd_buf_create)(struct virgl_winsys *ws);
+ void (*cmd_buf_destroy)(struct virgl_cmd_buf *buf);
+
+ void (*emit_res)(struct virgl_winsys *vws, struct virgl_cmd_buf *buf, struct virgl_hw_res *res, boolean write_buffer);
+ int (*submit_cmd)(struct virgl_winsys *vws, struct virgl_cmd_buf *buf);
+
+ boolean (*res_is_referenced)(struct virgl_winsys *vws,
+ struct virgl_cmd_buf *buf,
+ struct virgl_hw_res *res);
+
+ int (*get_caps)(struct virgl_winsys *vws, struct virgl_drm_caps *caps);
+
+ /* fence */
+ struct pipe_fence_handle *(*cs_create_fence)(struct virgl_winsys *vws);
+ bool (*fence_wait)(struct virgl_winsys *vws,
+ struct pipe_fence_handle *fence,
+ uint64_t timeout);
+
+ void (*fence_reference)(struct virgl_winsys *vws,
+ struct pipe_fence_handle **dst,
+ struct pipe_fence_handle *src);
+
+ /* for sw paths */
+ void (*flush_frontbuffer)(struct virgl_winsys *vws,
+ struct virgl_hw_res *res,
+ unsigned level, unsigned layer,
+ void *winsys_drawable_handle,
+ struct pipe_box *sub_box);
+};
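+
+/* Rough flush-path usage of this interface, inferred from the entry points
+ * above (the local names are illustrative only):
+ *
+ *    struct virgl_cmd_buf *cbuf = vws->cmd_buf_create(vws);
+ *    ...encode commands into cbuf->buf, advancing cbuf->cdw...
+ *    vws->emit_res(vws, cbuf, res, TRUE);   (declare res referenced, written)
+ *    vws->submit_cmd(vws, cbuf);
+ *
+ * res_is_referenced() reports whether a resource is referenced by a
+ * not-yet-submitted command buffer, which the driver uses to decide when
+ * it must flush before letting the CPU touch that resource.
+ */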
+
+
+#endif