summaryrefslogtreecommitdiffstats
path: root/src/gallium/drivers
diff options
context:
space:
mode:
authorThierry Reding <[email protected]>2014-05-28 00:36:48 +0200
committerThierry Reding <[email protected]>2018-03-09 11:48:22 +0100
commit1755f608f5201e0a23f00cc3ea1b01edd07eb6ef (patch)
tree895ac31c09458ff34fa74690b18399ff6607b180 /src/gallium/drivers
parent2052dbdae363f4fd184842733ff9c96bd6e7f08c (diff)
tegra: Initial support
Tegra K1 and later use a GPU that can be driven by the Nouveau driver. But the GPU is a pure render node and has no display engine, hence the scanout needs to happen on the Tegra display hardware. The GPU and the display engine each have a separate DRM device node exposed by the kernel. To make the setup appear as a single device, this driver instantiates a Nouveau screen with each instance of a Tegra screen and forwards GPU requests to the Nouveau screen. For purposes of scanout it will import buffers created on the GPU into the display driver. Handles that userspace requests are those of the display driver so that they can be used to create framebuffers. This has been tested with some GBM test programs, as well as kmscube and weston. All of those run without modifications, but I'm sure there is a lot that can be improved. Some fixes contributed by Hector Martin <[email protected]>. Changes in v2: - duplicate file descriptor in winsys to avoid potential issues - require nouveau when building the tegra driver - check for nouveau driver name on render node - remove unneeded dependency on libdrm_tegra - remove zombie references to libudev - add missing headers to C_SOURCES variable - drop unneeded tegra/ prefix for includes - open device files with O_CLOEXEC - update copyrights Changes in v3: - properly unwrap resources in ->resource_copy_region() - support vertex buffers passed by user pointer - allocate custom stream and const uploader - silence error message on pre-Tegra124 - support X without explicit PRIME Changes in v4: - ship Meson build files in distribution tarball - drop duplicate driver_tegra dependency Reviewed-by: Emil Velikov <[email protected]> Acked-by: Emil Velikov <[email protected]> Tested-by: Andre Heider <[email protected]> Reviewed-by: Dmitry Osipenko <[email protected]> Reviewed-by: Dylan Baker <[email protected]> Signed-off-by: Thierry Reding <[email protected]>
Diffstat (limited to 'src/gallium/drivers')
-rw-r--r--src/gallium/drivers/tegra/Automake.inc11
-rw-r--r--src/gallium/drivers/tegra/Makefile.am14
-rw-r--r--src/gallium/drivers/tegra/Makefile.sources6
-rw-r--r--src/gallium/drivers/tegra/meson.build41
-rw-r--r--src/gallium/drivers/tegra/tegra_context.c1384
-rw-r--r--src/gallium/drivers/tegra/tegra_context.h81
-rw-r--r--src/gallium/drivers/tegra/tegra_resource.h76
-rw-r--r--src/gallium/drivers/tegra/tegra_screen.c688
-rw-r--r--src/gallium/drivers/tegra/tegra_screen.h45
9 files changed, 2346 insertions, 0 deletions
diff --git a/src/gallium/drivers/tegra/Automake.inc b/src/gallium/drivers/tegra/Automake.inc
new file mode 100644
index 00000000000..f6528191624
--- /dev/null
+++ b/src/gallium/drivers/tegra/Automake.inc
@@ -0,0 +1,11 @@
+if HAVE_GALLIUM_TEGRA
+
+TARGET_DRIVERS += tegra
+TARGET_CPPFLAGS += -DGALLIUM_TEGRA
+TARGET_LIB_DEPS += \
+ $(top_builddir)/src/gallium/winsys/tegra/drm/libtegradrm.la \
+ $(top_builddir)/src/gallium/drivers/tegra/libtegra.la \
+ $(LIBDRM_LIBS) \
+ $(TEGRA_LIBS)
+
+endif
diff --git a/src/gallium/drivers/tegra/Makefile.am b/src/gallium/drivers/tegra/Makefile.am
new file mode 100644
index 00000000000..1347d2548dc
--- /dev/null
+++ b/src/gallium/drivers/tegra/Makefile.am
@@ -0,0 +1,14 @@
+include Makefile.sources
+include $(top_srcdir)/src/gallium/Automake.inc
+
+AM_CFLAGS = \
+ -I$(top_srcdir)/include/drm-uapi \
+ $(GALLIUM_DRIVER_CFLAGS)
+
+noinst_LTLIBRARIES = libtegra.la
+
+libtegra_la_SOURCES = \
+ $(C_SOURCES)
+
+EXTRA_DIST = \
+ meson.build
diff --git a/src/gallium/drivers/tegra/Makefile.sources b/src/gallium/drivers/tegra/Makefile.sources
new file mode 100644
index 00000000000..af4ff838c7c
--- /dev/null
+++ b/src/gallium/drivers/tegra/Makefile.sources
@@ -0,0 +1,6 @@
+C_SOURCES := \
+ tegra_context.c \
+ tegra_context.h \
+ tegra_resource.h \
+ tegra_screen.c \
+ tegra_screen.h
diff --git a/src/gallium/drivers/tegra/meson.build b/src/gallium/drivers/tegra/meson.build
new file mode 100644
index 00000000000..79ccd143f29
--- /dev/null
+++ b/src/gallium/drivers/tegra/meson.build
@@ -0,0 +1,41 @@
+# Copyright © 2018 NVIDIA CORPORATION
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# Source list for the static tegra library. Kept in sync with the
+# C_SOURCES list in Makefile.sources, which also names the headers so
+# they are picked up by distribution tarballs and IDE indexing.
+files_tegra = files(
+ 'tegra_context.c',
+ 'tegra_context.h',
+ 'tegra_resource.h',
+ 'tegra_screen.c',
+ 'tegra_screen.h',
+)
+
+libtegra = static_library(
+ 'tegra',
+ files_tegra,
+ c_args : [c_vis_args],
+ include_directories : [
+ inc_include, inc_src, inc_gallium, inc_gallium_aux, inc_gallium_drivers,
+ inc_gallium_winsys, inc_drm_uapi
+ ],
+)
+
+driver_tegra = declare_dependency(
+ compile_args : '-DGALLIUM_TEGRA',
+ link_with : [libtegra, libtegradrm],
+)
diff --git a/src/gallium/drivers/tegra/tegra_context.c b/src/gallium/drivers/tegra/tegra_context.c
new file mode 100644
index 00000000000..bbc03628336
--- /dev/null
+++ b/src/gallium/drivers/tegra/tegra_context.c
@@ -0,0 +1,1384 @@
+/*
+ * Copyright © 2014-2018 NVIDIA Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <inttypes.h>
+#include <stdlib.h>
+
+#include "util/u_debug.h"
+#include "util/u_inlines.h"
+#include "util/u_upload_mgr.h"
+
+#include "tegra_context.h"
+#include "tegra_resource.h"
+#include "tegra_screen.h"
+
+/*
+ * Tear down the wrapper context: release the stream uploader (if one was
+ * allocated for this context) and then destroy the wrapped GPU context
+ * before freeing the wrapper itself.
+ */
+static void
+tegra_destroy(struct pipe_context *pcontext)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ if (context->base.stream_uploader)
+ u_upload_destroy(context->base.stream_uploader);
+
+ context->gpu->destroy(context->gpu);
+ free(context);
+}
+
+/*
+ * Forward a draw call to the wrapped GPU context, first replacing any
+ * Tegra resource wrappers (indirect-draw buffer, index buffer) with the
+ * underlying GPU resources so the GPU driver never sees wrapper objects.
+ * Stack copies are used so the caller's structures stay untouched.
+ */
+static void
+tegra_draw_vbo(struct pipe_context *pcontext,
+ const struct pipe_draw_info *pinfo)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+ struct pipe_draw_indirect_info indirect;
+ struct pipe_draw_info info;
+
+ if (pinfo && (pinfo->indirect || pinfo->index_size)) {
+ memcpy(&info, pinfo, sizeof(info));
+
+ if (pinfo->indirect) {
+ memcpy(&indirect, pinfo->indirect, sizeof(indirect));
+ indirect.buffer = tegra_resource_unwrap(info.indirect->buffer);
+ info.indirect = &indirect;
+ }
+
+ /* user-pointer indices carry no pipe_resource, so nothing to unwrap */
+ if (pinfo->index_size && !pinfo->has_user_indices)
+ info.index.resource = tegra_resource_unwrap(info.index.resource);
+
+ pinfo = &info;
+ }
+
+ context->gpu->draw_vbo(context->gpu, pinfo);
+}
+
+static void
+tegra_render_condition(struct pipe_context *pcontext,
+ struct pipe_query *query,
+ boolean condition,
+ unsigned int mode)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->render_condition(context->gpu, query, condition, mode);
+}
+
+static struct pipe_query *
+tegra_create_query(struct pipe_context *pcontext, unsigned int query_type,
+ unsigned int index)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ return context->gpu->create_query(context->gpu, query_type, index);
+}
+
+static struct pipe_query *
+tegra_create_batch_query(struct pipe_context *pcontext,
+ unsigned int num_queries,
+ unsigned int *queries)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ return context->gpu->create_batch_query(context->gpu, num_queries,
+ queries);
+}
+
+static void
+tegra_destroy_query(struct pipe_context *pcontext, struct pipe_query *query)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->destroy_query(context->gpu, query);
+}
+
+static boolean
+tegra_begin_query(struct pipe_context *pcontext, struct pipe_query *query)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ return context->gpu->begin_query(context->gpu, query);
+}
+
+static bool
+tegra_end_query(struct pipe_context *pcontext, struct pipe_query *query)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ return context->gpu->end_query(context->gpu, query);
+}
+
+static boolean
+tegra_get_query_result(struct pipe_context *pcontext,
+ struct pipe_query *query,
+ boolean wait,
+ union pipe_query_result *result)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ return context->gpu->get_query_result(context->gpu, query, wait,
+ result);
+}
+
+static void
+tegra_get_query_result_resource(struct pipe_context *pcontext,
+ struct pipe_query *query,
+ boolean wait,
+ enum pipe_query_value_type result_type,
+ int index,
+ struct pipe_resource *resource,
+ unsigned int offset)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->get_query_result_resource(context->gpu, query, wait,
+ result_type, index, resource,
+ offset);
+}
+
+static void
+tegra_set_active_query_state(struct pipe_context *pcontext, boolean enable)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->set_active_query_state(context->gpu, enable);
+}
+
+static void *
+tegra_create_blend_state(struct pipe_context *pcontext,
+ const struct pipe_blend_state *cso)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ return context->gpu->create_blend_state(context->gpu, cso);
+}
+
+static void
+tegra_bind_blend_state(struct pipe_context *pcontext, void *so)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->bind_blend_state(context->gpu, so);
+}
+
+static void
+tegra_delete_blend_state(struct pipe_context *pcontext, void *so)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->delete_blend_state(context->gpu, so);
+}
+
+static void *
+tegra_create_sampler_state(struct pipe_context *pcontext,
+ const struct pipe_sampler_state *cso)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ return context->gpu->create_sampler_state(context->gpu, cso);
+}
+
+static void
+tegra_bind_sampler_states(struct pipe_context *pcontext, unsigned shader,
+ unsigned start_slot, unsigned num_samplers,
+ void **samplers)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->bind_sampler_states(context->gpu, shader, start_slot,
+ num_samplers, samplers);
+}
+
+static void
+tegra_delete_sampler_state(struct pipe_context *pcontext, void *so)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->delete_sampler_state(context->gpu, so);
+}
+
+static void *
+tegra_create_rasterizer_state(struct pipe_context *pcontext,
+ const struct pipe_rasterizer_state *cso)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ return context->gpu->create_rasterizer_state(context->gpu, cso);
+}
+
+static void
+tegra_bind_rasterizer_state(struct pipe_context *pcontext, void *so)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->bind_rasterizer_state(context->gpu, so);
+}
+
+static void
+tegra_delete_rasterizer_state(struct pipe_context *pcontext, void *so)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->delete_rasterizer_state(context->gpu, so);
+}
+
+static void *
+tegra_create_depth_stencil_alpha_state(struct pipe_context *pcontext,
+ const struct pipe_depth_stencil_alpha_state *cso)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ return context->gpu->create_depth_stencil_alpha_state(context->gpu, cso);
+}
+
+static void
+tegra_bind_depth_stencil_alpha_state(struct pipe_context *pcontext, void *so)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->bind_depth_stencil_alpha_state(context->gpu, so);
+}
+
+static void
+tegra_delete_depth_stencil_alpha_state(struct pipe_context *pcontext, void *so)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->delete_depth_stencil_alpha_state(context->gpu, so);
+}
+
+static void *
+tegra_create_fs_state(struct pipe_context *pcontext,
+ const struct pipe_shader_state *cso)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ return context->gpu->create_fs_state(context->gpu, cso);
+}
+
+static void
+tegra_bind_fs_state(struct pipe_context *pcontext, void *so)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->bind_fs_state(context->gpu, so);
+}
+
+static void
+tegra_delete_fs_state(struct pipe_context *pcontext, void *so)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->delete_fs_state(context->gpu, so);
+}
+
+static void *
+tegra_create_vs_state(struct pipe_context *pcontext,
+ const struct pipe_shader_state *cso)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ return context->gpu->create_vs_state(context->gpu, cso);
+}
+
+static void
+tegra_bind_vs_state(struct pipe_context *pcontext, void *so)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->bind_vs_state(context->gpu, so);
+}
+
+static void
+tegra_delete_vs_state(struct pipe_context *pcontext, void *so)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->delete_vs_state(context->gpu, so);
+}
+
+static void *
+tegra_create_gs_state(struct pipe_context *pcontext,
+ const struct pipe_shader_state *cso)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ return context->gpu->create_gs_state(context->gpu, cso);
+}
+
+static void
+tegra_bind_gs_state(struct pipe_context *pcontext, void *so)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->bind_gs_state(context->gpu, so);
+}
+
+static void
+tegra_delete_gs_state(struct pipe_context *pcontext, void *so)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->delete_gs_state(context->gpu, so);
+}
+
+static void *
+tegra_create_tcs_state(struct pipe_context *pcontext,
+ const struct pipe_shader_state *cso)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ return context->gpu->create_tcs_state(context->gpu, cso);
+}
+
+static void
+tegra_bind_tcs_state(struct pipe_context *pcontext, void *so)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->bind_tcs_state(context->gpu, so);
+}
+
+static void
+tegra_delete_tcs_state(struct pipe_context *pcontext, void *so)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->delete_tcs_state(context->gpu, so);
+}
+
+static void *
+tegra_create_tes_state(struct pipe_context *pcontext,
+ const struct pipe_shader_state *cso)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ return context->gpu->create_tes_state(context->gpu, cso);
+}
+
+static void
+tegra_bind_tes_state(struct pipe_context *pcontext, void *so)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->bind_tes_state(context->gpu, so);
+}
+
+static void
+tegra_delete_tes_state(struct pipe_context *pcontext, void *so)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->delete_tes_state(context->gpu, so);
+}
+
+static void *
+tegra_create_vertex_elements_state(struct pipe_context *pcontext,
+ unsigned num_elements,
+ const struct pipe_vertex_element *elements)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ return context->gpu->create_vertex_elements_state(context->gpu,
+ num_elements,
+ elements);
+}
+
+static void
+tegra_bind_vertex_elements_state(struct pipe_context *pcontext, void *so)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->bind_vertex_elements_state(context->gpu, so);
+}
+
+static void
+tegra_delete_vertex_elements_state(struct pipe_context *pcontext, void *so)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->delete_vertex_elements_state(context->gpu, so);
+}
+
+static void
+tegra_set_blend_color(struct pipe_context *pcontext,
+ const struct pipe_blend_color *color)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->set_blend_color(context->gpu, color);
+}
+
+static void
+tegra_set_stencil_ref(struct pipe_context *pcontext,
+ const struct pipe_stencil_ref *ref)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->set_stencil_ref(context->gpu, ref);
+}
+
+static void
+tegra_set_sample_mask(struct pipe_context *pcontext, unsigned int mask)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->set_sample_mask(context->gpu, mask);
+}
+
+static void
+tegra_set_min_samples(struct pipe_context *pcontext, unsigned int samples)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->set_min_samples(context->gpu, samples);
+}
+
+static void
+tegra_set_clip_state(struct pipe_context *pcontext,
+ const struct pipe_clip_state *state)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->set_clip_state(context->gpu, state);
+}
+
+/*
+ * Bind a constant buffer on the wrapped GPU context. If the buffer is
+ * backed by a pipe_resource it is a Tegra wrapper and must be unwrapped
+ * via a stack copy; user-pointer constant buffers (buf->buffer == NULL)
+ * are passed through unchanged.
+ */
+static void
+tegra_set_constant_buffer(struct pipe_context *pcontext, unsigned int shader,
+ unsigned int index,
+ const struct pipe_constant_buffer *buf)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+ struct pipe_constant_buffer buffer;
+
+ if (buf && buf->buffer) {
+ memcpy(&buffer, buf, sizeof(buffer));
+ buffer.buffer = tegra_resource_unwrap(buffer.buffer);
+ buf = &buffer;
+ }
+
+ context->gpu->set_constant_buffer(context->gpu, shader, index, buf);
+}
+
+/*
+ * Forward framebuffer state to the wrapped GPU context with all surface
+ * wrappers replaced by the underlying GPU surfaces. Unused colour-buffer
+ * slots are explicitly cleared so the GPU driver never reads stale
+ * pointers from the stack copy.
+ */
+static void
+tegra_set_framebuffer_state(struct pipe_context *pcontext,
+ const struct pipe_framebuffer_state *fb)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+ struct pipe_framebuffer_state state;
+ unsigned i;
+
+ if (fb) {
+ memcpy(&state, fb, sizeof(state));
+
+ for (i = 0; i < fb->nr_cbufs; i++)
+ state.cbufs[i] = tegra_surface_unwrap(fb->cbufs[i]);
+
+ /* NULL out the remaining slots; i continues from the loop above */
+ while (i < PIPE_MAX_COLOR_BUFS)
+ state.cbufs[i++] = NULL;
+
+ state.zsbuf = tegra_surface_unwrap(fb->zsbuf);
+
+ fb = &state;
+ }
+
+ context->gpu->set_framebuffer_state(context->gpu, fb);
+}
+
+static void
+tegra_set_polygon_stipple(struct pipe_context *pcontext,
+ const struct pipe_poly_stipple *stipple)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->set_polygon_stipple(context->gpu, stipple);
+}
+
+static void
+tegra_set_scissor_states(struct pipe_context *pcontext, unsigned start_slot,
+ unsigned num_scissors,
+ const struct pipe_scissor_state *scissors)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->set_scissor_states(context->gpu, start_slot, num_scissors,
+ scissors);
+}
+
+static void
+tegra_set_window_rectangles(struct pipe_context *pcontext, boolean include,
+ unsigned int num_rectangles,
+ const struct pipe_scissor_state *rectangles)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->set_window_rectangles(context->gpu, include, num_rectangles,
+ rectangles);
+}
+
+static void
+tegra_set_viewport_states(struct pipe_context *pcontext, unsigned start_slot,
+ unsigned num_viewports,
+ const struct pipe_viewport_state *viewports)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->set_viewport_states(context->gpu, start_slot, num_viewports,
+ viewports);
+}
+
+static void
+tegra_set_sampler_views(struct pipe_context *pcontext, unsigned shader,
+ unsigned start_slot, unsigned num_views,
+ struct pipe_sampler_view **pviews)
+{
+ struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS];
+ struct tegra_context *context = to_tegra_context(pcontext);
+ unsigned i;
+
+ for (i = 0; i < num_views; i++)
+ views[i] = tegra_sampler_view_unwrap(pviews[i]);
+
+ context->gpu->set_sampler_views(context->gpu, shader, start_slot,
+ num_views, views);
+}
+
+static void
+tegra_set_tess_state(struct pipe_context *pcontext,
+ const float default_outer_level[4],
+ const float default_inner_level[2])
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->set_tess_state(context->gpu, default_outer_level,
+ default_inner_level);
+}
+
+static void
+tegra_set_debug_callback(struct pipe_context *pcontext,
+ const struct pipe_debug_callback *callback)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->set_debug_callback(context->gpu, callback);
+}
+
+static void
+tegra_set_shader_buffers(struct pipe_context *pcontext, unsigned int shader,
+ unsigned start, unsigned count,
+ const struct pipe_shader_buffer *buffers)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->set_shader_buffers(context->gpu, shader, start, count,
+ buffers);
+}
+
+static void
+tegra_set_shader_images(struct pipe_context *pcontext, unsigned int shader,
+ unsigned start, unsigned count,
+ const struct pipe_image_view *images)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->set_shader_images(context->gpu, shader, start, count,
+ images);
+}
+
+/*
+ * Bind vertex buffers on the wrapped GPU context. Resource-backed
+ * buffers are unwrapped via a stack copy; buffers passed by user
+ * pointer (is_user_buffer) hold a CPU address rather than a
+ * pipe_resource and are forwarded as-is.
+ */
+static void
+tegra_set_vertex_buffers(struct pipe_context *pcontext, unsigned start_slot,
+ unsigned num_buffers,
+ const struct pipe_vertex_buffer *buffers)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+ struct pipe_vertex_buffer buf[PIPE_MAX_SHADER_INPUTS];
+ unsigned i;
+
+ if (num_buffers && buffers) {
+ memcpy(buf, buffers, num_buffers * sizeof(struct pipe_vertex_buffer));
+
+ for (i = 0; i < num_buffers; i++) {
+ if (!buf[i].is_user_buffer)
+ buf[i].buffer.resource = tegra_resource_unwrap(buf[i].buffer.resource);
+ }
+
+ buffers = buf;
+ }
+
+ context->gpu->set_vertex_buffers(context->gpu, start_slot, num_buffers,
+ buffers);
+}
+
+static struct pipe_stream_output_target *
+tegra_create_stream_output_target(struct pipe_context *pcontext,
+ struct pipe_resource *presource,
+ unsigned buffer_offset,
+ unsigned buffer_size)
+{
+ struct tegra_resource *resource = to_tegra_resource(presource);
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ return context->gpu->create_stream_output_target(context->gpu,
+ resource->gpu,
+ buffer_offset,
+ buffer_size);
+}
+
+static void
+tegra_stream_output_target_destroy(struct pipe_context *pcontext,
+ struct pipe_stream_output_target *target)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->stream_output_target_destroy(context->gpu, target);
+}
+
+static void
+tegra_set_stream_output_targets(struct pipe_context *pcontext,
+ unsigned num_targets,
+ struct pipe_stream_output_target **targets,
+ const unsigned *offsets)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->set_stream_output_targets(context->gpu, num_targets,
+ targets, offsets);
+}
+
+static void
+tegra_resource_copy_region(struct pipe_context *pcontext,
+ struct pipe_resource *pdst,
+ unsigned int dst_level,
+ unsigned int dstx,
+ unsigned int dsty,
+ unsigned int dstz,
+ struct pipe_resource *psrc,
+ unsigned int src_level,
+ const struct pipe_box *src_box)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+ struct tegra_resource *dst = to_tegra_resource(pdst);
+ struct tegra_resource *src = to_tegra_resource(psrc);
+
+ context->gpu->resource_copy_region(context->gpu, dst->gpu, dst_level, dstx,
+ dsty, dstz, src->gpu, src_level,
+ src_box);
+}
+
+/*
+ * Forward a blit to the wrapped GPU context with both the source and
+ * destination resource wrappers replaced by the underlying GPU
+ * resources, using a stack copy of the blit info.
+ */
+static void
+tegra_blit(struct pipe_context *pcontext, const struct pipe_blit_info *pinfo)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+ struct pipe_blit_info info;
+
+ if (pinfo) {
+ memcpy(&info, pinfo, sizeof(info));
+ info.dst.resource = tegra_resource_unwrap(info.dst.resource);
+ info.src.resource = tegra_resource_unwrap(info.src.resource);
+ pinfo = &info;
+ }
+
+ context->gpu->blit(context->gpu, pinfo);
+}
+
+static void
+tegra_clear(struct pipe_context *pcontext, unsigned buffers,
+ const union pipe_color_union *color, double depth,
+ unsigned stencil)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->clear(context->gpu, buffers, color, depth, stencil);
+}
+
+static void
+tegra_clear_render_target(struct pipe_context *pcontext,
+ struct pipe_surface *pdst,
+ const union pipe_color_union *color,
+ unsigned int dstx,
+ unsigned int dsty,
+ unsigned int width,
+ unsigned int height,
+ bool render_condition)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+ struct tegra_surface *dst = to_tegra_surface(pdst);
+
+ context->gpu->clear_render_target(context->gpu, dst->gpu, color, dstx,
+ dsty, width, height, render_condition);
+}
+
+static void
+tegra_clear_depth_stencil(struct pipe_context *pcontext,
+ struct pipe_surface *pdst,
+ unsigned int flags,
+ double depth,
+ unsigned int stencil,
+ unsigned int dstx,
+ unsigned int dsty,
+ unsigned int width,
+ unsigned int height,
+ bool render_condition)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+ struct tegra_surface *dst = to_tegra_surface(pdst);
+
+ context->gpu->clear_depth_stencil(context->gpu, dst->gpu, flags, depth,
+ stencil, dstx, dsty, width, height,
+ render_condition);
+}
+
+static void
+tegra_clear_texture(struct pipe_context *pcontext,
+ struct pipe_resource *presource,
+ unsigned int level,
+ const struct pipe_box *box,
+ const void *data)
+{
+ struct tegra_resource *resource = to_tegra_resource(presource);
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->clear_texture(context->gpu, resource->gpu, level, box, data);
+}
+
+static void
+tegra_clear_buffer(struct pipe_context *pcontext,
+ struct pipe_resource *presource,
+ unsigned int offset,
+ unsigned int size,
+ const void *value,
+ int value_size)
+{
+ struct tegra_resource *resource = to_tegra_resource(presource);
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->clear_buffer(context->gpu, resource->gpu, offset, size,
+ value, value_size);
+}
+
+static void
+tegra_flush(struct pipe_context *pcontext, struct pipe_fence_handle **fence,
+ unsigned flags)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->flush(context->gpu, fence, flags);
+}
+
+static void
+tegra_create_fence_fd(struct pipe_context *pcontext,
+ struct pipe_fence_handle **fence,
+ int fd, enum pipe_fd_type type)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
+ context->gpu->create_fence_fd(context->gpu, fence, fd, type);
+}
+
+static void
+tegra_fence_server_sync(struct pipe_context *pcontext,
+ struct pipe_fence_handle *fence)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+
+ context->gpu->fence_server_sync(context->gpu, fence);
+}
+
+/*
+ * Create a Tegra wrapper around a GPU sampler view. The wrapper mirrors
+ * the GPU view's state but holds its own reference to the Tegra resource
+ * so callers only ever see wrapper objects. Returns NULL on allocation
+ * failure or if the GPU driver fails to create the view.
+ */
+static struct pipe_sampler_view *
+tegra_create_sampler_view(struct pipe_context *pcontext,
+ struct pipe_resource *presource,
+ const struct pipe_sampler_view *template)
+{
+ struct tegra_resource *resource = to_tegra_resource(presource);
+ struct tegra_context *context = to_tegra_context(pcontext);
+ struct tegra_sampler_view *view;
+
+ view = calloc(1, sizeof(*view));
+ if (!view)
+ return NULL;
+
+ view->gpu = context->gpu->create_sampler_view(context->gpu, resource->gpu,
+ template);
+ /* bail out (as tegra_create_surface() does) rather than memcpy from NULL */
+ if (!view->gpu) {
+ free(view);
+ return NULL;
+ }
+
+ memcpy(&view->base, view->gpu, sizeof(*view->gpu));
+ /* overwrite to prevent reference from being released */
+ view->base.texture = NULL;
+
+ pipe_reference_init(&view->base.reference, 1);
+ pipe_resource_reference(&view->base.texture, presource);
+ view->base.context = pcontext;
+
+ return &view->base;
+}
+
+static void
+tegra_sampler_view_destroy(struct pipe_context *pcontext,
+ struct pipe_sampler_view *pview)
+{
+ struct tegra_sampler_view *view = to_tegra_sampler_view(pview);
+
+ pipe_resource_reference(&view->base.texture, NULL);
+ pipe_sampler_view_reference(&view->gpu, NULL);
+ free(view);
+}
+
+/*
+ * Create a Tegra wrapper around a GPU surface. The wrapper's base copies
+ * the GPU surface's state but keeps its own reference to the Tegra
+ * resource, so the texture pointer inherited from the memcpy is cleared
+ * before taking that reference. Returns NULL on allocation or GPU
+ * driver failure.
+ */
+static struct pipe_surface *
+tegra_create_surface(struct pipe_context *pcontext,
+ struct pipe_resource *presource,
+ const struct pipe_surface *template)
+{
+ struct tegra_resource *resource = to_tegra_resource(presource);
+ struct tegra_context *context = to_tegra_context(pcontext);
+ struct tegra_surface *surface;
+
+ surface = calloc(1, sizeof(*surface));
+ if (!surface)
+ return NULL;
+
+ surface->gpu = context->gpu->create_surface(context->gpu, resource->gpu,
+ template);
+ if (!surface->gpu) {
+ free(surface);
+ return NULL;
+ }
+
+ memcpy(&surface->base, surface->gpu, sizeof(*surface->gpu));
+ /* overwrite to prevent reference from being released */
+ surface->base.texture = NULL;
+
+ pipe_reference_init(&surface->base.reference, 1);
+ pipe_resource_reference(&surface->base.texture, presource);
+ surface->base.context = &context->base;
+
+ return &surface->base;
+}
+
+static void
+tegra_surface_destroy(struct pipe_context *pcontext,
+ struct pipe_surface *psurface)
+{
+ struct tegra_surface *surface = to_tegra_surface(psurface);
+
+ pipe_resource_reference(&surface->base.texture, NULL);
+ pipe_surface_reference(&surface->gpu, NULL);
+ free(surface);
+}
+
+/*
+ * Map a Tegra resource by mapping the underlying GPU resource and
+ * wrapping the resulting transfer. The wrapper transfer references the
+ * Tegra resource (not the GPU one) so that the unwrap in
+ * tegra_transfer_unmap() finds the right object. Returns NULL if the
+ * wrapper allocation or the GPU mapping fails.
+ */
+static void *
+tegra_transfer_map(struct pipe_context *pcontext,
+ struct pipe_resource *presource,
+ unsigned level, unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **ptransfer)
+{
+ struct tegra_resource *resource = to_tegra_resource(presource);
+ struct tegra_context *context = to_tegra_context(pcontext);
+ struct tegra_transfer *transfer;
+
+ transfer = calloc(1, sizeof(*transfer));
+ if (!transfer)
+ return NULL;
+
+ transfer->map = context->gpu->transfer_map(context->gpu, resource->gpu,
+ level, usage, box,
+ &transfer->gpu);
+ /* on failure transfer->gpu is undefined; don't memcpy from it */
+ if (!transfer->map) {
+ free(transfer);
+ return NULL;
+ }
+
+ memcpy(&transfer->base, transfer->gpu, sizeof(*transfer->gpu));
+ transfer->base.resource = NULL;
+ pipe_resource_reference(&transfer->base.resource, presource);
+
+ *ptransfer = &transfer->base;
+
+ return transfer->map;
+}
+
+/* Forward a flush of an explicitly mapped region to the GPU context. */
+static void
+tegra_transfer_flush_region(struct pipe_context *pcontext,
+                            struct pipe_transfer *ptransfer,
+                            const struct pipe_box *box)
+{
+   struct tegra_transfer *transfer = to_tegra_transfer(ptransfer);
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   context->gpu->transfer_flush_region(context->gpu, transfer->gpu, box);
+}
+
+/*
+ * Unmap the wrapped transfer: unmap on the GPU context, drop the resource
+ * reference taken in tegra_transfer_map() and free the wrapper.
+ */
+static void
+tegra_transfer_unmap(struct pipe_context *pcontext,
+                     struct pipe_transfer *ptransfer)
+{
+   struct tegra_transfer *transfer = to_tegra_transfer(ptransfer);
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   context->gpu->transfer_unmap(context->gpu, transfer->gpu);
+   pipe_resource_reference(&transfer->base.resource, NULL);
+   free(transfer);
+}
+
+/* Forward a buffer upload to the GPU context, unwrapping the resource. */
+static void
+tegra_buffer_subdata(struct pipe_context *pcontext,
+                     struct pipe_resource *presource,
+                     unsigned usage, unsigned offset,
+                     unsigned size, const void *data)
+{
+   struct tegra_resource *resource = to_tegra_resource(presource);
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   context->gpu->buffer_subdata(context->gpu, resource->gpu, usage, offset,
+                                size, data);
+}
+
+/* Forward a texture upload to the GPU context, unwrapping the resource. */
+static void
+tegra_texture_subdata(struct pipe_context *pcontext,
+                      struct pipe_resource *presource,
+                      unsigned level,
+                      unsigned usage,
+                      const struct pipe_box *box,
+                      const void *data,
+                      unsigned stride,
+                      unsigned layer_stride)
+{
+   struct tegra_resource *resource = to_tegra_resource(presource);
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   context->gpu->texture_subdata(context->gpu, resource->gpu, level, usage,
+                                 box, data, stride, layer_stride);
+}
+
+/* Forward a texture barrier to the GPU context. */
+static void
+tegra_texture_barrier(struct pipe_context *pcontext, unsigned int flags)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   context->gpu->texture_barrier(context->gpu, flags);
+}
+
+/* Forward a memory barrier to the GPU context. */
+static void
+tegra_memory_barrier(struct pipe_context *pcontext, unsigned int flags)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   context->gpu->memory_barrier(context->gpu, flags);
+}
+
+/* Forward video codec creation to the GPU context. */
+static struct pipe_video_codec *
+tegra_create_video_codec(struct pipe_context *pcontext,
+                         const struct pipe_video_codec *template)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   return context->gpu->create_video_codec(context->gpu, template);
+}
+
+/* Forward video buffer creation to the GPU context. */
+static struct pipe_video_buffer *
+tegra_create_video_buffer(struct pipe_context *pcontext,
+                          const struct pipe_video_buffer *template)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   return context->gpu->create_video_buffer(context->gpu, template);
+}
+
+/* Forward compute state creation to the GPU context. */
+static void *
+tegra_create_compute_state(struct pipe_context *pcontext,
+                           const struct pipe_compute_state *template)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   return context->gpu->create_compute_state(context->gpu, template);
+}
+
+/* Bind a compute state object on the GPU context. */
+static void
+tegra_bind_compute_state(struct pipe_context *pcontext, void *so)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   context->gpu->bind_compute_state(context->gpu, so);
+}
+
+/* Delete a compute state object on the GPU context. */
+static void
+tegra_delete_compute_state(struct pipe_context *pcontext, void *so)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   context->gpu->delete_compute_state(context->gpu, so);
+}
+
+static void
+tegra_set_compute_resources(struct pipe_context *pcontext,
+                            unsigned int start, unsigned int count,
+                            struct pipe_surface **resources)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   /* XXX unwrap resources: the surfaces passed through here are still the
+    * Tegra wrappers, not the underlying GPU surfaces */
+
+   context->gpu->set_compute_resources(context->gpu, start, count, resources);
+}
+
+static void
+tegra_set_global_binding(struct pipe_context *pcontext, unsigned int first,
+                         unsigned int count, struct pipe_resource **resources,
+                         uint32_t **handles)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   /* XXX unwrap resources: the resources passed through here are still the
+    * Tegra wrappers, not the underlying GPU resources */
+
+   context->gpu->set_global_binding(context->gpu, first, count, resources,
+                                    handles);
+}
+
+/* Forward a compute grid launch to the GPU context. */
+static void
+tegra_launch_grid(struct pipe_context *pcontext,
+                  const struct pipe_grid_info *info)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   /* XXX unwrap info->indirect? */
+
+   context->gpu->launch_grid(context->gpu, info);
+}
+
+/* Forward a sample position query to the GPU context. */
+static void
+tegra_get_sample_position(struct pipe_context *pcontext, unsigned int count,
+                          unsigned int index, float *value)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   context->gpu->get_sample_position(context->gpu, count, index, value);
+}
+
+/* Forward a timestamp query to the GPU context. */
+static uint64_t
+tegra_get_timestamp(struct pipe_context *pcontext)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   return context->gpu->get_timestamp(context->gpu);
+}
+
+/* Forward a resource flush to the GPU context, unwrapping the resource. */
+static void
+tegra_flush_resource(struct pipe_context *pcontext,
+                     struct pipe_resource *presource)
+{
+   struct tegra_resource *resource = to_tegra_resource(presource);
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   context->gpu->flush_resource(context->gpu, resource->gpu);
+}
+
+/* Forward a resource invalidation to the GPU context. */
+static void
+tegra_invalidate_resource(struct pipe_context *pcontext,
+                          struct pipe_resource *presource)
+{
+   struct tegra_resource *resource = to_tegra_resource(presource);
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   context->gpu->invalidate_resource(context->gpu, resource->gpu);
+}
+
+/* Query the GPU context for its device reset status. */
+static enum pipe_reset_status
+tegra_get_device_reset_status(struct pipe_context *pcontext)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   return context->gpu->get_device_reset_status(context->gpu);
+}
+
+/* Install the device reset callback on the GPU context. */
+static void
+tegra_set_device_reset_callback(struct pipe_context *pcontext,
+                                const struct pipe_device_reset_callback *cb)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   context->gpu->set_device_reset_callback(context->gpu, cb);
+}
+
+/* Forward a debug state dump request to the GPU context. */
+static void
+tegra_dump_debug_state(struct pipe_context *pcontext, FILE *stream,
+                       unsigned int flags)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   context->gpu->dump_debug_state(context->gpu, stream, flags);
+}
+
+/* Forward a string marker emission to the GPU context. */
+static void
+tegra_emit_string_marker(struct pipe_context *pcontext, const char *string,
+                         int length)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   context->gpu->emit_string_marker(context->gpu, string, length);
+}
+
+/* Forward mipmap generation to the GPU context, unwrapping the resource. */
+static boolean
+tegra_generate_mipmap(struct pipe_context *pcontext,
+                      struct pipe_resource *presource,
+                      enum pipe_format format,
+                      unsigned int base_level,
+                      unsigned int last_level,
+                      unsigned int first_layer,
+                      unsigned int last_layer)
+{
+   struct tegra_resource *resource = to_tegra_resource(presource);
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   return context->gpu->generate_mipmap(context->gpu, resource->gpu, format,
+                                        base_level, last_level, first_layer,
+                                        last_layer);
+}
+
+/*
+ * Bindless texture/image handle entry-points: these forward directly to the
+ * GPU context. NOTE(review): the sampler view passed to
+ * create_texture_handle() does not appear to be unwrapped here, unlike
+ * resources elsewhere in this file — verify against the sampler-view
+ * wrapping convention used by the create_sampler_view() path.
+ */
+static uint64_t
+tegra_create_texture_handle(struct pipe_context *pcontext,
+                            struct pipe_sampler_view *view,
+                            const struct pipe_sampler_state *state)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   return context->gpu->create_texture_handle(context->gpu, view, state);
+}
+
+static void tegra_delete_texture_handle(struct pipe_context *pcontext,
+                                        uint64_t handle)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   context->gpu->delete_texture_handle(context->gpu, handle);
+}
+
+static void tegra_make_texture_handle_resident(struct pipe_context *pcontext,
+                                               uint64_t handle, bool resident)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   context->gpu->make_texture_handle_resident(context->gpu, handle, resident);
+}
+
+static uint64_t tegra_create_image_handle(struct pipe_context *pcontext,
+                                          const struct pipe_image_view *image)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   return context->gpu->create_image_handle(context->gpu, image);
+}
+
+static void tegra_delete_image_handle(struct pipe_context *pcontext,
+                                      uint64_t handle)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   context->gpu->delete_image_handle(context->gpu, handle);
+}
+
+static void tegra_make_image_handle_resident(struct pipe_context *pcontext,
+                                             uint64_t handle, unsigned access,
+                                             bool resident)
+{
+   struct tegra_context *context = to_tegra_context(pcontext);
+
+   context->gpu->make_image_handle_resident(context->gpu, handle, access,
+                                            resident);
+}
+
+/*
+ * Create a Tegra context wrapping a freshly created Nouveau GPU context.
+ *
+ * The wrapper's pipe_context vtable is populated with the tegra_* thunks
+ * defined above, which unwrap Tegra resources/surfaces/transfers before
+ * forwarding to the GPU context. Returns NULL on allocation or GPU context
+ * creation failure.
+ */
+struct pipe_context *
+tegra_screen_context_create(struct pipe_screen *pscreen, void *priv,
+                            unsigned int flags)
+{
+   struct tegra_screen *screen = to_tegra_screen(pscreen);
+   struct tegra_context *context;
+
+   context = calloc(1, sizeof(*context));
+   if (!context)
+      return NULL;
+
+   context->gpu = screen->gpu->context_create(screen->gpu, priv, flags);
+   if (!context->gpu) {
+      debug_error("failed to create GPU context\n");
+      goto free;
+   }
+
+   context->base.screen = &screen->base;
+   context->base.priv = priv;
+
+   /*
+    * Create custom stream and const uploaders. Note that technically nouveau
+    * already creates uploaders that could be reused, but that would make the
+    * resource unwrapping rather complicated. The reason for that is that both
+    * uploaders create resources based on the context that they were created
+    * from, which means that nouveau's uploader will use the nouveau context
+    * which means that those resources must not be unwrapped. So before each
+    * resource is unwrapped, the code would need to check that it does not
+    * correspond to the uploaders' buffers.
+    *
+    * However, duplicating the uploaders here sounds worse than it is. The
+    * default implementation that nouveau uses allocates buffers lazily, and
+    * since it is never used, no buffers will ever be allocated and the only
+    * memory wasted is that occupied by the nouveau uploader itself.
+    */
+   context->base.stream_uploader = u_upload_create_default(&context->base);
+   if (!context->base.stream_uploader)
+      goto destroy;
+
+   /*
+    * NOTE(review): const_uploader aliases stream_uploader — the destroy
+    * path (tegra_destroy, not visible here) must take care to destroy the
+    * uploader only once. Verify against tegra_destroy().
+    */
+   context->base.const_uploader = context->base.stream_uploader;
+
+   /* wire up every pipe_context entry-point to the forwarding thunks */
+   context->base.destroy = tegra_destroy;
+
+   context->base.draw_vbo = tegra_draw_vbo;
+
+   context->base.render_condition = tegra_render_condition;
+
+   context->base.create_query = tegra_create_query;
+   context->base.create_batch_query = tegra_create_batch_query;
+   context->base.destroy_query = tegra_destroy_query;
+   context->base.begin_query = tegra_begin_query;
+   context->base.end_query = tegra_end_query;
+   context->base.get_query_result = tegra_get_query_result;
+   context->base.get_query_result_resource = tegra_get_query_result_resource;
+   context->base.set_active_query_state = tegra_set_active_query_state;
+
+   context->base.create_blend_state = tegra_create_blend_state;
+   context->base.bind_blend_state = tegra_bind_blend_state;
+   context->base.delete_blend_state = tegra_delete_blend_state;
+
+   context->base.create_sampler_state = tegra_create_sampler_state;
+   context->base.bind_sampler_states = tegra_bind_sampler_states;
+   context->base.delete_sampler_state = tegra_delete_sampler_state;
+
+   context->base.create_rasterizer_state = tegra_create_rasterizer_state;
+   context->base.bind_rasterizer_state = tegra_bind_rasterizer_state;
+   context->base.delete_rasterizer_state = tegra_delete_rasterizer_state;
+
+   context->base.create_depth_stencil_alpha_state = tegra_create_depth_stencil_alpha_state;
+   context->base.bind_depth_stencil_alpha_state = tegra_bind_depth_stencil_alpha_state;
+   context->base.delete_depth_stencil_alpha_state = tegra_delete_depth_stencil_alpha_state;
+
+   context->base.create_fs_state = tegra_create_fs_state;
+   context->base.bind_fs_state = tegra_bind_fs_state;
+   context->base.delete_fs_state = tegra_delete_fs_state;
+
+   context->base.create_vs_state = tegra_create_vs_state;
+   context->base.bind_vs_state = tegra_bind_vs_state;
+   context->base.delete_vs_state = tegra_delete_vs_state;
+
+   context->base.create_gs_state = tegra_create_gs_state;
+   context->base.bind_gs_state = tegra_bind_gs_state;
+   context->base.delete_gs_state = tegra_delete_gs_state;
+
+   context->base.create_tcs_state = tegra_create_tcs_state;
+   context->base.bind_tcs_state = tegra_bind_tcs_state;
+   context->base.delete_tcs_state = tegra_delete_tcs_state;
+
+   context->base.create_tes_state = tegra_create_tes_state;
+   context->base.bind_tes_state = tegra_bind_tes_state;
+   context->base.delete_tes_state = tegra_delete_tes_state;
+
+   context->base.create_vertex_elements_state = tegra_create_vertex_elements_state;
+   context->base.bind_vertex_elements_state = tegra_bind_vertex_elements_state;
+   context->base.delete_vertex_elements_state = tegra_delete_vertex_elements_state;
+
+   context->base.set_blend_color = tegra_set_blend_color;
+   context->base.set_stencil_ref = tegra_set_stencil_ref;
+   context->base.set_sample_mask = tegra_set_sample_mask;
+   context->base.set_min_samples = tegra_set_min_samples;
+   context->base.set_clip_state = tegra_set_clip_state;
+
+   context->base.set_constant_buffer = tegra_set_constant_buffer;
+   context->base.set_framebuffer_state = tegra_set_framebuffer_state;
+   context->base.set_polygon_stipple = tegra_set_polygon_stipple;
+   context->base.set_scissor_states = tegra_set_scissor_states;
+   context->base.set_window_rectangles = tegra_set_window_rectangles;
+   context->base.set_viewport_states = tegra_set_viewport_states;
+   context->base.set_sampler_views = tegra_set_sampler_views;
+   context->base.set_tess_state = tegra_set_tess_state;
+
+   context->base.set_debug_callback = tegra_set_debug_callback;
+
+   context->base.set_shader_buffers = tegra_set_shader_buffers;
+   context->base.set_shader_images = tegra_set_shader_images;
+   context->base.set_vertex_buffers = tegra_set_vertex_buffers;
+
+   context->base.create_stream_output_target = tegra_create_stream_output_target;
+   context->base.stream_output_target_destroy = tegra_stream_output_target_destroy;
+   context->base.set_stream_output_targets = tegra_set_stream_output_targets;
+
+   context->base.resource_copy_region = tegra_resource_copy_region;
+   context->base.blit = tegra_blit;
+   context->base.clear = tegra_clear;
+   context->base.clear_render_target = tegra_clear_render_target;
+   context->base.clear_depth_stencil = tegra_clear_depth_stencil;
+   context->base.clear_texture = tegra_clear_texture;
+   context->base.clear_buffer = tegra_clear_buffer;
+   context->base.flush = tegra_flush;
+
+   context->base.create_fence_fd = tegra_create_fence_fd;
+   context->base.fence_server_sync = tegra_fence_server_sync;
+
+   context->base.create_sampler_view = tegra_create_sampler_view;
+   context->base.sampler_view_destroy = tegra_sampler_view_destroy;
+
+   context->base.create_surface = tegra_create_surface;
+   context->base.surface_destroy = tegra_surface_destroy;
+
+   context->base.transfer_map = tegra_transfer_map;
+   context->base.transfer_flush_region = tegra_transfer_flush_region;
+   context->base.transfer_unmap = tegra_transfer_unmap;
+   context->base.buffer_subdata = tegra_buffer_subdata;
+   context->base.texture_subdata = tegra_texture_subdata;
+
+   context->base.texture_barrier = tegra_texture_barrier;
+   context->base.memory_barrier = tegra_memory_barrier;
+
+   context->base.create_video_codec = tegra_create_video_codec;
+   context->base.create_video_buffer = tegra_create_video_buffer;
+
+   context->base.create_compute_state = tegra_create_compute_state;
+   context->base.bind_compute_state = tegra_bind_compute_state;
+   context->base.delete_compute_state = tegra_delete_compute_state;
+   context->base.set_compute_resources = tegra_set_compute_resources;
+   context->base.set_global_binding = tegra_set_global_binding;
+   context->base.launch_grid = tegra_launch_grid;
+   context->base.get_sample_position = tegra_get_sample_position;
+   context->base.get_timestamp = tegra_get_timestamp;
+
+   context->base.flush_resource = tegra_flush_resource;
+   context->base.invalidate_resource = tegra_invalidate_resource;
+
+   context->base.get_device_reset_status = tegra_get_device_reset_status;
+   context->base.set_device_reset_callback = tegra_set_device_reset_callback;
+   context->base.dump_debug_state = tegra_dump_debug_state;
+   context->base.emit_string_marker = tegra_emit_string_marker;
+
+   context->base.generate_mipmap = tegra_generate_mipmap;
+
+   context->base.create_texture_handle = tegra_create_texture_handle;
+   context->base.delete_texture_handle = tegra_delete_texture_handle;
+   context->base.make_texture_handle_resident = tegra_make_texture_handle_resident;
+   context->base.create_image_handle = tegra_create_image_handle;
+   context->base.delete_image_handle = tegra_delete_image_handle;
+   context->base.make_image_handle_resident = tegra_make_image_handle_resident;
+
+   return &context->base;
+
+destroy:
+   context->gpu->destroy(context->gpu);
+free:
+   free(context);
+   return NULL;
+}
diff --git a/src/gallium/drivers/tegra/tegra_context.h b/src/gallium/drivers/tegra/tegra_context.h
new file mode 100644
index 00000000000..4869b0913a6
--- /dev/null
+++ b/src/gallium/drivers/tegra/tegra_context.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright © 2014-2018 NVIDIA Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef TEGRA_CONTEXT_H
+#define TEGRA_CONTEXT_H
+
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+
+struct tegra_screen;
+
+/*
+ * A Tegra context wraps a GPU (Nouveau) context. The embedded pipe_context
+ * is what state trackers see; calls are forwarded to the wrapped context.
+ */
+struct tegra_context {
+   struct pipe_context base;
+   struct pipe_context *gpu;
+};
+
+/* Downcast a pipe_context to the wrapping tegra_context. */
+static inline struct tegra_context *
+to_tegra_context(struct pipe_context *context)
+{
+   return (struct tegra_context *)context;
+}
+
+struct pipe_context *
+tegra_screen_context_create(struct pipe_screen *pscreen, void *priv,
+                            unsigned int flags);
+
+/* A sampler view wrapper pairing the exposed view with the GPU view. */
+struct tegra_sampler_view {
+   struct pipe_sampler_view base;
+   struct pipe_sampler_view *gpu;
+};
+
+static inline struct tegra_sampler_view *
+to_tegra_sampler_view(struct pipe_sampler_view *view)
+{
+   return (struct tegra_sampler_view *)view;
+}
+
+/* Return the wrapped GPU sampler view, or NULL for a NULL wrapper. */
+static inline struct pipe_sampler_view *
+tegra_sampler_view_unwrap(struct pipe_sampler_view *view)
+{
+   if (!view)
+      return NULL;
+
+   return to_tegra_sampler_view(view)->gpu;
+}
+
+/* A transfer wrapper pairing the exposed transfer with the GPU transfer. */
+struct tegra_transfer {
+   struct pipe_transfer base;
+   struct pipe_transfer *gpu;
+
+   /* NOTE(review): count is not used by the code visible in this patch —
+    * presumably a map count for nested maps; verify usage elsewhere. */
+   unsigned int count;
+   void *map;
+};
+
+static inline struct tegra_transfer *
+to_tegra_transfer(struct pipe_transfer *transfer)
+{
+   return (struct tegra_transfer *)transfer;
+}
+
+#endif /* TEGRA_CONTEXT_H */
diff --git a/src/gallium/drivers/tegra/tegra_resource.h b/src/gallium/drivers/tegra/tegra_resource.h
new file mode 100644
index 00000000000..67507d64590
--- /dev/null
+++ b/src/gallium/drivers/tegra/tegra_resource.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright © 2014-2018 NVIDIA Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef TEGRA_RESOURCE_H
+#define TEGRA_RESOURCE_H
+
+#include "pipe/p_state.h"
+
+struct winsys_handle;
+
+/*
+ * A Tegra resource wraps a GPU (Nouveau) resource. For scanout buffers the
+ * import path additionally records the Tegra DRM GEM handle, format
+ * modifier and stride so that KMS framebuffers can be created on the
+ * display device.
+ */
+struct tegra_resource {
+   struct pipe_resource base;
+   struct pipe_resource *gpu;
+
+   uint64_t modifier;
+   uint32_t stride;
+   uint32_t handle;
+   size_t size;
+};
+
+static inline struct tegra_resource *
+to_tegra_resource(struct pipe_resource *resource)
+{
+   return (struct tegra_resource *)resource;
+}
+
+/* Return the wrapped GPU resource, or NULL for a NULL wrapper. */
+static inline struct pipe_resource *
+tegra_resource_unwrap(struct pipe_resource *resource)
+{
+   if (!resource)
+      return NULL;
+
+   return to_tegra_resource(resource)->gpu;
+}
+
+/* A surface wrapper pairing the exposed surface with the GPU surface. */
+struct tegra_surface {
+   struct pipe_surface base;
+   struct pipe_surface *gpu;
+};
+
+static inline struct tegra_surface *
+to_tegra_surface(struct pipe_surface *surface)
+{
+   return (struct tegra_surface *)surface;
+}
+
+/* Return the wrapped GPU surface, or NULL for a NULL wrapper. */
+static inline struct pipe_surface *
+tegra_surface_unwrap(struct pipe_surface *surface)
+{
+   if (!surface)
+      return NULL;
+
+   return to_tegra_surface(surface)->gpu;
+}
+
+#endif /* TEGRA_RESOURCE_H */
diff --git a/src/gallium/drivers/tegra/tegra_screen.c b/src/gallium/drivers/tegra/tegra_screen.c
new file mode 100644
index 00000000000..669f22a1944
--- /dev/null
+++ b/src/gallium/drivers/tegra/tegra_screen.c
@@ -0,0 +1,688 @@
+/*
+ * Copyright © 2014-2018 NVIDIA Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#include <sys/stat.h>
+
+#include <drm_fourcc.h>
+#include <tegra_drm.h>
+#include <xf86drm.h>
+
+#include "pipe/p_state.h"
+#include "util/u_debug.h"
+#include "util/u_inlines.h"
+
+#include "state_tracker/drm_driver.h"
+
+#include "nouveau/drm/nouveau_drm_public.h"
+
+#include "tegra_context.h"
+#include "tegra_resource.h"
+#include "tegra_screen.h"
+
+/*
+ * Destroy the Tegra screen: tear down the wrapped GPU screen first, then
+ * free the wrapper. (pscreen and the tegra_screen share the same address,
+ * so free(pscreen) releases the whole allocation.)
+ */
+static void tegra_screen_destroy(struct pipe_screen *pscreen)
+{
+   struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+   screen->gpu->destroy(screen->gpu);
+   free(pscreen);
+}
+
+/* Driver name reported to state trackers. */
+static const char *
+tegra_screen_get_name(struct pipe_screen *pscreen)
+{
+   return "tegra";
+}
+
+/* Vendor string for the driver. */
+static const char *
+tegra_screen_get_vendor(struct pipe_screen *pscreen)
+{
+   return "NVIDIA";
+}
+
+/* Vendor string for the device. */
+static const char *
+tegra_screen_get_device_vendor(struct pipe_screen *pscreen)
+{
+   return "NVIDIA";
+}
+
+/* Forward a capability query to the GPU screen. */
+static int
+tegra_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
+{
+   struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+   return screen->gpu->get_param(screen->gpu, param);
+}
+
+/* Forward a float capability query to the GPU screen. */
+static float
+tegra_screen_get_paramf(struct pipe_screen *pscreen, enum pipe_capf param)
+{
+   struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+   return screen->gpu->get_paramf(screen->gpu, param);
+}
+
+/* Forward a shader capability query to the GPU screen. */
+static int
+tegra_screen_get_shader_param(struct pipe_screen *pscreen, unsigned shader,
+                              enum pipe_shader_cap param)
+{
+   struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+   return screen->gpu->get_shader_param(screen->gpu, shader, param);
+}
+
+/* Forward a video capability query to the GPU screen. */
+static int
+tegra_screen_get_video_param(struct pipe_screen *pscreen,
+                             enum pipe_video_profile profile,
+                             enum pipe_video_entrypoint entrypoint,
+                             enum pipe_video_cap param)
+{
+   struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+   return screen->gpu->get_video_param(screen->gpu, profile, entrypoint,
+                                       param);
+}
+
+/* Forward a compute capability query to the GPU screen. */
+static int
+tegra_screen_get_compute_param(struct pipe_screen *pscreen,
+                               enum pipe_shader_ir ir_type,
+                               enum pipe_compute_cap param,
+                               void *retp)
+{
+   struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+   return screen->gpu->get_compute_param(screen->gpu, ir_type, param,
+                                         retp);
+}
+
+/* Forward a timestamp query to the GPU screen. */
+static uint64_t
+tegra_screen_get_timestamp(struct pipe_screen *pscreen)
+{
+   struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+   return screen->gpu->get_timestamp(screen->gpu);
+}
+
+/* Forward a format support query to the GPU screen. */
+static boolean
+tegra_screen_is_format_supported(struct pipe_screen *pscreen,
+                                 enum pipe_format format,
+                                 enum pipe_texture_target target,
+                                 unsigned sample_count,
+                                 unsigned usage)
+{
+   struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+   return screen->gpu->is_format_supported(screen->gpu, format, target,
+                                           sample_count, usage);
+}
+
+/* Forward a video format support query to the GPU screen. */
+static boolean
+tegra_screen_is_video_format_supported(struct pipe_screen *pscreen,
+                                       enum pipe_format format,
+                                       enum pipe_video_profile profile,
+                                       enum pipe_video_entrypoint entrypoint)
+{
+   struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+   return screen->gpu->is_video_format_supported(screen->gpu, format, profile,
+                                                 entrypoint);
+}
+
+/* Ask the GPU screen whether a resource template can be created. */
+static boolean
+tegra_screen_can_create_resource(struct pipe_screen *pscreen,
+                                 const struct pipe_resource *template)
+{
+   struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+   return screen->gpu->can_create_resource(screen->gpu, template);
+}
+
+/*
+ * Locate and open a platform-bus DRM render node driven by the "nouveau"
+ * kernel driver. Returns an open file descriptor on success, or a negative
+ * errno-style code (-ENOENT if no matching device exists).
+ */
+static int tegra_open_render_node(void)
+{
+   drmDevicePtr *devices, device;
+   int err, render = -ENOENT, fd;
+   unsigned int num, i;
+
+   /* first call queries the number of available DRM devices */
+   err = drmGetDevices2(0, NULL, 0);
+   if (err < 0)
+      return err;
+
+   num = err;
+
+   devices = calloc(num, sizeof(*devices));
+   if (!devices)
+      return -ENOMEM;
+
+   err = drmGetDevices2(0, devices, num);
+   if (err < 0) {
+      render = err;
+      goto free;
+   }
+
+   for (i = 0; i < num; i++) {
+      device = devices[i];
+
+      if ((device->available_nodes & (1 << DRM_NODE_RENDER)) &&
+          (device->bustype == DRM_BUS_PLATFORM)) {
+         drmVersionPtr version;
+
+         fd = open(device->nodes[DRM_NODE_RENDER], O_RDWR | O_CLOEXEC);
+         if (fd < 0)
+            continue;
+
+         version = drmGetVersion(fd);
+         if (!version) {
+            close(fd);
+            continue;
+         }
+
+         if (strcmp(version->name, "nouveau") != 0) {
+            /*
+             * Bug fix: drmGetVersion() allocates the version structure, so
+             * it must be released on this rejection path as well, otherwise
+             * it leaks for every non-nouveau platform render node.
+             */
+            drmFreeVersion(version);
+            close(fd);
+            continue;
+         }
+
+         drmFreeVersion(version);
+         render = fd;
+         break;
+      }
+   }
+
+   drmFreeDevices(devices, num);
+
+free:
+   free(devices);
+   return render;
+}
+
+/*
+ * Import a GPU resource into the Tegra DRM device for scanout.
+ *
+ * Exports the Nouveau buffer as a dma-buf FD, imports it into the display
+ * device and, when the caller cannot communicate modifiers, translates the
+ * format modifier into legacy Tegra GEM tiling parameters.
+ *
+ * Returns 0 on success or a negative errno-style code on failure.
+ */
+static int tegra_screen_import_resource(struct tegra_screen *screen,
+                                        struct tegra_resource *resource,
+                                        bool has_modifiers)
+{
+   unsigned usage = PIPE_HANDLE_USAGE_READ;
+   struct drm_tegra_gem_set_tiling args;
+   struct winsys_handle handle;
+   boolean status;
+   int fd, err;
+
+   memset(&handle, 0, sizeof(handle));
+   handle.modifier = DRM_FORMAT_MOD_INVALID;
+   handle.type = DRM_API_HANDLE_TYPE_FD;
+
+   status = screen->gpu->resource_get_handle(screen->gpu, NULL, resource->gpu,
+                                             &handle, usage);
+   if (!status)
+      return -EINVAL;
+
+   /* the GPU driver is expected to report a valid modifier */
+   assert(handle.modifier != DRM_FORMAT_MOD_INVALID);
+
+   if (handle.modifier == DRM_FORMAT_MOD_INVALID) {
+      close(handle.handle);
+      return -EINVAL;
+   }
+
+   resource->modifier = handle.modifier;
+   resource->stride = handle.stride;
+   fd = handle.handle;
+
+   err = drmPrimeFDToHandle(screen->fd, fd, &resource->handle);
+   if (err < 0)
+      err = -errno;
+
+   close(fd);
+
+   /*
+    * Bug fix: a failed PRIME import was previously recorded in "err" but
+    * then ignored, so the tiling ioctl below would be issued with an
+    * invalid GEM handle. Propagate the error instead.
+    */
+   if (err < 0)
+      return err;
+
+   if (!has_modifiers) {
+      /* translate the modifier into legacy tiling parameters */
+      memset(&args, 0, sizeof(args));
+      args.handle = resource->handle;
+
+      switch (handle.modifier) {
+      case DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED:
+         args.mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
+         break;
+
+      case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_ONE_GOB:
+         args.mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
+         args.value = 0;
+         break;
+
+      case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_TWO_GOB:
+         args.mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
+         args.value = 1;
+         break;
+
+      case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_FOUR_GOB:
+         args.mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
+         args.value = 2;
+         break;
+
+      case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_EIGHT_GOB:
+         args.mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
+         args.value = 3;
+         break;
+
+      case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_SIXTEEN_GOB:
+         args.mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
+         args.value = 4;
+         break;
+
+      case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_THIRTYTWO_GOB:
+         args.mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
+         args.value = 5;
+         break;
+
+      default:
+         debug_printf("unsupported modifier %" PRIx64 ", assuming linear\n",
+                      handle.modifier);
+         /* fall-through */
+
+      case DRM_FORMAT_MOD_LINEAR:
+         args.mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
+         break;
+      }
+
+      err = drmIoctl(screen->fd, DRM_IOCTL_TEGRA_GEM_SET_TILING, &args);
+      if (err < 0) {
+         fprintf(stderr, "failed to set tiling parameters: %s\n",
+                 strerror(errno));
+         return -errno;
+      }
+   }
+
+   return 0;
+}
+
+/*
+ * Create a resource on the GPU screen and wrap it. Scanout buffers are
+ * additionally imported into the Tegra DRM device so that KMS framebuffers
+ * can be created from them. Returns NULL on failure.
+ */
+static struct pipe_resource *
+tegra_screen_resource_create(struct pipe_screen *pscreen,
+                             const struct pipe_resource *template)
+{
+   struct tegra_screen *screen = to_tegra_screen(pscreen);
+   struct tegra_resource *resource;
+   int err;
+
+   resource = calloc(1, sizeof(*resource));
+   if (!resource)
+      return NULL;
+
+   resource->gpu = screen->gpu->resource_create(screen->gpu, template);
+   if (!resource->gpu)
+      goto free;
+
+   /* import scanout buffers for display */
+   if (template->bind & PIPE_BIND_SCANOUT) {
+      err = tegra_screen_import_resource(screen, resource, false);
+      if (err < 0)
+         goto destroy;
+   }
+
+   /* mirror the GPU resource but re-home it on the wrapper screen */
+   memcpy(&resource->base, resource->gpu, sizeof(*resource->gpu));
+   pipe_reference_init(&resource->base.reference, 1);
+   resource->base.screen = &screen->base;
+
+   return &resource->base;
+
+destroy:
+   screen->gpu->resource_destroy(screen->gpu, resource->gpu);
+free:
+   free(resource);
+   return NULL;
+}
+
+/* XXX: unlike resource_create(), this returns the raw GPU resource (no
+ * wrapper), merely re-homed on this screen. */
+static struct pipe_resource *
+tegra_screen_resource_create_front(struct pipe_screen *pscreen,
+                                   const struct pipe_resource *template,
+                                   const void *map_front_private)
+{
+   struct tegra_screen *screen = to_tegra_screen(pscreen);
+   struct pipe_resource *resource;
+
+   resource = screen->gpu->resource_create_front(screen->gpu, template,
+                                                 map_front_private);
+   if (resource)
+      resource->screen = pscreen;
+
+   return resource;
+}
+
+/*
+ * Import a resource from a winsys handle via the GPU screen and wrap it.
+ * Returns NULL on failure.
+ */
+static struct pipe_resource *
+tegra_screen_resource_from_handle(struct pipe_screen *pscreen,
+                                  const struct pipe_resource *template,
+                                  struct winsys_handle *handle,
+                                  unsigned usage)
+{
+   struct tegra_screen *screen = to_tegra_screen(pscreen);
+   struct tegra_resource *resource;
+
+   resource = calloc(1, sizeof(*resource));
+   if (!resource)
+      return NULL;
+
+   resource->gpu = screen->gpu->resource_from_handle(screen->gpu, template,
+                                                     handle, usage);
+   if (!resource->gpu) {
+      free(resource);
+      return NULL;
+   }
+
+   /* mirror the GPU resource but re-home it on the wrapper screen */
+   memcpy(&resource->base, resource->gpu, sizeof(*resource->gpu));
+   pipe_reference_init(&resource->base.reference, 1);
+   resource->base.screen = &screen->base;
+
+   return &resource->base;
+}
+
+/* XXX: like resource_create_front(), this returns the raw GPU resource
+ * (no wrapper), merely re-homed on this screen. */
+static struct pipe_resource *
+tegra_screen_resource_from_user_memory(struct pipe_screen *pscreen,
+                                       const struct pipe_resource *template,
+                                       void *buffer)
+{
+   struct tegra_screen *screen = to_tegra_screen(pscreen);
+   struct pipe_resource *resource;
+
+   resource = screen->gpu->resource_from_user_memory(screen->gpu, template,
+                                                     buffer);
+   if (resource)
+      resource->screen = pscreen;
+
+   return resource;
+}
+
+/*
+ * Export a winsys handle for a resource. KMS handles for scanout
+ * resources return the Tegra DRM (display) side handle/stride/modifier
+ * stored on the wrapper; everything else is answered by the GPU screen.
+ */
+static boolean
+tegra_screen_resource_get_handle(struct pipe_screen *pscreen,
+ struct pipe_context *pcontext,
+ struct pipe_resource *presource,
+ struct winsys_handle *handle,
+ unsigned usage)
+{
+ struct tegra_resource *resource = to_tegra_resource(presource);
+ struct tegra_context *context = to_tegra_context(pcontext);
+ struct tegra_screen *screen = to_tegra_screen(pscreen);
+ boolean ret = TRUE;
+
+ /*
+ * Assume that KMS handles for scanout resources will only ever be used
+ * to pass buffers into Tegra DRM for display. In all other cases, return
+ * the Nouveau handle, assuming they will be used for sharing in DRI2/3.
+ */
+ if (handle->type == DRM_API_HANDLE_TYPE_KMS &&
+ presource->bind & PIPE_BIND_SCANOUT) {
+ handle->modifier = resource->modifier;
+ handle->handle = resource->handle;
+ handle->stride = resource->stride;
+ } else {
+ /* pcontext may be NULL, in which case no GPU context is passed on */
+ ret = screen->gpu->resource_get_handle(screen->gpu,
+ context ? context->gpu : NULL,
+ resource->gpu, handle, usage);
+ }
+
+ return ret;
+}
+
+/*
+ * Destroy a wrapped resource: drop the reference on the underlying GPU
+ * resource and free the tegra_resource wrapper itself.
+ */
+static void
+tegra_screen_resource_destroy(struct pipe_screen *pscreen,
+ struct pipe_resource *presource)
+{
+ struct tegra_resource *resource = to_tegra_resource(presource);
+
+ pipe_resource_reference(&resource->gpu, NULL);
+ free(resource);
+}
+
+/* Forward front-buffer flushing to the wrapped GPU screen unchanged. */
+static void
+tegra_screen_flush_frontbuffer(struct pipe_screen *pscreen,
+ struct pipe_resource *resource,
+ unsigned int level,
+ unsigned int layer,
+ void *winsys_drawable_handle,
+ struct pipe_box *box)
+{
+ struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+ screen->gpu->flush_frontbuffer(screen->gpu, resource, level, layer,
+ winsys_drawable_handle, box);
+}
+
+/* Fences are owned by the GPU screen; forward reference management. */
+static void
+tegra_screen_fence_reference(struct pipe_screen *pscreen,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence)
+{
+ struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+ screen->gpu->fence_reference(screen->gpu, ptr, fence);
+}
+
+/*
+ * Wait for a fence, unwrapping the Tegra context (if any) so the GPU
+ * screen sees its own context. pcontext may legitimately be NULL.
+ */
+static boolean
+tegra_screen_fence_finish(struct pipe_screen *pscreen,
+ struct pipe_context *pcontext,
+ struct pipe_fence_handle *fence,
+ uint64_t timeout)
+{
+ struct tegra_context *context = to_tegra_context(pcontext);
+ struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+ return screen->gpu->fence_finish(screen->gpu,
+ context ? context->gpu : NULL,
+ fence, timeout);
+}
+
+/* Obtain a sync FD for a fence from the wrapped GPU screen. */
+static int
+tegra_screen_fence_get_fd(struct pipe_screen *pscreen,
+ struct pipe_fence_handle *fence)
+{
+ struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+ return screen->gpu->fence_get_fd(screen->gpu, fence);
+}
+
+/* Driver queries come from the GPU driver; forward the lookup. */
+static int
+tegra_screen_get_driver_query_info(struct pipe_screen *pscreen,
+ unsigned int index,
+ struct pipe_driver_query_info *info)
+{
+ struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+ return screen->gpu->get_driver_query_info(screen->gpu, index, info);
+}
+
+/* Driver query groups likewise come from the GPU driver. */
+static int
+tegra_screen_get_driver_query_group_info(struct pipe_screen *pscreen,
+ unsigned int index,
+ struct pipe_driver_query_group_info *info)
+{
+ struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+ return screen->gpu->get_driver_query_group_info(screen->gpu, index, info);
+}
+
+/* Memory statistics are those of the GPU device, not the display. */
+static void
+tegra_screen_query_memory_info(struct pipe_screen *pscreen,
+ struct pipe_memory_info *info)
+{
+ struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+ screen->gpu->query_memory_info(screen->gpu, info);
+}
+
+/*
+ * Return the GPU driver's shader compiler options, or NULL when the GPU
+ * screen does not implement the hook (it is optional, hence the guard).
+ */
+static const void *
+tegra_screen_get_compiler_options(struct pipe_screen *pscreen,
+ enum pipe_shader_ir ir,
+ unsigned int shader)
+{
+ struct tegra_screen *screen = to_tegra_screen(pscreen);
+ const void *options = NULL;
+
+ if (screen->gpu->get_compiler_options)
+ options = screen->gpu->get_compiler_options(screen->gpu, ir, shader);
+
+ return options;
+}
+
+/* The on-disk shader cache belongs to the GPU driver; forward it. */
+static struct disk_cache *
+tegra_screen_get_disk_shader_cache(struct pipe_screen *pscreen)
+{
+ struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+ return screen->gpu->get_disk_shader_cache(screen->gpu);
+}
+
+/*
+ * Create a resource with an explicit list of acceptable format
+ * modifiers. Mirrors resource_create(): the GPU screen allocates, the
+ * buffer is imported into the Tegra DRM side via
+ * tegra_screen_import_resource() (the 'true' flag presumably signals
+ * modifier-aware import -- confirm against its definition), and the
+ * result is wrapped in a struct tegra_resource. goto-based cleanup
+ * unwinds the GPU resource and the wrapper on failure.
+ */
+static struct pipe_resource *
+tegra_screen_resource_create_with_modifiers(struct pipe_screen *pscreen,
+ const struct pipe_resource *template,
+ const uint64_t *modifiers,
+ int count)
+{
+ struct tegra_screen *screen = to_tegra_screen(pscreen);
+ struct tegra_resource *resource;
+ int err;
+
+ resource = calloc(1, sizeof(*resource));
+ if (!resource)
+ return NULL;
+
+ resource->gpu = screen->gpu->resource_create_with_modifiers(screen->gpu,
+ template,
+ modifiers,
+ count);
+ if (!resource->gpu)
+ goto free;
+
+ err = tegra_screen_import_resource(screen, resource, true);
+ if (err < 0)
+ goto destroy;
+
+ /* copy the GPU resource's fields and take ownership on the Tegra side */
+ memcpy(&resource->base, resource->gpu, sizeof(*resource->gpu));
+ pipe_reference_init(&resource->base.reference, 1);
+ resource->base.screen = &screen->base;
+
+ return &resource->base;
+
+destroy:
+ screen->gpu->resource_destroy(screen->gpu, resource->gpu);
+free:
+ free(resource);
+ return NULL;
+}
+
+/* Supported dma-buf modifiers are reported by the GPU driver. */
+static void tegra_screen_query_dmabuf_modifiers(struct pipe_screen *pscreen,
+ enum pipe_format format,
+ int max, uint64_t *modifiers,
+ unsigned int *external_only,
+ int *count)
+{
+ struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+ screen->gpu->query_dmabuf_modifiers(screen->gpu, format, max, modifiers,
+ external_only, count);
+}
+
+/* Memory-object import is handled entirely by the GPU screen. */
+static struct pipe_memory_object *
+tegra_screen_memobj_create_from_handle(struct pipe_screen *pscreen,
+ struct winsys_handle *handle,
+ bool dedicated)
+{
+ struct tegra_screen *screen = to_tegra_screen(pscreen);
+
+ return screen->gpu->memobj_create_from_handle(screen->gpu, handle,
+ dedicated);
+}
+
+/*
+ * Create a Tegra screen on top of the display device described by fd.
+ * Opens the GPU render node, instantiates a Nouveau screen for it, and
+ * installs forwarding implementations for the pipe_screen interface.
+ * Returns NULL on allocation or device-open/screen-creation failure;
+ * the caller retains ownership of fd on failure.
+ */
+struct pipe_screen *
+tegra_screen_create(int fd)
+{
+ struct tegra_screen *screen;
+
+ screen = calloc(1, sizeof(*screen));
+ if (!screen)
+ return NULL;
+
+ screen->fd = fd;
+
+ /* ENOENT simply means no GPU render node exists (pre-Tegra124), so
+ * stay silent in that case */
+ screen->gpu_fd = tegra_open_render_node();
+ if (screen->gpu_fd < 0) {
+ if (errno != ENOENT)
+ fprintf(stderr, "failed to open GPU device: %s\n", strerror(errno));
+
+ free(screen);
+ return NULL;
+ }
+
+ screen->gpu = nouveau_drm_screen_create(screen->gpu_fd);
+ if (!screen->gpu) {
+ fprintf(stderr, "failed to create GPU screen\n");
+ close(screen->gpu_fd);
+ free(screen);
+ return NULL;
+ }
+
+ /* wire up the forwarding pipe_screen vtable */
+ screen->base.destroy = tegra_screen_destroy;
+ screen->base.get_name = tegra_screen_get_name;
+ screen->base.get_vendor = tegra_screen_get_vendor;
+ screen->base.get_device_vendor = tegra_screen_get_device_vendor;
+ screen->base.get_param = tegra_screen_get_param;
+ screen->base.get_paramf = tegra_screen_get_paramf;
+ screen->base.get_shader_param = tegra_screen_get_shader_param;
+ screen->base.get_video_param = tegra_screen_get_video_param;
+ screen->base.get_compute_param = tegra_screen_get_compute_param;
+ screen->base.get_timestamp = tegra_screen_get_timestamp;
+ screen->base.context_create = tegra_screen_context_create;
+ screen->base.is_format_supported = tegra_screen_is_format_supported;
+ screen->base.is_video_format_supported = tegra_screen_is_video_format_supported;
+
+ /* allow fallback implementation if GPU driver doesn't implement it */
+ if (screen->gpu->can_create_resource)
+ screen->base.can_create_resource = tegra_screen_can_create_resource;
+
+ screen->base.resource_create = tegra_screen_resource_create;
+ screen->base.resource_create_front = tegra_screen_resource_create_front;
+ screen->base.resource_from_handle = tegra_screen_resource_from_handle;
+ screen->base.resource_from_user_memory = tegra_screen_resource_from_user_memory;
+ screen->base.resource_get_handle = tegra_screen_resource_get_handle;
+ screen->base.resource_destroy = tegra_screen_resource_destroy;
+
+ screen->base.flush_frontbuffer = tegra_screen_flush_frontbuffer;
+ screen->base.fence_reference = tegra_screen_fence_reference;
+ screen->base.fence_finish = tegra_screen_fence_finish;
+ screen->base.fence_get_fd = tegra_screen_fence_get_fd;
+
+ screen->base.get_driver_query_info = tegra_screen_get_driver_query_info;
+ screen->base.get_driver_query_group_info = tegra_screen_get_driver_query_group_info;
+ screen->base.query_memory_info = tegra_screen_query_memory_info;
+
+ screen->base.get_compiler_options = tegra_screen_get_compiler_options;
+ screen->base.get_disk_shader_cache = tegra_screen_get_disk_shader_cache;
+
+ screen->base.resource_create_with_modifiers = tegra_screen_resource_create_with_modifiers;
+ screen->base.query_dmabuf_modifiers = tegra_screen_query_dmabuf_modifiers;
+ screen->base.memobj_create_from_handle = tegra_screen_memobj_create_from_handle;
+
+ return &screen->base;
+}
diff --git a/src/gallium/drivers/tegra/tegra_screen.h b/src/gallium/drivers/tegra/tegra_screen.h
new file mode 100644
index 00000000000..558d22f2f99
--- /dev/null
+++ b/src/gallium/drivers/tegra/tegra_screen.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright © 2014-2018 NVIDIA Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef TEGRA_SCREEN_H
+#define TEGRA_SCREEN_H
+
+#include "pipe/p_screen.h"
+
+/*
+ * A Tegra screen pairs the display-capable Tegra DRM device (fd) with a
+ * wrapped render-only GPU screen (gpu, a Nouveau screen on gpu_fd) and
+ * forwards most pipe_screen operations to the latter.
+ */
+struct tegra_screen {
+ struct pipe_screen base;
+ /* file descriptor of the Tegra DRM display device */
+ int fd;
+
+ /* wrapped GPU screen and the render-node fd it was created from */
+ struct pipe_screen *gpu;
+ int gpu_fd;
+};
+
+/* Downcast a pipe_screen known to be a tegra_screen (base is first). */
+static inline struct tegra_screen *
+to_tegra_screen(struct pipe_screen *pscreen)
+{
+ return (struct tegra_screen *)pscreen;
+}
+
+struct pipe_screen *tegra_screen_create(int fd);
+
+#endif /* TEGRA_SCREEN_H */