author     Marek Olšák <[email protected]>    2018-04-07 14:01:12 -0400
committer  Marek Olšák <[email protected]>    2018-04-13 14:08:14 -0400
commit     6ff0c6f4ebcb87ea6c6fe5a4ba90b548f666067d
tree       30926986da28bb0b67b857d8f2cf7eeaa77f8773 /src/gallium/auxiliary/driver_ddebug
parent     918b798668c5465d85ca542423e4cf525dc79b31
gallium: move ddebug, noop, rbug, trace to auxiliary to improve build times
which also simplifies the build scripts.
Diffstat (limited to 'src/gallium/auxiliary/driver_ddebug')
-rw-r--r--  src/gallium/auxiliary/driver_ddebug/dd_context.c   877
-rw-r--r--  src/gallium/auxiliary/driver_ddebug/dd_draw.c     1645
-rw-r--r--  src/gallium/auxiliary/driver_ddebug/dd_pipe.h      371
-rw-r--r--  src/gallium/auxiliary/driver_ddebug/dd_public.h     36
-rw-r--r--  src/gallium/auxiliary/driver_ddebug/dd_screen.c    593
-rw-r--r--  src/gallium/auxiliary/driver_ddebug/dd_util.h      106
6 files changed, 3628 insertions, 0 deletions
diff --git a/src/gallium/auxiliary/driver_ddebug/dd_context.c b/src/gallium/auxiliary/driver_ddebug/dd_context.c
new file mode 100644
index 00000000000..dd7b3e086cd
--- /dev/null
+++ b/src/gallium/auxiliary/driver_ddebug/dd_context.c
@@ -0,0 +1,877 @@
+/**************************************************************************
+ *
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "dd_pipe.h"
+#include "tgsi/tgsi_parse.h"
+#include "util/u_inlines.h"
+#include "util/u_memory.h"
+
+
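+/* memcpy that tolerates a NULL source by zero-filling the destination
+ * instead, so the state-capture code does not have to special-case
+ * unbound slots.
+ */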
+static void
+safe_memcpy(void *dst, const void *src, size_t size)
+{
+ if (src)
+ memcpy(dst, src, size);
+ else
+ memset(dst, 0, size);
+}
+
+
+/********************************************************************
+ * queries
+ */
+
+static struct pipe_query *
+dd_context_create_query(struct pipe_context *_pipe, unsigned query_type,
+ unsigned index)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+ struct pipe_query *query;
+
+ query = pipe->create_query(pipe, query_type, index);
+
+ /* Wrap query object. */
+ if (query) {
+ struct dd_query *dd_query = CALLOC_STRUCT(dd_query);
+ if (dd_query) {
+ dd_query->type = query_type;
+ dd_query->query = query;
+ query = (struct pipe_query *)dd_query;
+ } else {
+ pipe->destroy_query(pipe, query);
+ query = NULL;
+ }
+ }
+
+ return query;
+}
+
+static struct pipe_query *
+dd_context_create_batch_query(struct pipe_context *_pipe, unsigned num_queries,
+ unsigned *query_types)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+ struct pipe_query *query;
+
+ query = pipe->create_batch_query(pipe, num_queries, query_types);
+
+ /* Wrap query object. */
+ if (query) {
+ struct dd_query *dd_query = CALLOC_STRUCT(dd_query);
+ if (dd_query) {
+ /* no special handling for batch queries yet */
+ dd_query->type = query_types[0];
+ dd_query->query = query;
+ query = (struct pipe_query *)dd_query;
+ } else {
+ pipe->destroy_query(pipe, query);
+ query = NULL;
+ }
+ }
+
+ return query;
+}
+
+static void
+dd_context_destroy_query(struct pipe_context *_pipe,
+ struct pipe_query *query)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ pipe->destroy_query(pipe, dd_query_unwrap(query));
+ FREE(query);
+}
+
+static boolean
+dd_context_begin_query(struct pipe_context *_pipe, struct pipe_query *query)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+
+ return pipe->begin_query(pipe, dd_query_unwrap(query));
+}
+
+static bool
+dd_context_end_query(struct pipe_context *_pipe, struct pipe_query *query)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+
+ return pipe->end_query(pipe, dd_query_unwrap(query));
+}
+
+static boolean
+dd_context_get_query_result(struct pipe_context *_pipe,
+ struct pipe_query *query, boolean wait,
+ union pipe_query_result *result)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ return pipe->get_query_result(pipe, dd_query_unwrap(query), wait, result);
+}
+
+static void
+dd_context_set_active_query_state(struct pipe_context *_pipe, boolean enable)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ pipe->set_active_query_state(pipe, enable);
+}
+
+static void
+dd_context_render_condition(struct pipe_context *_pipe,
+ struct pipe_query *query, boolean condition,
+ enum pipe_render_cond_flag mode)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+ struct dd_draw_state *dstate = &dctx->draw_state;
+
+ pipe->render_condition(pipe, dd_query_unwrap(query), condition, mode);
+ dstate->render_cond.query = dd_query(query);
+ dstate->render_cond.condition = condition;
+ dstate->render_cond.mode = mode;
+}
+
+
+/********************************************************************
+ * constant (immutable) non-shader states
+ */
+
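+/* These macros generate the wrapped create/bind/delete hooks for constant
+ * state objects: create allocates a dd_state holding both the driver CSO
+ * and a copy of the template, bind records the currently bound dd_state in
+ * dd_draw_state, and delete frees both.
+ */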
+#define DD_CSO_CREATE(name, shortname) \
+ static void * \
+ dd_context_create_##name##_state(struct pipe_context *_pipe, \
+ const struct pipe_##name##_state *state) \
+ { \
+ struct pipe_context *pipe = dd_context(_pipe)->pipe; \
+ struct dd_state *hstate = CALLOC_STRUCT(dd_state); \
+ \
+ if (!hstate) \
+ return NULL; \
+ hstate->cso = pipe->create_##name##_state(pipe, state); \
+ hstate->state.shortname = *state; \
+ return hstate; \
+ }
+
+#define DD_CSO_BIND(name, shortname) \
+ static void \
+ dd_context_bind_##name##_state(struct pipe_context *_pipe, void *state) \
+ { \
+ struct dd_context *dctx = dd_context(_pipe); \
+ struct pipe_context *pipe = dctx->pipe; \
+ struct dd_state *hstate = state; \
+ \
+ dctx->draw_state.shortname = hstate; \
+ pipe->bind_##name##_state(pipe, hstate ? hstate->cso : NULL); \
+ }
+
+#define DD_CSO_DELETE(name) \
+ static void \
+ dd_context_delete_##name##_state(struct pipe_context *_pipe, void *state) \
+ { \
+ struct dd_context *dctx = dd_context(_pipe); \
+ struct pipe_context *pipe = dctx->pipe; \
+ struct dd_state *hstate = state; \
+ \
+ pipe->delete_##name##_state(pipe, hstate->cso); \
+ FREE(hstate); \
+ }
+
+#define DD_CSO_WHOLE(name, shortname) \
+ DD_CSO_CREATE(name, shortname) \
+ DD_CSO_BIND(name, shortname) \
+ DD_CSO_DELETE(name)
+
+DD_CSO_WHOLE(blend, blend)
+DD_CSO_WHOLE(rasterizer, rs)
+DD_CSO_WHOLE(depth_stencil_alpha, dsa)
+
+DD_CSO_CREATE(sampler, sampler)
+DD_CSO_DELETE(sampler)
+
+static void
+dd_context_bind_sampler_states(struct pipe_context *_pipe,
+ enum pipe_shader_type shader,
+ unsigned start, unsigned count, void **states)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+
+ memcpy(&dctx->draw_state.sampler_states[shader][start], states,
+ sizeof(void*) * count);
+
+ if (states) {
+ void *samp[PIPE_MAX_SAMPLERS];
+ int i;
+
+ for (i = 0; i < count; i++) {
+ struct dd_state *s = states[i];
+ samp[i] = s ? s->cso : NULL;
+ }
+
+ pipe->bind_sampler_states(pipe, shader, start, count, samp);
+ }
+ else
+ pipe->bind_sampler_states(pipe, shader, start, count, NULL);
+}
+
+static void *
+dd_context_create_vertex_elements_state(struct pipe_context *_pipe,
+ unsigned num_elems,
+ const struct pipe_vertex_element *elems)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+ struct dd_state *hstate = CALLOC_STRUCT(dd_state);
+
+ if (!hstate)
+ return NULL;
+ hstate->cso = pipe->create_vertex_elements_state(pipe, num_elems, elems);
+ memcpy(hstate->state.velems.velems, elems, sizeof(elems[0]) * num_elems);
+ hstate->state.velems.count = num_elems;
+ return hstate;
+}
+
+DD_CSO_BIND(vertex_elements, velems)
+DD_CSO_DELETE(vertex_elements)
+
+
+/********************************************************************
+ * shaders
+ */
+
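+/* The shader CSO wrappers follow the same pattern, except that TGSI tokens
+ * are duplicated on create and freed on delete so the saved copy stays
+ * valid for later dumping.
+ */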
+#define DD_SHADER_NOCREATE(NAME, name) \
+ static void \
+ dd_context_bind_##name##_state(struct pipe_context *_pipe, void *state) \
+ { \
+ struct dd_context *dctx = dd_context(_pipe); \
+ struct pipe_context *pipe = dctx->pipe; \
+ struct dd_state *hstate = state; \
+ \
+ dctx->draw_state.shaders[PIPE_SHADER_##NAME] = hstate; \
+ pipe->bind_##name##_state(pipe, hstate ? hstate->cso : NULL); \
+ } \
+ \
+ static void \
+ dd_context_delete_##name##_state(struct pipe_context *_pipe, void *state) \
+ { \
+ struct dd_context *dctx = dd_context(_pipe); \
+ struct pipe_context *pipe = dctx->pipe; \
+ struct dd_state *hstate = state; \
+ \
+ pipe->delete_##name##_state(pipe, hstate->cso); \
+ if (hstate->state.shader.type == PIPE_SHADER_IR_TGSI) \
+ tgsi_free_tokens(hstate->state.shader.tokens); \
+ FREE(hstate); \
+ }
+
+#define DD_SHADER(NAME, name) \
+ static void * \
+ dd_context_create_##name##_state(struct pipe_context *_pipe, \
+ const struct pipe_shader_state *state) \
+ { \
+ struct pipe_context *pipe = dd_context(_pipe)->pipe; \
+ struct dd_state *hstate = CALLOC_STRUCT(dd_state); \
+ \
+ if (!hstate) \
+ return NULL; \
+ hstate->cso = pipe->create_##name##_state(pipe, state); \
+ hstate->state.shader = *state; \
+ if (hstate->state.shader.type == PIPE_SHADER_IR_TGSI) \
+ hstate->state.shader.tokens = tgsi_dup_tokens(state->tokens); \
+ return hstate; \
+ } \
+ \
+ DD_SHADER_NOCREATE(NAME, name)
+
+DD_SHADER(FRAGMENT, fs)
+DD_SHADER(VERTEX, vs)
+DD_SHADER(GEOMETRY, gs)
+DD_SHADER(TESS_CTRL, tcs)
+DD_SHADER(TESS_EVAL, tes)
+
+static void *
+dd_context_create_compute_state(struct pipe_context *_pipe,
+ const struct pipe_compute_state *state)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+ struct dd_state *hstate = CALLOC_STRUCT(dd_state);
+
+ if (!hstate)
+ return NULL;
+ hstate->cso = pipe->create_compute_state(pipe, state);
+
+ hstate->state.shader.type = state->ir_type;
+
+ if (state->ir_type == PIPE_SHADER_IR_TGSI)
+ hstate->state.shader.tokens = tgsi_dup_tokens(state->prog);
+
+ return hstate;
+}
+
+DD_SHADER_NOCREATE(COMPUTE, compute)
+
+/********************************************************************
+ * immediate states
+ */
+
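+/* Wrappers for parameter-style (non-CSO) states: save the value in
+ * dd_draw_state and forward the call to the driver unchanged.
+ */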
+#define DD_IMM_STATE(name, type, deref, ref) \
+ static void \
+ dd_context_set_##name(struct pipe_context *_pipe, type deref) \
+ { \
+ struct dd_context *dctx = dd_context(_pipe); \
+ struct pipe_context *pipe = dctx->pipe; \
+ \
+ dctx->draw_state.name = deref; \
+ pipe->set_##name(pipe, ref); \
+ }
+
+DD_IMM_STATE(blend_color, const struct pipe_blend_color, *state, state)
+DD_IMM_STATE(stencil_ref, const struct pipe_stencil_ref, *state, state)
+DD_IMM_STATE(clip_state, const struct pipe_clip_state, *state, state)
+DD_IMM_STATE(sample_mask, unsigned, sample_mask, sample_mask)
+DD_IMM_STATE(min_samples, unsigned, min_samples, min_samples)
+DD_IMM_STATE(framebuffer_state, const struct pipe_framebuffer_state, *state, state)
+DD_IMM_STATE(polygon_stipple, const struct pipe_poly_stipple, *state, state)
+
+static void
+dd_context_set_constant_buffer(struct pipe_context *_pipe,
+ enum pipe_shader_type shader, uint index,
+ const struct pipe_constant_buffer *constant_buffer)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+
+ safe_memcpy(&dctx->draw_state.constant_buffers[shader][index],
+ constant_buffer, sizeof(*constant_buffer));
+ pipe->set_constant_buffer(pipe, shader, index, constant_buffer);
+}
+
+static void
+dd_context_set_scissor_states(struct pipe_context *_pipe,
+ unsigned start_slot, unsigned num_scissors,
+ const struct pipe_scissor_state *states)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+
+ safe_memcpy(&dctx->draw_state.scissors[start_slot], states,
+ sizeof(*states) * num_scissors);
+ pipe->set_scissor_states(pipe, start_slot, num_scissors, states);
+}
+
+static void
+dd_context_set_viewport_states(struct pipe_context *_pipe,
+ unsigned start_slot, unsigned num_viewports,
+ const struct pipe_viewport_state *states)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+
+ safe_memcpy(&dctx->draw_state.viewports[start_slot], states,
+ sizeof(*states) * num_viewports);
+ pipe->set_viewport_states(pipe, start_slot, num_viewports, states);
+}
+
+static void dd_context_set_tess_state(struct pipe_context *_pipe,
+ const float default_outer_level[4],
+ const float default_inner_level[2])
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+
+ memcpy(dctx->draw_state.tess_default_levels, default_outer_level,
+ sizeof(float) * 4);
+ memcpy(dctx->draw_state.tess_default_levels+4, default_inner_level,
+ sizeof(float) * 2);
+ pipe->set_tess_state(pipe, default_outer_level, default_inner_level);
+}
+
+
+/********************************************************************
+ * views
+ */
+
+static struct pipe_surface *
+dd_context_create_surface(struct pipe_context *_pipe,
+ struct pipe_resource *resource,
+ const struct pipe_surface *surf_tmpl)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+ struct pipe_surface *view =
+ pipe->create_surface(pipe, resource, surf_tmpl);
+
+ if (!view)
+ return NULL;
+ view->context = _pipe;
+ return view;
+}
+
+static void
+dd_context_surface_destroy(struct pipe_context *_pipe,
+ struct pipe_surface *surf)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ pipe->surface_destroy(pipe, surf);
+}
+
+static struct pipe_sampler_view *
+dd_context_create_sampler_view(struct pipe_context *_pipe,
+ struct pipe_resource *resource,
+ const struct pipe_sampler_view *templ)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+ struct pipe_sampler_view *view =
+ pipe->create_sampler_view(pipe, resource, templ);
+
+ if (!view)
+ return NULL;
+ view->context = _pipe;
+ return view;
+}
+
+static void
+dd_context_sampler_view_destroy(struct pipe_context *_pipe,
+ struct pipe_sampler_view *view)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ pipe->sampler_view_destroy(pipe, view);
+}
+
+static struct pipe_stream_output_target *
+dd_context_create_stream_output_target(struct pipe_context *_pipe,
+ struct pipe_resource *res,
+ unsigned buffer_offset,
+ unsigned buffer_size)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+ struct pipe_stream_output_target *view =
+ pipe->create_stream_output_target(pipe, res, buffer_offset,
+ buffer_size);
+
+ if (!view)
+ return NULL;
+ view->context = _pipe;
+ return view;
+}
+
+static void
+dd_context_stream_output_target_destroy(struct pipe_context *_pipe,
+ struct pipe_stream_output_target *target)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ pipe->stream_output_target_destroy(pipe, target);
+}
+
+
+/********************************************************************
+ * set states
+ */
+
+static void
+dd_context_set_sampler_views(struct pipe_context *_pipe,
+ enum pipe_shader_type shader,
+ unsigned start, unsigned num,
+ struct pipe_sampler_view **views)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+
+ safe_memcpy(&dctx->draw_state.sampler_views[shader][start], views,
+ sizeof(views[0]) * num);
+ pipe->set_sampler_views(pipe, shader, start, num, views);
+}
+
+static void
+dd_context_set_shader_images(struct pipe_context *_pipe,
+ enum pipe_shader_type shader,
+ unsigned start, unsigned num,
+ const struct pipe_image_view *views)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+
+ safe_memcpy(&dctx->draw_state.shader_images[shader][start], views,
+ sizeof(views[0]) * num);
+ pipe->set_shader_images(pipe, shader, start, num, views);
+}
+
+static void
+dd_context_set_shader_buffers(struct pipe_context *_pipe, unsigned shader,
+ unsigned start, unsigned num_buffers,
+ const struct pipe_shader_buffer *buffers)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+
+ safe_memcpy(&dctx->draw_state.shader_buffers[shader][start], buffers,
+ sizeof(buffers[0]) * num_buffers);
+ pipe->set_shader_buffers(pipe, shader, start, num_buffers, buffers);
+}
+
+static void
+dd_context_set_vertex_buffers(struct pipe_context *_pipe,
+ unsigned start, unsigned num_buffers,
+ const struct pipe_vertex_buffer *buffers)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+
+ safe_memcpy(&dctx->draw_state.vertex_buffers[start], buffers,
+ sizeof(buffers[0]) * num_buffers);
+ pipe->set_vertex_buffers(pipe, start, num_buffers, buffers);
+}
+
+static void
+dd_context_set_stream_output_targets(struct pipe_context *_pipe,
+ unsigned num_targets,
+ struct pipe_stream_output_target **tgs,
+ const unsigned *offsets)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+ struct dd_draw_state *dstate = &dctx->draw_state;
+
+ dstate->num_so_targets = num_targets;
+ safe_memcpy(dstate->so_targets, tgs, sizeof(*tgs) * num_targets);
+ safe_memcpy(dstate->so_offsets, offsets, sizeof(*offsets) * num_targets);
+ pipe->set_stream_output_targets(pipe, num_targets, tgs, offsets);
+}
+
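+/* Ask the hang-detection thread to exit and wait for it to finish. */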
+void
+dd_thread_join(struct dd_context *dctx)
+{
+ mtx_lock(&dctx->mutex);
+ dctx->kill_thread = true;
+ cnd_signal(&dctx->cond);
+ mtx_unlock(&dctx->mutex);
+ thrd_join(dctx->thread, NULL);
+}
+
+static void
+dd_context_destroy(struct pipe_context *_pipe)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+
+ dd_thread_join(dctx);
+ mtx_destroy(&dctx->mutex);
+ cnd_destroy(&dctx->cond);
+
+ assert(list_empty(&dctx->records));
+ assert(!dctx->record_pending);
+
+ if (pipe->set_log_context) {
+ pipe->set_log_context(pipe, NULL);
+
+      if (dd_screen(dctx->base.screen)->dump_mode == DD_DUMP_ALL_CALLS) {
+         FILE *f = dd_get_file_stream(dd_screen(dctx->base.screen), 0);
+         if (f) {
+            fprintf(f, "Remainder of driver log:\n\n");
+
+            /* Only print and close the final log page when the dump file
+             * could actually be opened. */
+            u_log_new_page_print(&dctx->log, f);
+            fclose(f);
+         }
+      }
+ }
+ u_log_context_destroy(&dctx->log);
+
+ pipe->destroy(pipe);
+ FREE(dctx);
+}
+
+
+/********************************************************************
+ * miscellaneous
+ */
+
+static void
+dd_context_texture_barrier(struct pipe_context *_pipe, unsigned flags)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ pipe->texture_barrier(pipe, flags);
+}
+
+static void
+dd_context_memory_barrier(struct pipe_context *_pipe, unsigned flags)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ pipe->memory_barrier(pipe, flags);
+}
+
+static bool
+dd_context_resource_commit(struct pipe_context *_pipe,
+ struct pipe_resource *resource,
+ unsigned level, struct pipe_box *box, bool commit)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ return pipe->resource_commit(pipe, resource, level, box, commit);
+}
+
+static void
+dd_context_get_sample_position(struct pipe_context *_pipe,
+ unsigned sample_count, unsigned sample_index,
+ float *out_value)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ return pipe->get_sample_position(pipe, sample_count, sample_index,
+ out_value);
+}
+
+static void
+dd_context_invalidate_resource(struct pipe_context *_pipe,
+ struct pipe_resource *resource)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ pipe->invalidate_resource(pipe, resource);
+}
+
+static enum pipe_reset_status
+dd_context_get_device_reset_status(struct pipe_context *_pipe)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ return pipe->get_device_reset_status(pipe);
+}
+
+static void
+dd_context_set_device_reset_callback(struct pipe_context *_pipe,
+ const struct pipe_device_reset_callback *cb)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ return pipe->set_device_reset_callback(pipe, cb);
+}
+
+static void
+dd_context_emit_string_marker(struct pipe_context *_pipe,
+ const char *string, int len)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+
+ pipe->emit_string_marker(pipe, string, len);
+ dd_parse_apitrace_marker(string, len, &dctx->draw_state.apitrace_call_number);
+}
+
+static void
+dd_context_dump_debug_state(struct pipe_context *_pipe, FILE *stream,
+ unsigned flags)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ return pipe->dump_debug_state(pipe, stream, flags);
+}
+
+static uint64_t
+dd_context_create_texture_handle(struct pipe_context *_pipe,
+ struct pipe_sampler_view *view,
+ const struct pipe_sampler_state *state)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ return pipe->create_texture_handle(pipe, view, state);
+}
+
+static void
+dd_context_delete_texture_handle(struct pipe_context *_pipe, uint64_t handle)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ pipe->delete_texture_handle(pipe, handle);
+}
+
+static void
+dd_context_make_texture_handle_resident(struct pipe_context *_pipe,
+ uint64_t handle, bool resident)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ pipe->make_texture_handle_resident(pipe, handle, resident);
+}
+
+static uint64_t
+dd_context_create_image_handle(struct pipe_context *_pipe,
+ const struct pipe_image_view *image)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ return pipe->create_image_handle(pipe, image);
+}
+
+static void
+dd_context_delete_image_handle(struct pipe_context *_pipe, uint64_t handle)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ pipe->delete_image_handle(pipe, handle);
+}
+
+static void
+dd_context_make_image_handle_resident(struct pipe_context *_pipe,
+ uint64_t handle, unsigned access,
+ bool resident)
+{
+ struct pipe_context *pipe = dd_context(_pipe)->pipe;
+
+ pipe->make_image_handle_resident(pipe, handle, access, resident);
+}
+
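+/* Create the ddebug wrapper context: expose the driver's priv data and
+ * uploaders, install the wrapped entrypoints, hook up the driver log, and
+ * start the background thread that watches draw-call fences for hangs.
+ */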
+struct pipe_context *
+dd_context_create(struct dd_screen *dscreen, struct pipe_context *pipe)
+{
+ struct dd_context *dctx;
+
+ if (!pipe)
+ return NULL;
+
+ dctx = CALLOC_STRUCT(dd_context);
+ if (!dctx)
+ goto fail;
+
+ dctx->pipe = pipe;
+ dctx->base.priv = pipe->priv; /* expose wrapped priv data */
+ dctx->base.screen = &dscreen->base;
+ dctx->base.stream_uploader = pipe->stream_uploader;
+ dctx->base.const_uploader = pipe->const_uploader;
+
+ dctx->base.destroy = dd_context_destroy;
+
+ CTX_INIT(render_condition);
+ CTX_INIT(create_query);
+ CTX_INIT(create_batch_query);
+ CTX_INIT(destroy_query);
+ CTX_INIT(begin_query);
+ CTX_INIT(end_query);
+ CTX_INIT(get_query_result);
+ CTX_INIT(set_active_query_state);
+ CTX_INIT(create_blend_state);
+ CTX_INIT(bind_blend_state);
+ CTX_INIT(delete_blend_state);
+ CTX_INIT(create_sampler_state);
+ CTX_INIT(bind_sampler_states);
+ CTX_INIT(delete_sampler_state);
+ CTX_INIT(create_rasterizer_state);
+ CTX_INIT(bind_rasterizer_state);
+ CTX_INIT(delete_rasterizer_state);
+ CTX_INIT(create_depth_stencil_alpha_state);
+ CTX_INIT(bind_depth_stencil_alpha_state);
+ CTX_INIT(delete_depth_stencil_alpha_state);
+ CTX_INIT(create_fs_state);
+ CTX_INIT(bind_fs_state);
+ CTX_INIT(delete_fs_state);
+ CTX_INIT(create_vs_state);
+ CTX_INIT(bind_vs_state);
+ CTX_INIT(delete_vs_state);
+ CTX_INIT(create_gs_state);
+ CTX_INIT(bind_gs_state);
+ CTX_INIT(delete_gs_state);
+ CTX_INIT(create_tcs_state);
+ CTX_INIT(bind_tcs_state);
+ CTX_INIT(delete_tcs_state);
+ CTX_INIT(create_tes_state);
+ CTX_INIT(bind_tes_state);
+ CTX_INIT(delete_tes_state);
+ CTX_INIT(create_compute_state);
+ CTX_INIT(bind_compute_state);
+ CTX_INIT(delete_compute_state);
+ CTX_INIT(create_vertex_elements_state);
+ CTX_INIT(bind_vertex_elements_state);
+ CTX_INIT(delete_vertex_elements_state);
+ CTX_INIT(set_blend_color);
+ CTX_INIT(set_stencil_ref);
+ CTX_INIT(set_sample_mask);
+ CTX_INIT(set_min_samples);
+ CTX_INIT(set_clip_state);
+ CTX_INIT(set_constant_buffer);
+ CTX_INIT(set_framebuffer_state);
+ CTX_INIT(set_polygon_stipple);
+ CTX_INIT(set_scissor_states);
+ CTX_INIT(set_viewport_states);
+ CTX_INIT(set_sampler_views);
+ CTX_INIT(set_tess_state);
+ CTX_INIT(set_shader_buffers);
+ CTX_INIT(set_shader_images);
+ CTX_INIT(set_vertex_buffers);
+ CTX_INIT(create_stream_output_target);
+ CTX_INIT(stream_output_target_destroy);
+ CTX_INIT(set_stream_output_targets);
+ CTX_INIT(create_sampler_view);
+ CTX_INIT(sampler_view_destroy);
+ CTX_INIT(create_surface);
+ CTX_INIT(surface_destroy);
+ CTX_INIT(texture_barrier);
+ CTX_INIT(memory_barrier);
+ CTX_INIT(resource_commit);
+ /* create_video_codec */
+ /* create_video_buffer */
+ /* set_compute_resources */
+ /* set_global_binding */
+ CTX_INIT(get_sample_position);
+ CTX_INIT(invalidate_resource);
+ CTX_INIT(get_device_reset_status);
+ CTX_INIT(set_device_reset_callback);
+ CTX_INIT(dump_debug_state);
+ CTX_INIT(emit_string_marker);
+ CTX_INIT(create_texture_handle);
+ CTX_INIT(delete_texture_handle);
+ CTX_INIT(make_texture_handle_resident);
+ CTX_INIT(create_image_handle);
+ CTX_INIT(delete_image_handle);
+ CTX_INIT(make_image_handle_resident);
+
+ dd_init_draw_functions(dctx);
+
+ u_log_context_init(&dctx->log);
+ if (pipe->set_log_context)
+ pipe->set_log_context(pipe, &dctx->log);
+
+ dctx->draw_state.sample_mask = ~0;
+
+ list_inithead(&dctx->records);
+ (void) mtx_init(&dctx->mutex, mtx_plain);
+ (void) cnd_init(&dctx->cond);
+ dctx->thread = u_thread_create(dd_thread_main, dctx);
+ if (!dctx->thread) {
+ mtx_destroy(&dctx->mutex);
+ goto fail;
+ }
+
+ return &dctx->base;
+
+fail:
+ FREE(dctx);
+ pipe->destroy(pipe);
+ return NULL;
+}
diff --git a/src/gallium/auxiliary/driver_ddebug/dd_draw.c b/src/gallium/auxiliary/driver_ddebug/dd_draw.c
new file mode 100644
index 00000000000..c404ea0607f
--- /dev/null
+++ b/src/gallium/auxiliary/driver_ddebug/dd_draw.c
@@ -0,0 +1,1645 @@
+/**************************************************************************
+ *
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "dd_pipe.h"
+
+#include "util/u_dump.h"
+#include "util/u_format.h"
+#include "util/u_framebuffer.h"
+#include "util/u_helpers.h"
+#include "util/u_inlines.h"
+#include "util/u_memory.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_scan.h"
+#include "util/os_time.h"
+#include <inttypes.h>
+
+
+static void
+dd_write_header(FILE *f, struct pipe_screen *screen, unsigned apitrace_call_number)
+{
+ char cmd_line[4096];
+ if (os_get_command_line(cmd_line, sizeof(cmd_line)))
+ fprintf(f, "Command: %s\n", cmd_line);
+ fprintf(f, "Driver vendor: %s\n", screen->get_vendor(screen));
+ fprintf(f, "Device vendor: %s\n", screen->get_device_vendor(screen));
+ fprintf(f, "Device name: %s\n\n", screen->get_name(screen));
+
+ if (apitrace_call_number)
+ fprintf(f, "Last apitrace call: %u\n\n", apitrace_call_number);
+}
+
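+/* Open the debug dump stream and write the common header (command line,
+ * driver/device info, last apitrace call number).
+ */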
+FILE *
+dd_get_file_stream(struct dd_screen *dscreen, unsigned apitrace_call_number)
+{
+ struct pipe_screen *screen = dscreen->screen;
+
+ FILE *f = dd_get_debug_file(dscreen->verbose);
+ if (!f)
+ return NULL;
+
+ dd_write_header(f, screen, apitrace_call_number);
+ return f;
+}
+
+static void
+dd_dump_dmesg(FILE *f)
+{
+ char line[2000];
+ FILE *p = popen("dmesg | tail -n60", "r");
+
+ if (!p)
+ return;
+
+ fprintf(f, "\nLast 60 lines of dmesg:\n\n");
+ while (fgets(line, sizeof(line), p))
+ fputs(line, f);
+
+ pclose(p);
+}
+
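+/* Scan the last pre-rasterization shader (GS, then TES, then VS) for
+ * viewport-index writes; if it writes the index, all PIPE_MAX_VIEWPORTS
+ * viewports may be active, otherwise only one needs to be dumped.
+ */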
+static unsigned
+dd_num_active_viewports(struct dd_draw_state *dstate)
+{
+ struct tgsi_shader_info info;
+ const struct tgsi_token *tokens;
+
+ if (dstate->shaders[PIPE_SHADER_GEOMETRY])
+ tokens = dstate->shaders[PIPE_SHADER_GEOMETRY]->state.shader.tokens;
+ else if (dstate->shaders[PIPE_SHADER_TESS_EVAL])
+ tokens = dstate->shaders[PIPE_SHADER_TESS_EVAL]->state.shader.tokens;
+ else if (dstate->shaders[PIPE_SHADER_VERTEX])
+ tokens = dstate->shaders[PIPE_SHADER_VERTEX]->state.shader.tokens;
+ else
+ return 1;
+
+ if (tokens) {
+ tgsi_scan_shader(tokens, &info);
+ if (info.writes_viewport_index)
+ return PIPE_MAX_VIEWPORTS;
+ }
+
+ return 1;
+}
+
+#define COLOR_RESET "\033[0m"
+#define COLOR_SHADER "\033[1;32m"
+#define COLOR_STATE "\033[1;33m"
+
+#define DUMP(name, var) do { \
+ fprintf(f, COLOR_STATE #name ": " COLOR_RESET); \
+ util_dump_##name(f, var); \
+ fprintf(f, "\n"); \
+} while(0)
+
+#define DUMP_I(name, var, i) do { \
+ fprintf(f, COLOR_STATE #name " %i: " COLOR_RESET, i); \
+ util_dump_##name(f, var); \
+ fprintf(f, "\n"); \
+} while(0)
+
+#define DUMP_M(name, var, member) do { \
+ fprintf(f, " " #member ": "); \
+ util_dump_##name(f, (var)->member); \
+ fprintf(f, "\n"); \
+} while(0)
+
+#define DUMP_M_ADDR(name, var, member) do { \
+ fprintf(f, " " #member ": "); \
+ util_dump_##name(f, &(var)->member); \
+ fprintf(f, "\n"); \
+} while(0)
+
+#define PRINT_NAMED(type, name, value) \
+do { \
+ fprintf(f, COLOR_STATE "%s" COLOR_RESET " = ", name); \
+ util_dump_##type(f, value); \
+ fprintf(f, "\n"); \
+} while (0)
+
+static void
+util_dump_uint(FILE *f, unsigned i)
+{
+ fprintf(f, "%u", i);
+}
+
+static void
+util_dump_int(FILE *f, int i)
+{
+ fprintf(f, "%d", i);
+}
+
+static void
+util_dump_hex(FILE *f, unsigned i)
+{
+ fprintf(f, "0x%x", i);
+}
+
+static void
+util_dump_double(FILE *f, double d)
+{
+ fprintf(f, "%f", d);
+}
+
+static void
+util_dump_format(FILE *f, enum pipe_format format)
+{
+ fprintf(f, "%s", util_format_name(format));
+}
+
+static void
+util_dump_color_union(FILE *f, const union pipe_color_union *color)
+{
+ fprintf(f, "{f = {%f, %f, %f, %f}, ui = {%u, %u, %u, %u}",
+ color->f[0], color->f[1], color->f[2], color->f[3],
+ color->ui[0], color->ui[1], color->ui[2], color->ui[3]);
+}
+
+static void
+dd_dump_render_condition(struct dd_draw_state *dstate, FILE *f)
+{
+ if (dstate->render_cond.query) {
+ fprintf(f, "render condition:\n");
+ DUMP_M(query_type, &dstate->render_cond, query->type);
+ DUMP_M(uint, &dstate->render_cond, condition);
+ DUMP_M(uint, &dstate->render_cond, mode);
+ fprintf(f, "\n");
+ }
+}
+
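+/* Dump everything bound to one shader stage: the shader itself plus its
+ * constant buffers, samplers, sampler views, images, and shader buffers
+ * (and, for the fragment stage, the rasterizer-related state).
+ */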
+static void
+dd_dump_shader(struct dd_draw_state *dstate, enum pipe_shader_type sh, FILE *f)
+{
+ int i;
+ const char *shader_str[PIPE_SHADER_TYPES];
+
+ shader_str[PIPE_SHADER_VERTEX] = "VERTEX";
+ shader_str[PIPE_SHADER_TESS_CTRL] = "TESS_CTRL";
+ shader_str[PIPE_SHADER_TESS_EVAL] = "TESS_EVAL";
+ shader_str[PIPE_SHADER_GEOMETRY] = "GEOMETRY";
+ shader_str[PIPE_SHADER_FRAGMENT] = "FRAGMENT";
+ shader_str[PIPE_SHADER_COMPUTE] = "COMPUTE";
+
+ if (sh == PIPE_SHADER_TESS_CTRL &&
+ !dstate->shaders[PIPE_SHADER_TESS_CTRL] &&
+ dstate->shaders[PIPE_SHADER_TESS_EVAL])
+ fprintf(f, "tess_state: {default_outer_level = {%f, %f, %f, %f}, "
+ "default_inner_level = {%f, %f}}\n",
+ dstate->tess_default_levels[0],
+ dstate->tess_default_levels[1],
+ dstate->tess_default_levels[2],
+ dstate->tess_default_levels[3],
+ dstate->tess_default_levels[4],
+ dstate->tess_default_levels[5]);
+
+ if (sh == PIPE_SHADER_FRAGMENT)
+ if (dstate->rs) {
+ unsigned num_viewports = dd_num_active_viewports(dstate);
+
+ if (dstate->rs->state.rs.clip_plane_enable)
+ DUMP(clip_state, &dstate->clip_state);
+
+ for (i = 0; i < num_viewports; i++)
+ DUMP_I(viewport_state, &dstate->viewports[i], i);
+
+ if (dstate->rs->state.rs.scissor)
+ for (i = 0; i < num_viewports; i++)
+ DUMP_I(scissor_state, &dstate->scissors[i], i);
+
+ DUMP(rasterizer_state, &dstate->rs->state.rs);
+
+ if (dstate->rs->state.rs.poly_stipple_enable)
+ DUMP(poly_stipple, &dstate->polygon_stipple);
+ fprintf(f, "\n");
+ }
+
+ if (!dstate->shaders[sh])
+ return;
+
+ fprintf(f, COLOR_SHADER "begin shader: %s" COLOR_RESET "\n", shader_str[sh]);
+ DUMP(shader_state, &dstate->shaders[sh]->state.shader);
+
+ for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++)
+ if (dstate->constant_buffers[sh][i].buffer ||
+ dstate->constant_buffers[sh][i].user_buffer) {
+ DUMP_I(constant_buffer, &dstate->constant_buffers[sh][i], i);
+ if (dstate->constant_buffers[sh][i].buffer)
+ DUMP_M(resource, &dstate->constant_buffers[sh][i], buffer);
+ }
+
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
+ if (dstate->sampler_states[sh][i])
+ DUMP_I(sampler_state, &dstate->sampler_states[sh][i]->state.sampler, i);
+
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
+ if (dstate->sampler_views[sh][i]) {
+ DUMP_I(sampler_view, dstate->sampler_views[sh][i], i);
+ DUMP_M(resource, dstate->sampler_views[sh][i], texture);
+ }
+
+ for (i = 0; i < PIPE_MAX_SHADER_IMAGES; i++)
+ if (dstate->shader_images[sh][i].resource) {
+ DUMP_I(image_view, &dstate->shader_images[sh][i], i);
+ if (dstate->shader_images[sh][i].resource)
+ DUMP_M(resource, &dstate->shader_images[sh][i], resource);
+ }
+
+ for (i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++)
+ if (dstate->shader_buffers[sh][i].buffer) {
+ DUMP_I(shader_buffer, &dstate->shader_buffers[sh][i], i);
+ if (dstate->shader_buffers[sh][i].buffer)
+ DUMP_M(resource, &dstate->shader_buffers[sh][i], buffer);
+ }
+
+ fprintf(f, COLOR_SHADER "end shader: %s" COLOR_RESET "\n\n", shader_str[sh]);
+}
+
+static void
+dd_dump_draw_vbo(struct dd_draw_state *dstate, struct pipe_draw_info *info, FILE *f)
+{
+ int sh, i;
+
+ DUMP(draw_info, info);
+ if (info->count_from_stream_output)
+ DUMP_M(stream_output_target, info,
+ count_from_stream_output);
+ if (info->indirect) {
+ DUMP_M(resource, info, indirect->buffer);
+ if (info->indirect->indirect_draw_count)
+ DUMP_M(resource, info, indirect->indirect_draw_count);
+ }
+
+ fprintf(f, "\n");
+
+ /* TODO: dump active queries */
+
+ dd_dump_render_condition(dstate, f);
+
+ for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
+ if (dstate->vertex_buffers[i].buffer.resource) {
+ DUMP_I(vertex_buffer, &dstate->vertex_buffers[i], i);
+ if (!dstate->vertex_buffers[i].is_user_buffer)
+ DUMP_M(resource, &dstate->vertex_buffers[i], buffer.resource);
+ }
+
+ if (dstate->velems) {
+ PRINT_NAMED(uint, "num vertex elements",
+ dstate->velems->state.velems.count);
+ for (i = 0; i < dstate->velems->state.velems.count; i++) {
+ fprintf(f, " ");
+ DUMP_I(vertex_element, &dstate->velems->state.velems.velems[i], i);
+ }
+ }
+
+ PRINT_NAMED(uint, "num stream output targets", dstate->num_so_targets);
+ for (i = 0; i < dstate->num_so_targets; i++)
+ if (dstate->so_targets[i]) {
+ DUMP_I(stream_output_target, dstate->so_targets[i], i);
+ DUMP_M(resource, dstate->so_targets[i], buffer);
+ fprintf(f, " offset = %i\n", dstate->so_offsets[i]);
+ }
+
+ fprintf(f, "\n");
+ for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
+ if (sh == PIPE_SHADER_COMPUTE)
+ continue;
+
+ dd_dump_shader(dstate, sh, f);
+ }
+
+ if (dstate->dsa)
+ DUMP(depth_stencil_alpha_state, &dstate->dsa->state.dsa);
+ DUMP(stencil_ref, &dstate->stencil_ref);
+
+ if (dstate->blend)
+ DUMP(blend_state, &dstate->blend->state.blend);
+ DUMP(blend_color, &dstate->blend_color);
+
+ PRINT_NAMED(uint, "min_samples", dstate->min_samples);
+ PRINT_NAMED(hex, "sample_mask", dstate->sample_mask);
+ fprintf(f, "\n");
+
+ DUMP(framebuffer_state, &dstate->framebuffer_state);
+ for (i = 0; i < dstate->framebuffer_state.nr_cbufs; i++)
+ if (dstate->framebuffer_state.cbufs[i]) {
+ fprintf(f, " " COLOR_STATE "cbufs[%i]:" COLOR_RESET "\n ", i);
+ DUMP(surface, dstate->framebuffer_state.cbufs[i]);
+ fprintf(f, " ");
+ DUMP(resource, dstate->framebuffer_state.cbufs[i]->texture);
+ }
+ if (dstate->framebuffer_state.zsbuf) {
+ fprintf(f, " " COLOR_STATE "zsbuf:" COLOR_RESET "\n ");
+ DUMP(surface, dstate->framebuffer_state.zsbuf);
+ fprintf(f, " ");
+ DUMP(resource, dstate->framebuffer_state.zsbuf->texture);
+ }
+ fprintf(f, "\n");
+}
+
+static void
+dd_dump_launch_grid(struct dd_draw_state *dstate, struct pipe_grid_info *info, FILE *f)
+{
+ fprintf(f, "%s:\n", __func__+8);
+ DUMP(grid_info, info);
+ fprintf(f, "\n");
+
+ dd_dump_shader(dstate, PIPE_SHADER_COMPUTE, f);
+ fprintf(f, "\n");
+}
+
+static void
+dd_dump_resource_copy_region(struct dd_draw_state *dstate,
+ struct call_resource_copy_region *info,
+ FILE *f)
+{
+ fprintf(f, "%s:\n", __func__+8);
+ DUMP_M(resource, info, dst);
+ DUMP_M(uint, info, dst_level);
+ DUMP_M(uint, info, dstx);
+ DUMP_M(uint, info, dsty);
+ DUMP_M(uint, info, dstz);
+ DUMP_M(resource, info, src);
+ DUMP_M(uint, info, src_level);
+ DUMP_M_ADDR(box, info, src_box);
+}
+
+static void
+dd_dump_blit(struct dd_draw_state *dstate, struct pipe_blit_info *info, FILE *f)
+{
+ fprintf(f, "%s:\n", __func__+8);
+ DUMP_M(resource, info, dst.resource);
+ DUMP_M(uint, info, dst.level);
+ DUMP_M_ADDR(box, info, dst.box);
+ DUMP_M(format, info, dst.format);
+
+ DUMP_M(resource, info, src.resource);
+ DUMP_M(uint, info, src.level);
+ DUMP_M_ADDR(box, info, src.box);
+ DUMP_M(format, info, src.format);
+
+ DUMP_M(hex, info, mask);
+ DUMP_M(uint, info, filter);
+ DUMP_M(uint, info, scissor_enable);
+ DUMP_M_ADDR(scissor_state, info, scissor);
+ DUMP_M(uint, info, render_condition_enable);
+
+ if (info->render_condition_enable)
+ dd_dump_render_condition(dstate, f);
+}
+
+static void
+dd_dump_generate_mipmap(struct dd_draw_state *dstate, FILE *f)
+{
+ fprintf(f, "%s:\n", __func__+8);
+ /* TODO */
+}
+
+static void
+dd_dump_get_query_result_resource(struct call_get_query_result_resource *info, FILE *f)
+{
+ fprintf(f, "%s:\n", __func__ + 8);
+ DUMP_M(query_type, info, query_type);
+ DUMP_M(uint, info, wait);
+ DUMP_M(query_value_type, info, result_type);
+ DUMP_M(int, info, index);
+ DUMP_M(resource, info, resource);
+ DUMP_M(uint, info, offset);
+}
+
+static void
+dd_dump_flush_resource(struct dd_draw_state *dstate, struct pipe_resource *res,
+ FILE *f)
+{
+ fprintf(f, "%s:\n", __func__+8);
+ DUMP(resource, res);
+}
+
+static void
+dd_dump_clear(struct dd_draw_state *dstate, struct call_clear *info, FILE *f)
+{
+ fprintf(f, "%s:\n", __func__+8);
+ DUMP_M(uint, info, buffers);
+ DUMP_M_ADDR(color_union, info, color);
+ DUMP_M(double, info, depth);
+ DUMP_M(hex, info, stencil);
+}
+
+static void
+dd_dump_clear_buffer(struct dd_draw_state *dstate, struct call_clear_buffer *info,
+ FILE *f)
+{
+ int i;
+ const char *value = (const char*)info->clear_value;
+
+ fprintf(f, "%s:\n", __func__+8);
+ DUMP_M(resource, info, res);
+ DUMP_M(uint, info, offset);
+ DUMP_M(uint, info, size);
+ DUMP_M(uint, info, clear_value_size);
+
+ fprintf(f, " clear_value:");
+ for (i = 0; i < info->clear_value_size; i++)
+ fprintf(f, " %02x", value[i]);
+ fprintf(f, "\n");
+}
+
+static void
+dd_dump_transfer_map(struct call_transfer_map *info, FILE *f)
+{
+ fprintf(f, "%s:\n", __func__+8);
+ DUMP_M_ADDR(transfer, info, transfer);
+ DUMP_M(ptr, info, transfer_ptr);
+ DUMP_M(ptr, info, ptr);
+}
+
+static void
+dd_dump_transfer_flush_region(struct call_transfer_flush_region *info, FILE *f)
+{
+ fprintf(f, "%s:\n", __func__+8);
+ DUMP_M_ADDR(transfer, info, transfer);
+ DUMP_M(ptr, info, transfer_ptr);
+ DUMP_M_ADDR(box, info, box);
+}
+
+static void
+dd_dump_transfer_unmap(struct call_transfer_unmap *info, FILE *f)
+{
+ fprintf(f, "%s:\n", __func__+8);
+ DUMP_M_ADDR(transfer, info, transfer);
+ DUMP_M(ptr, info, transfer_ptr);
+}
+
+static void
+dd_dump_buffer_subdata(struct call_buffer_subdata *info, FILE *f)
+{
+ fprintf(f, "%s:\n", __func__+8);
+ DUMP_M(resource, info, resource);
+ DUMP_M(transfer_usage, info, usage);
+ DUMP_M(uint, info, offset);
+ DUMP_M(uint, info, size);
+ DUMP_M(ptr, info, data);
+}
+
+static void
+dd_dump_texture_subdata(struct call_texture_subdata *info, FILE *f)
+{
+ fprintf(f, "%s:\n", __func__+8);
+ DUMP_M(resource, info, resource);
+ DUMP_M(uint, info, level);
+ DUMP_M(transfer_usage, info, usage);
+ DUMP_M_ADDR(box, info, box);
+ DUMP_M(ptr, info, data);
+ DUMP_M(uint, info, stride);
+ DUMP_M(uint, info, layer_stride);
+}
+
+static void
+dd_dump_clear_texture(struct dd_draw_state *dstate, FILE *f)
+{
+ fprintf(f, "%s:\n", __func__+8);
+ /* TODO */
+}
+
+static void
+dd_dump_clear_render_target(struct dd_draw_state *dstate, FILE *f)
+{
+ fprintf(f, "%s:\n", __func__+8);
+ /* TODO */
+}
+
+static void
+dd_dump_clear_depth_stencil(struct dd_draw_state *dstate, FILE *f)
+{
+ fprintf(f, "%s:\n", __func__+8);
+ /* TODO */
+}
+
+static void
+dd_dump_driver_state(struct dd_context *dctx, FILE *f, unsigned flags)
+{
+ if (dctx->pipe->dump_debug_state) {
+ fprintf(f,"\n\n**************************************************"
+ "***************************\n");
+ fprintf(f, "Driver-specific state:\n\n");
+ dctx->pipe->dump_debug_state(dctx->pipe, f, flags);
+ }
+}
+
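+/* Dispatch to the per-call dump routine based on the recorded call type. */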
+static void
+dd_dump_call(FILE *f, struct dd_draw_state *state, struct dd_call *call)
+{
+ switch (call->type) {
+ case CALL_DRAW_VBO:
+ dd_dump_draw_vbo(state, &call->info.draw_vbo.draw, f);
+ break;
+ case CALL_LAUNCH_GRID:
+ dd_dump_launch_grid(state, &call->info.launch_grid, f);
+ break;
+ case CALL_RESOURCE_COPY_REGION:
+ dd_dump_resource_copy_region(state,
+ &call->info.resource_copy_region, f);
+ break;
+ case CALL_BLIT:
+ dd_dump_blit(state, &call->info.blit, f);
+ break;
+ case CALL_FLUSH_RESOURCE:
+ dd_dump_flush_resource(state, call->info.flush_resource, f);
+ break;
+ case CALL_CLEAR:
+ dd_dump_clear(state, &call->info.clear, f);
+ break;
+ case CALL_CLEAR_BUFFER:
+ dd_dump_clear_buffer(state, &call->info.clear_buffer, f);
+ break;
+ case CALL_CLEAR_TEXTURE:
+ dd_dump_clear_texture(state, f);
+ break;
+ case CALL_CLEAR_RENDER_TARGET:
+ dd_dump_clear_render_target(state, f);
+ break;
+ case CALL_CLEAR_DEPTH_STENCIL:
+ dd_dump_clear_depth_stencil(state, f);
+ break;
+ case CALL_GENERATE_MIPMAP:
+ dd_dump_generate_mipmap(state, f);
+ break;
+ case CALL_GET_QUERY_RESULT_RESOURCE:
+ dd_dump_get_query_result_resource(&call->info.get_query_result_resource, f);
+ break;
+ case CALL_TRANSFER_MAP:
+ dd_dump_transfer_map(&call->info.transfer_map, f);
+ break;
+ case CALL_TRANSFER_FLUSH_REGION:
+ dd_dump_transfer_flush_region(&call->info.transfer_flush_region, f);
+ break;
+ case CALL_TRANSFER_UNMAP:
+ dd_dump_transfer_unmap(&call->info.transfer_unmap, f);
+ break;
+ case CALL_BUFFER_SUBDATA:
+ dd_dump_buffer_subdata(&call->info.buffer_subdata, f);
+ break;
+ case CALL_TEXTURE_SUBDATA:
+ dd_dump_texture_subdata(&call->info.texture_subdata, f);
+ break;
+ }
+}
+
+static void
+dd_kill_process(void)
+{
+ sync();
+ fprintf(stderr, "dd: Aborting the process...\n");
+ fflush(stdout);
+ fflush(stderr);
+ exit(1);
+}
+
+static void
+dd_unreference_copy_of_call(struct dd_call *dst)
+{
+ switch (dst->type) {
+ case CALL_DRAW_VBO:
+ pipe_so_target_reference(&dst->info.draw_vbo.draw.count_from_stream_output, NULL);
+ pipe_resource_reference(&dst->info.draw_vbo.indirect.buffer, NULL);
+ pipe_resource_reference(&dst->info.draw_vbo.indirect.indirect_draw_count, NULL);
+ if (dst->info.draw_vbo.draw.index_size &&
+ !dst->info.draw_vbo.draw.has_user_indices)
+ pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource, NULL);
+ else
+ dst->info.draw_vbo.draw.index.user = NULL;
+ break;
+ case CALL_LAUNCH_GRID:
+ pipe_resource_reference(&dst->info.launch_grid.indirect, NULL);
+ break;
+ case CALL_RESOURCE_COPY_REGION:
+ pipe_resource_reference(&dst->info.resource_copy_region.dst, NULL);
+ pipe_resource_reference(&dst->info.resource_copy_region.src, NULL);
+ break;
+ case CALL_BLIT:
+ pipe_resource_reference(&dst->info.blit.dst.resource, NULL);
+ pipe_resource_reference(&dst->info.blit.src.resource, NULL);
+ break;
+ case CALL_FLUSH_RESOURCE:
+ pipe_resource_reference(&dst->info.flush_resource, NULL);
+ break;
+ case CALL_CLEAR:
+ break;
+ case CALL_CLEAR_BUFFER:
+ pipe_resource_reference(&dst->info.clear_buffer.res, NULL);
+ break;
+ case CALL_CLEAR_TEXTURE:
+ break;
+ case CALL_CLEAR_RENDER_TARGET:
+ break;
+ case CALL_CLEAR_DEPTH_STENCIL:
+ break;
+ case CALL_GENERATE_MIPMAP:
+ pipe_resource_reference(&dst->info.generate_mipmap.res, NULL);
+ break;
+ case CALL_GET_QUERY_RESULT_RESOURCE:
+ pipe_resource_reference(&dst->info.get_query_result_resource.resource, NULL);
+ break;
+ case CALL_TRANSFER_MAP:
+ pipe_resource_reference(&dst->info.transfer_map.transfer.resource, NULL);
+ break;
+ case CALL_TRANSFER_FLUSH_REGION:
+ pipe_resource_reference(&dst->info.transfer_flush_region.transfer.resource, NULL);
+ break;
+ case CALL_TRANSFER_UNMAP:
+ pipe_resource_reference(&dst->info.transfer_unmap.transfer.resource, NULL);
+ break;
+ case CALL_BUFFER_SUBDATA:
+ pipe_resource_reference(&dst->info.buffer_subdata.resource, NULL);
+ break;
+ case CALL_TEXTURE_SUBDATA:
+ pipe_resource_reference(&dst->info.texture_subdata.resource, NULL);
+ break;
+ }
+}
+
+static void
+dd_init_copy_of_draw_state(struct dd_draw_state_copy *state)
+{
+ unsigned i,j;
+
+ /* Just clear pointers to gallium objects. Don't clear the whole structure,
+ * because it would kill performance with its size of 130 KB.
+ */
+ memset(state->base.vertex_buffers, 0,
+ sizeof(state->base.vertex_buffers));
+ memset(state->base.so_targets, 0,
+ sizeof(state->base.so_targets));
+ memset(state->base.constant_buffers, 0,
+ sizeof(state->base.constant_buffers));
+ memset(state->base.sampler_views, 0,
+ sizeof(state->base.sampler_views));
+ memset(state->base.shader_images, 0,
+ sizeof(state->base.shader_images));
+ memset(state->base.shader_buffers, 0,
+ sizeof(state->base.shader_buffers));
+ memset(&state->base.framebuffer_state, 0,
+ sizeof(state->base.framebuffer_state));
+
+ memset(state->shaders, 0, sizeof(state->shaders));
+
+ state->base.render_cond.query = &state->render_cond;
+
+ for (i = 0; i < PIPE_SHADER_TYPES; i++) {
+ state->base.shaders[i] = &state->shaders[i];
+ for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
+ state->base.sampler_states[i][j] = &state->sampler_states[i][j];
+ }
+
+ state->base.velems = &state->velems;
+ state->base.rs = &state->rs;
+ state->base.dsa = &state->dsa;
+ state->base.blend = &state->blend;
+}
+
+static void
+dd_unreference_copy_of_draw_state(struct dd_draw_state_copy *state)
+{
+ struct dd_draw_state *dst = &state->base;
+ unsigned i,j;
+
+ for (i = 0; i < ARRAY_SIZE(dst->vertex_buffers); i++)
+ pipe_vertex_buffer_unreference(&dst->vertex_buffers[i]);
+ for (i = 0; i < ARRAY_SIZE(dst->so_targets); i++)
+ pipe_so_target_reference(&dst->so_targets[i], NULL);
+
+ for (i = 0; i < PIPE_SHADER_TYPES; i++) {
+ if (dst->shaders[i])
+ tgsi_free_tokens(dst->shaders[i]->state.shader.tokens);
+
+ for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++)
+ pipe_resource_reference(&dst->constant_buffers[i][j].buffer, NULL);
+ for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
+ pipe_sampler_view_reference(&dst->sampler_views[i][j], NULL);
+ for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++)
+ pipe_resource_reference(&dst->shader_images[i][j].resource, NULL);
+ for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++)
+ pipe_resource_reference(&dst->shader_buffers[i][j].buffer, NULL);
+ }
+
+ util_unreference_framebuffer_state(&dst->framebuffer_state);
+}
+
+static void
+dd_copy_draw_state(struct dd_draw_state *dst, struct dd_draw_state *src)
+{
+ unsigned i,j;
+
+ if (src->render_cond.query) {
+ *dst->render_cond.query = *src->render_cond.query;
+ dst->render_cond.condition = src->render_cond.condition;
+ dst->render_cond.mode = src->render_cond.mode;
+ } else {
+ dst->render_cond.query = NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(src->vertex_buffers); i++) {
+ pipe_vertex_buffer_reference(&dst->vertex_buffers[i],
+ &src->vertex_buffers[i]);
+ }
+
+ dst->num_so_targets = src->num_so_targets;
+ for (i = 0; i < src->num_so_targets; i++)
+ pipe_so_target_reference(&dst->so_targets[i], src->so_targets[i]);
+ memcpy(dst->so_offsets, src->so_offsets, sizeof(src->so_offsets));
+
+ for (i = 0; i < PIPE_SHADER_TYPES; i++) {
+ if (!src->shaders[i]) {
+ dst->shaders[i] = NULL;
+ continue;
+ }
+
+ if (src->shaders[i]) {
+ dst->shaders[i]->state.shader = src->shaders[i]->state.shader;
+ if (src->shaders[i]->state.shader.tokens) {
+ dst->shaders[i]->state.shader.tokens =
+ tgsi_dup_tokens(src->shaders[i]->state.shader.tokens);
+ } else {
+ dst->shaders[i]->state.shader.ir.nir = NULL;
+ }
+ } else {
+ dst->shaders[i] = NULL;
+ }
+
+ for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++) {
+ pipe_resource_reference(&dst->constant_buffers[i][j].buffer,
+ src->constant_buffers[i][j].buffer);
+ memcpy(&dst->constant_buffers[i][j], &src->constant_buffers[i][j],
+ sizeof(src->constant_buffers[i][j]));
+ }
+
+ for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
+ pipe_sampler_view_reference(&dst->sampler_views[i][j],
+ src->sampler_views[i][j]);
+ if (src->sampler_states[i][j])
+ dst->sampler_states[i][j]->state.sampler =
+ src->sampler_states[i][j]->state.sampler;
+ else
+ dst->sampler_states[i][j] = NULL;
+ }
+
+ for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++) {
+ pipe_resource_reference(&dst->shader_images[i][j].resource,
+ src->shader_images[i][j].resource);
+ memcpy(&dst->shader_images[i][j], &src->shader_images[i][j],
+ sizeof(src->shader_images[i][j]));
+ }
+
+ for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++) {
+ pipe_resource_reference(&dst->shader_buffers[i][j].buffer,
+ src->shader_buffers[i][j].buffer);
+ memcpy(&dst->shader_buffers[i][j], &src->shader_buffers[i][j],
+ sizeof(src->shader_buffers[i][j]));
+ }
+ }
+
+ if (src->velems)
+ dst->velems->state.velems = src->velems->state.velems;
+ else
+ dst->velems = NULL;
+
+ if (src->rs)
+ dst->rs->state.rs = src->rs->state.rs;
+ else
+ dst->rs = NULL;
+
+ if (src->dsa)
+ dst->dsa->state.dsa = src->dsa->state.dsa;
+ else
+ dst->dsa = NULL;
+
+ if (src->blend)
+ dst->blend->state.blend = src->blend->state.blend;
+ else
+ dst->blend = NULL;
+
+ dst->blend_color = src->blend_color;
+ dst->stencil_ref = src->stencil_ref;
+ dst->sample_mask = src->sample_mask;
+ dst->min_samples = src->min_samples;
+ dst->clip_state = src->clip_state;
+ util_copy_framebuffer_state(&dst->framebuffer_state, &src->framebuffer_state);
+ memcpy(dst->scissors, src->scissors, sizeof(src->scissors));
+ memcpy(dst->viewports, src->viewports, sizeof(src->viewports));
+ memcpy(dst->tess_default_levels, src->tess_default_levels,
+ sizeof(src->tess_default_levels));
+ dst->apitrace_call_number = src->apitrace_call_number;
+}
+
+static void
+dd_free_record(struct pipe_screen *screen, struct dd_draw_record *record)
+{
+ u_log_page_destroy(record->log_page);
+ dd_unreference_copy_of_call(&record->call);
+ dd_unreference_copy_of_draw_state(&record->draw_state);
+ screen->fence_reference(screen, &record->prev_bottom_of_pipe, NULL);
+ screen->fence_reference(screen, &record->top_of_pipe, NULL);
+ screen->fence_reference(screen, &record->bottom_of_pipe, NULL);
+ util_queue_fence_destroy(&record->driver_finished);
+ FREE(record);
+}
+
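+/* Write one draw record to the dump file: timestamps, the recorded call and
+ * captured draw state, and the driver's context log page if present.
+ */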
+static void
+dd_write_record(FILE *f, struct dd_draw_record *record)
+{
+ PRINT_NAMED(ptr, "pipe", record->dctx->pipe);
+ PRINT_NAMED(ns, "time before (API call)", record->time_before);
+ PRINT_NAMED(ns, "time after (driver done)", record->time_after);
+ fprintf(f, "\n");
+
+ dd_dump_call(f, &record->draw_state.base, &record->call);
+
+ if (record->log_page) {
+ fprintf(f,"\n\n**************************************************"
+ "***************************\n");
+ fprintf(f, "Context Log:\n\n");
+ u_log_page_print(record->log_page, f);
+ }
+}
+
+static void
+dd_maybe_dump_record(struct dd_screen *dscreen, struct dd_draw_record *record)
+{
+ if (dscreen->dump_mode == DD_DUMP_ONLY_HANGS ||
+ (dscreen->dump_mode == DD_DUMP_APITRACE_CALL &&
+ dscreen->apitrace_dump_call != record->draw_state.base.apitrace_call_number))
+ return;
+
+ char name[512];
+ dd_get_debug_filename_and_mkdir(name, sizeof(name), dscreen->verbose);
+ FILE *f = fopen(name, "w");
+ if (!f) {
+ fprintf(stderr, "dd: failed to open %s\n", name);
+ return;
+ }
+
+ dd_write_header(f, dscreen->screen, record->draw_state.base.apitrace_call_number);
+ dd_write_record(f, record);
+
+ fclose(f);
+}
+
+static const char *
+dd_fence_state(struct pipe_screen *screen, struct pipe_fence_handle *fence,
+ bool *not_reached)
+{
+ if (!fence)
+ return "---";
+
+ bool ok = screen->fence_finish(screen, NULL, fence, 0);
+
+ if (not_reached && !ok)
+ *not_reached = true;
+
+ return ok ? "YES" : "NO ";
+}
+
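+/* Walk the list of in-flight draw records, print a fence-status table to
+ * stderr, dump each suspect draw to its own file (plus driver state and
+ * dmesg for the first hanging one), then abort the process.
+ */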
+static void
+dd_report_hang(struct dd_context *dctx)
+{
+ struct dd_screen *dscreen = dd_screen(dctx->base.screen);
+ struct pipe_screen *screen = dscreen->screen;
+ bool encountered_hang = false;
+ bool stop_output = false;
+ unsigned num_later = 0;
+
+ fprintf(stderr, "GPU hang detected, collecting information...\n\n");
+
+ fprintf(stderr, "Draw # driver prev BOP TOP BOP dump file\n"
+ "-------------------------------------------------------------\n");
+
+ list_for_each_entry(struct dd_draw_record, record, &dctx->records, list) {
+ if (!encountered_hang &&
+ screen->fence_finish(screen, NULL, record->bottom_of_pipe, 0)) {
+ dd_maybe_dump_record(dscreen, record);
+ continue;
+ }
+
+ if (stop_output) {
+ dd_maybe_dump_record(dscreen, record);
+ num_later++;
+ continue;
+ }
+
+ bool driver = util_queue_fence_is_signalled(&record->driver_finished);
+ bool top_not_reached = false;
+ const char *prev_bop = dd_fence_state(screen, record->prev_bottom_of_pipe, NULL);
+ const char *top = dd_fence_state(screen, record->top_of_pipe, &top_not_reached);
+ const char *bop = dd_fence_state(screen, record->bottom_of_pipe, NULL);
+
+ fprintf(stderr, "%-9u %s %s %s %s ",
+ record->draw_call, driver ? "YES" : "NO ", prev_bop, top, bop);
+
+ char name[512];
+ dd_get_debug_filename_and_mkdir(name, sizeof(name), false);
+
+ FILE *f = fopen(name, "w");
+ if (!f) {
+ fprintf(stderr, "fopen failed\n");
+ } else {
+ fprintf(stderr, "%s\n", name);
+
+ dd_write_header(f, dscreen->screen, record->draw_state.base.apitrace_call_number);
+ dd_write_record(f, record);
+
+ if (!encountered_hang) {
+ dd_dump_driver_state(dctx, f, PIPE_DUMP_DEVICE_STATUS_REGISTERS);
+ dd_dump_dmesg(f);
+ }
+
+ fclose(f);
+ }
+
+ if (top_not_reached)
+ stop_output = true;
+ encountered_hang = true;
+ }
+
+ if (num_later || dctx->record_pending) {
+ fprintf(stderr, "... and %u%s additional draws.\n", num_later,
+ dctx->record_pending ? "+1 (pending)" : "");
+ }
+
+ fprintf(stderr, "\nDone.\n");
+ dd_kill_process();
+}
+
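+/* Main loop of the hang-detection thread: take ownership of the queued
+ * records, wait on their fences with the configured timeout, report a hang
+ * on timeout, and otherwise dump and free the completed records.
+ */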
+int
+dd_thread_main(void *input)
+{
+ struct dd_context *dctx = (struct dd_context *)input;
+ struct dd_screen *dscreen = dd_screen(dctx->base.screen);
+ struct pipe_screen *screen = dscreen->screen;
+
+ mtx_lock(&dctx->mutex);
+
+ for (;;) {
+ struct list_head records;
+ struct pipe_fence_handle *fence;
+ struct pipe_fence_handle *fence2 = NULL;
+
+ list_replace(&dctx->records, &records);
+ list_inithead(&dctx->records);
+ dctx->num_records = 0;
+
+ if (dctx->api_stalled)
+ cnd_signal(&dctx->cond);
+
+ if (!list_empty(&records)) {
+ /* Wait for the youngest draw. This means hangs can take a bit longer
+ * to detect, but it's more efficient this way. */
+ struct dd_draw_record *youngest =
+ LIST_ENTRY(struct dd_draw_record, records.prev, list);
+ fence = youngest->bottom_of_pipe;
+ } else if (dctx->record_pending) {
+ /* Wait for pending fences, in case the driver ends up hanging internally. */
+ fence = dctx->record_pending->prev_bottom_of_pipe;
+ fence2 = dctx->record_pending->top_of_pipe;
+ } else if (dctx->kill_thread) {
+ break;
+ } else {
+ cnd_wait(&dctx->cond, &dctx->mutex);
+ continue;
+ }
+ mtx_unlock(&dctx->mutex);
+
+ /* Fences can be NULL legitimately when timeout detection is disabled. */
+ if ((fence &&
+ !screen->fence_finish(screen, NULL, fence,
+ (uint64_t)dscreen->timeout_ms * 1000*1000)) ||
+ (fence2 &&
+ !screen->fence_finish(screen, NULL, fence2,
+ (uint64_t)dscreen->timeout_ms * 1000*1000))) {
+ mtx_lock(&dctx->mutex);
+ list_splice(&records, &dctx->records);
+ dd_report_hang(dctx);
+ /* we won't actually get here */
+ mtx_unlock(&dctx->mutex);
+ }
+
+ list_for_each_entry_safe(struct dd_draw_record, record, &records, list) {
+ dd_maybe_dump_record(dscreen, record);
+ list_del(&record->list);
+ dd_free_record(screen, record);
+ }
+
+ mtx_lock(&dctx->mutex);
+ }
+ mtx_unlock(&dctx->mutex);
+ return 0;
+}
+
+static struct dd_draw_record *
+dd_create_record(struct dd_context *dctx)
+{
+ struct dd_draw_record *record;
+
+ record = MALLOC_STRUCT(dd_draw_record);
+ if (!record)
+ return NULL;
+
+ record->dctx = dctx;
+ record->draw_call = dctx->num_draw_calls;
+
+ record->prev_bottom_of_pipe = NULL;
+ record->top_of_pipe = NULL;
+ record->bottom_of_pipe = NULL;
+ record->log_page = NULL;
+ util_queue_fence_init(&record->driver_finished);
+
+ dd_init_copy_of_draw_state(&record->draw_state);
+ dd_copy_draw_state(&record->draw_state.base, &dctx->draw_state);
+
+ return record;
+}
+
+static void
+dd_context_flush(struct pipe_context *_pipe,
+ struct pipe_fence_handle **fence, unsigned flags)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+
+ pipe->flush(pipe, fence, flags);
+}
+
+static void
+dd_before_draw(struct dd_context *dctx, struct dd_draw_record *record)
+{
+ struct dd_screen *dscreen = dd_screen(dctx->base.screen);
+ struct pipe_context *pipe = dctx->pipe;
+ struct pipe_screen *screen = dscreen->screen;
+
+ record->time_before = os_time_get_nano();
+
+ if (dscreen->timeout_ms > 0) {
+ if (dscreen->flush_always && dctx->num_draw_calls >= dscreen->skip_count) {
+ pipe->flush(pipe, &record->prev_bottom_of_pipe, 0);
+ screen->fence_reference(screen, &record->top_of_pipe, record->prev_bottom_of_pipe);
+ } else {
+ pipe->flush(pipe, &record->prev_bottom_of_pipe,
+ PIPE_FLUSH_DEFERRED | PIPE_FLUSH_BOTTOM_OF_PIPE);
+ pipe->flush(pipe, &record->top_of_pipe,
+ PIPE_FLUSH_DEFERRED | PIPE_FLUSH_TOP_OF_PIPE);
+ }
+
+ mtx_lock(&dctx->mutex);
+ dctx->record_pending = record;
+ if (list_empty(&dctx->records))
+ cnd_signal(&dctx->cond);
+ mtx_unlock(&dctx->mutex);
+ }
+}
+
+static void
+dd_after_draw_async(void *data)
+{
+ struct dd_draw_record *record = (struct dd_draw_record *)data;
+ struct dd_context *dctx = record->dctx;
+ struct dd_screen *dscreen = dd_screen(dctx->base.screen);
+
+ record->log_page = u_log_new_page(&dctx->log);
+ record->time_after = os_time_get_nano();
+
+ if (!util_queue_fence_is_signalled(&record->driver_finished))
+ util_queue_fence_signal(&record->driver_finished);
+
+ if (dscreen->dump_mode == DD_DUMP_APITRACE_CALL &&
+ dscreen->apitrace_dump_call > dctx->draw_state.apitrace_call_number) {
+ dd_thread_join(dctx);
+ /* No need to continue. */
+ exit(0);
+ }
+}
+
+static void
+dd_after_draw(struct dd_context *dctx, struct dd_draw_record *record)
+{
+ struct dd_screen *dscreen = dd_screen(dctx->base.screen);
+ struct pipe_context *pipe = dctx->pipe;
+
+ if (dscreen->timeout_ms > 0) {
+ unsigned flush_flags;
+ if (dscreen->flush_always && dctx->num_draw_calls >= dscreen->skip_count)
+ flush_flags = 0;
+ else
+ flush_flags = PIPE_FLUSH_DEFERRED | PIPE_FLUSH_BOTTOM_OF_PIPE;
+ pipe->flush(pipe, &record->bottom_of_pipe, flush_flags);
+
+ assert(record == dctx->record_pending);
+ }
+
+ if (pipe->callback) {
+ util_queue_fence_reset(&record->driver_finished);
+ pipe->callback(pipe, dd_after_draw_async, record, true);
+ } else {
+ dd_after_draw_async(record);
+ }
+
+ mtx_lock(&dctx->mutex);
+ if (unlikely(dctx->num_records > 10000)) {
+ dctx->api_stalled = true;
+ /* Since this is only a heuristic to prevent the API thread from getting
+ * too far ahead, we don't need a loop here. */
+ cnd_wait(&dctx->cond, &dctx->mutex);
+ dctx->api_stalled = false;
+ }
+
+ if (list_empty(&dctx->records))
+ cnd_signal(&dctx->cond);
+
+ list_addtail(&record->list, &dctx->records);
+ dctx->record_pending = NULL;
+ dctx->num_records++;
+ mtx_unlock(&dctx->mutex);
+
+ ++dctx->num_draw_calls;
+ if (dscreen->skip_count && dctx->num_draw_calls % 10000 == 0)
+ fprintf(stderr, "Gallium debugger reached %u draw calls.\n",
+ dctx->num_draw_calls);
+}
+
+static void
+dd_context_draw_vbo(struct pipe_context *_pipe,
+ const struct pipe_draw_info *info)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+ struct dd_draw_record *record = dd_create_record(dctx);
+
+ record->call.type = CALL_DRAW_VBO;
+ record->call.info.draw_vbo.draw = *info;
+ record->call.info.draw_vbo.draw.count_from_stream_output = NULL;
+ pipe_so_target_reference(&record->call.info.draw_vbo.draw.count_from_stream_output,
+ info->count_from_stream_output);
+ if (info->index_size && !info->has_user_indices) {
+ record->call.info.draw_vbo.draw.index.resource = NULL;
+ pipe_resource_reference(&record->call.info.draw_vbo.draw.index.resource,
+ info->index.resource);
+ }
+
+ if (info->indirect) {
+ record->call.info.draw_vbo.indirect = *info->indirect;
+ record->call.info.draw_vbo.draw.indirect = &record->call.info.draw_vbo.indirect;
+
+ record->call.info.draw_vbo.indirect.buffer = NULL;
+ pipe_resource_reference(&record->call.info.draw_vbo.indirect.buffer,
+ info->indirect->buffer);
+ record->call.info.draw_vbo.indirect.indirect_draw_count = NULL;
+ pipe_resource_reference(&record->call.info.draw_vbo.indirect.indirect_draw_count,
+ info->indirect->indirect_draw_count);
+ } else {
+ memset(&record->call.info.draw_vbo.indirect, 0, sizeof(*info->indirect));
+ }
+
+ dd_before_draw(dctx, record);
+ pipe->draw_vbo(pipe, info);
+ dd_after_draw(dctx, record);
+}
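
Each record takes its own references on the resources it captures, so they stay valid until dd_free_record() releases them. The idiom used throughout these wrappers, shown here with placeholder names:

   struct pipe_resource *copy = NULL;     /* must start out NULL because the helper */
   pipe_resource_reference(&copy, src);   /* unrefs the old value in 'copy' before  */
                                          /* taking a reference on the new one      */
   pipe_resource_reference(&copy, NULL);  /* releases the reference again later     */
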
+
+static void
+dd_context_launch_grid(struct pipe_context *_pipe,
+ const struct pipe_grid_info *info)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+ struct dd_draw_record *record = dd_create_record(dctx);
+
+ record->call.type = CALL_LAUNCH_GRID;
+ record->call.info.launch_grid = *info;
+ record->call.info.launch_grid.indirect = NULL;
+ pipe_resource_reference(&record->call.info.launch_grid.indirect, info->indirect);
+
+ dd_before_draw(dctx, record);
+ pipe->launch_grid(pipe, info);
+ dd_after_draw(dctx, record);
+}
+
+static void
+dd_context_resource_copy_region(struct pipe_context *_pipe,
+ struct pipe_resource *dst, unsigned dst_level,
+ unsigned dstx, unsigned dsty, unsigned dstz,
+ struct pipe_resource *src, unsigned src_level,
+ const struct pipe_box *src_box)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+ struct dd_draw_record *record = dd_create_record(dctx);
+
+ record->call.type = CALL_RESOURCE_COPY_REGION;
+ record->call.info.resource_copy_region.dst = NULL;
+ pipe_resource_reference(&record->call.info.resource_copy_region.dst, dst);
+ record->call.info.resource_copy_region.dst_level = dst_level;
+ record->call.info.resource_copy_region.dstx = dstx;
+ record->call.info.resource_copy_region.dsty = dsty;
+ record->call.info.resource_copy_region.dstz = dstz;
+ record->call.info.resource_copy_region.src = NULL;
+ pipe_resource_reference(&record->call.info.resource_copy_region.src, src);
+ record->call.info.resource_copy_region.src_level = src_level;
+ record->call.info.resource_copy_region.src_box = *src_box;
+
+ dd_before_draw(dctx, record);
+ pipe->resource_copy_region(pipe,
+ dst, dst_level, dstx, dsty, dstz,
+ src, src_level, src_box);
+ dd_after_draw(dctx, record);
+}
+
+static void
+dd_context_blit(struct pipe_context *_pipe, const struct pipe_blit_info *info)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+ struct dd_draw_record *record = dd_create_record(dctx);
+
+ record->call.type = CALL_BLIT;
+ record->call.info.blit = *info;
+ record->call.info.blit.dst.resource = NULL;
+ pipe_resource_reference(&record->call.info.blit.dst.resource, info->dst.resource);
+ record->call.info.blit.src.resource = NULL;
+ pipe_resource_reference(&record->call.info.blit.src.resource, info->src.resource);
+
+ dd_before_draw(dctx, record);
+ pipe->blit(pipe, info);
+ dd_after_draw(dctx, record);
+}
+
+static boolean
+dd_context_generate_mipmap(struct pipe_context *_pipe,
+ struct pipe_resource *res,
+ enum pipe_format format,
+ unsigned base_level,
+ unsigned last_level,
+ unsigned first_layer,
+ unsigned last_layer)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+ struct dd_draw_record *record = dd_create_record(dctx);
+ boolean result;
+
+ record->call.type = CALL_GENERATE_MIPMAP;
+ record->call.info.generate_mipmap.res = NULL;
+ pipe_resource_reference(&record->call.info.generate_mipmap.res, res);
+ record->call.info.generate_mipmap.format = format;
+ record->call.info.generate_mipmap.base_level = base_level;
+ record->call.info.generate_mipmap.last_level = last_level;
+ record->call.info.generate_mipmap.first_layer = first_layer;
+ record->call.info.generate_mipmap.last_layer = last_layer;
+
+ dd_before_draw(dctx, record);
+ result = pipe->generate_mipmap(pipe, res, format, base_level, last_level,
+ first_layer, last_layer);
+ dd_after_draw(dctx, record);
+ return result;
+}
+
+static void
+dd_context_get_query_result_resource(struct pipe_context *_pipe,
+ struct pipe_query *query,
+ boolean wait,
+ enum pipe_query_value_type result_type,
+ int index,
+ struct pipe_resource *resource,
+ unsigned offset)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct dd_query *dquery = dd_query(query);
+ struct pipe_context *pipe = dctx->pipe;
+ struct dd_draw_record *record = dd_create_record(dctx);
+
+ record->call.type = CALL_GET_QUERY_RESULT_RESOURCE;
+ record->call.info.get_query_result_resource.query = query;
+ record->call.info.get_query_result_resource.wait = wait;
+ record->call.info.get_query_result_resource.result_type = result_type;
+ record->call.info.get_query_result_resource.index = index;
+ record->call.info.get_query_result_resource.resource = NULL;
+ pipe_resource_reference(&record->call.info.get_query_result_resource.resource,
+ resource);
+ record->call.info.get_query_result_resource.offset = offset;
+
+ /* The query may be deleted by the time we need to print it. */
+ record->call.info.get_query_result_resource.query_type = dquery->type;
+
+ dd_before_draw(dctx, record);
+ pipe->get_query_result_resource(pipe, dquery->query, wait,
+ result_type, index, resource, offset);
+ dd_after_draw(dctx, record);
+}
+
+static void
+dd_context_flush_resource(struct pipe_context *_pipe,
+ struct pipe_resource *resource)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+ struct dd_draw_record *record = dd_create_record(dctx);
+
+ record->call.type = CALL_FLUSH_RESOURCE;
+ record->call.info.flush_resource = NULL;
+ pipe_resource_reference(&record->call.info.flush_resource, resource);
+
+ dd_before_draw(dctx, record);
+ pipe->flush_resource(pipe, resource);
+ dd_after_draw(dctx, record);
+}
+
+static void
+dd_context_clear(struct pipe_context *_pipe, unsigned buffers,
+ const union pipe_color_union *color, double depth,
+ unsigned stencil)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+ struct dd_draw_record *record = dd_create_record(dctx);
+
+ record->call.type = CALL_CLEAR;
+ record->call.info.clear.buffers = buffers;
+ record->call.info.clear.color = *color;
+ record->call.info.clear.depth = depth;
+ record->call.info.clear.stencil = stencil;
+
+ dd_before_draw(dctx, record);
+ pipe->clear(pipe, buffers, color, depth, stencil);
+ dd_after_draw(dctx, record);
+}
+
+static void
+dd_context_clear_render_target(struct pipe_context *_pipe,
+ struct pipe_surface *dst,
+ const union pipe_color_union *color,
+ unsigned dstx, unsigned dsty,
+ unsigned width, unsigned height,
+ bool render_condition_enabled)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+ struct dd_draw_record *record = dd_create_record(dctx);
+
+ record->call.type = CALL_CLEAR_RENDER_TARGET;
+
+ dd_before_draw(dctx, record);
+ pipe->clear_render_target(pipe, dst, color, dstx, dsty, width, height,
+ render_condition_enabled);
+ dd_after_draw(dctx, record);
+}
+
+static void
+dd_context_clear_depth_stencil(struct pipe_context *_pipe,
+ struct pipe_surface *dst, unsigned clear_flags,
+ double depth, unsigned stencil, unsigned dstx,
+ unsigned dsty, unsigned width, unsigned height,
+ bool render_condition_enabled)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+ struct dd_draw_record *record = dd_create_record(dctx);
+
+ record->call.type = CALL_CLEAR_DEPTH_STENCIL;
+
+ dd_before_draw(dctx, record);
+ pipe->clear_depth_stencil(pipe, dst, clear_flags, depth, stencil,
+ dstx, dsty, width, height,
+ render_condition_enabled);
+ dd_after_draw(dctx, record);
+}
+
+static void
+dd_context_clear_buffer(struct pipe_context *_pipe, struct pipe_resource *res,
+ unsigned offset, unsigned size,
+ const void *clear_value, int clear_value_size)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+ struct dd_draw_record *record = dd_create_record(dctx);
+
+ record->call.type = CALL_CLEAR_BUFFER;
+ record->call.info.clear_buffer.res = NULL;
+ pipe_resource_reference(&record->call.info.clear_buffer.res, res);
+ record->call.info.clear_buffer.offset = offset;
+ record->call.info.clear_buffer.size = size;
+ record->call.info.clear_buffer.clear_value = clear_value;
+ record->call.info.clear_buffer.clear_value_size = clear_value_size;
+
+ dd_before_draw(dctx, record);
+ pipe->clear_buffer(pipe, res, offset, size, clear_value, clear_value_size);
+ dd_after_draw(dctx, record);
+}
+
+static void
+dd_context_clear_texture(struct pipe_context *_pipe,
+ struct pipe_resource *res,
+ unsigned level,
+ const struct pipe_box *box,
+ const void *data)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+ struct dd_draw_record *record = dd_create_record(dctx);
+
+ record->call.type = CALL_CLEAR_TEXTURE;
+
+ dd_before_draw(dctx, record);
+ pipe->clear_texture(pipe, res, level, box, data);
+ dd_after_draw(dctx, record);
+}
+
+/********************************************************************
+ * transfer
+ */
+
+static void *
+dd_context_transfer_map(struct pipe_context *_pipe,
+ struct pipe_resource *resource, unsigned level,
+ unsigned usage, const struct pipe_box *box,
+ struct pipe_transfer **transfer)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+ struct dd_draw_record *record =
+ dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
+
+ if (record) {
+ record->call.type = CALL_TRANSFER_MAP;
+
+ dd_before_draw(dctx, record);
+ }
+ void *ptr = pipe->transfer_map(pipe, resource, level, usage, box, transfer);
+ if (record) {
+ record->call.info.transfer_map.transfer_ptr = *transfer;
+ record->call.info.transfer_map.ptr = ptr;
+ if (*transfer) {
+ record->call.info.transfer_map.transfer = **transfer;
+ record->call.info.transfer_map.transfer.resource = NULL;
+ pipe_resource_reference(&record->call.info.transfer_map.transfer.resource,
+ (*transfer)->resource);
+ } else {
+ memset(&record->call.info.transfer_map.transfer, 0, sizeof(struct pipe_transfer));
+ }
+
+ dd_after_draw(dctx, record);
+ }
+ return ptr;
+}
+
+static void
+dd_context_transfer_flush_region(struct pipe_context *_pipe,
+ struct pipe_transfer *transfer,
+ const struct pipe_box *box)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+ struct dd_draw_record *record =
+ dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
+
+ if (record) {
+ record->call.type = CALL_TRANSFER_FLUSH_REGION;
+ record->call.info.transfer_flush_region.transfer_ptr = transfer;
+ record->call.info.transfer_flush_region.box = *box;
+ record->call.info.transfer_flush_region.transfer = *transfer;
+ record->call.info.transfer_flush_region.transfer.resource = NULL;
+ pipe_resource_reference(
+ &record->call.info.transfer_flush_region.transfer.resource,
+ transfer->resource);
+
+ dd_before_draw(dctx, record);
+ }
+ pipe->transfer_flush_region(pipe, transfer, box);
+ if (record)
+ dd_after_draw(dctx, record);
+}
+
+static void
+dd_context_transfer_unmap(struct pipe_context *_pipe,
+ struct pipe_transfer *transfer)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+ struct dd_draw_record *record =
+ dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
+
+ if (record) {
+ record->call.type = CALL_TRANSFER_UNMAP;
+ record->call.info.transfer_unmap.transfer_ptr = transfer;
+ record->call.info.transfer_unmap.transfer = *transfer;
+ record->call.info.transfer_unmap.transfer.resource = NULL;
+ pipe_resource_reference(
+ &record->call.info.transfer_unmap.transfer.resource,
+ transfer->resource);
+
+ dd_before_draw(dctx, record);
+ }
+ pipe->transfer_unmap(pipe, transfer);
+ if (record)
+ dd_after_draw(dctx, record);
+}
+
+static void
+dd_context_buffer_subdata(struct pipe_context *_pipe,
+ struct pipe_resource *resource,
+ unsigned usage, unsigned offset,
+ unsigned size, const void *data)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+ struct dd_draw_record *record =
+ dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
+
+ if (record) {
+ record->call.type = CALL_BUFFER_SUBDATA;
+ record->call.info.buffer_subdata.resource = NULL;
+ pipe_resource_reference(&record->call.info.buffer_subdata.resource, resource);
+ record->call.info.buffer_subdata.usage = usage;
+ record->call.info.buffer_subdata.offset = offset;
+ record->call.info.buffer_subdata.size = size;
+ record->call.info.buffer_subdata.data = data;
+
+ dd_before_draw(dctx, record);
+ }
+ pipe->buffer_subdata(pipe, resource, usage, offset, size, data);
+ if (record)
+ dd_after_draw(dctx, record);
+}
+
+static void
+dd_context_texture_subdata(struct pipe_context *_pipe,
+ struct pipe_resource *resource,
+ unsigned level, unsigned usage,
+ const struct pipe_box *box,
+ const void *data, unsigned stride,
+ unsigned layer_stride)
+{
+ struct dd_context *dctx = dd_context(_pipe);
+ struct pipe_context *pipe = dctx->pipe;
+ struct dd_draw_record *record =
+ dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
+
+ if (record) {
+ record->call.type = CALL_TEXTURE_SUBDATA;
+ record->call.info.texture_subdata.resource = NULL;
+ pipe_resource_reference(&record->call.info.texture_subdata.resource, resource);
+ record->call.info.texture_subdata.level = level;
+ record->call.info.texture_subdata.usage = usage;
+ record->call.info.texture_subdata.box = *box;
+ record->call.info.texture_subdata.data = data;
+ record->call.info.texture_subdata.stride = stride;
+ record->call.info.texture_subdata.layer_stride = layer_stride;
+
+ dd_before_draw(dctx, record);
+ }
+ pipe->texture_subdata(pipe, resource, level, usage, box, data,
+ stride, layer_stride);
+ if (record)
+ dd_after_draw(dctx, record);
+}
+
+void
+dd_init_draw_functions(struct dd_context *dctx)
+{
+ CTX_INIT(flush);
+ CTX_INIT(draw_vbo);
+ CTX_INIT(launch_grid);
+ CTX_INIT(resource_copy_region);
+ CTX_INIT(blit);
+ CTX_INIT(clear);
+ CTX_INIT(clear_render_target);
+ CTX_INIT(clear_depth_stencil);
+ CTX_INIT(clear_buffer);
+ CTX_INIT(clear_texture);
+ CTX_INIT(flush_resource);
+ CTX_INIT(generate_mipmap);
+ CTX_INIT(get_query_result_resource);
+ CTX_INIT(transfer_map);
+ CTX_INIT(transfer_flush_region);
+ CTX_INIT(transfer_unmap);
+ CTX_INIT(buffer_subdata);
+ CTX_INIT(texture_subdata);
+}
diff --git a/src/gallium/auxiliary/driver_ddebug/dd_pipe.h b/src/gallium/auxiliary/driver_ddebug/dd_pipe.h
new file mode 100644
index 00000000000..07c4d55017f
--- /dev/null
+++ b/src/gallium/auxiliary/driver_ddebug/dd_pipe.h
@@ -0,0 +1,371 @@
+/**************************************************************************
+ *
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef DD_H_
+#define DD_H_
+
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+#include "pipe/p_screen.h"
+#include "dd_util.h"
+#include "os/os_thread.h"
+#include "util/list.h"
+#include "util/u_log.h"
+#include "util/u_queue.h"
+
+struct dd_context;
+
+enum dd_dump_mode {
+ DD_DUMP_ONLY_HANGS,
+ DD_DUMP_ALL_CALLS,
+ DD_DUMP_APITRACE_CALL,
+};
+
+struct dd_screen
+{
+ struct pipe_screen base;
+ struct pipe_screen *screen;
+ unsigned timeout_ms;
+ enum dd_dump_mode dump_mode;
+ bool flush_always;
+ bool transfers;
+ bool verbose;
+ unsigned skip_count;
+ unsigned apitrace_dump_call;
+};
+
+enum call_type
+{
+ CALL_DRAW_VBO,
+ CALL_LAUNCH_GRID,
+ CALL_RESOURCE_COPY_REGION,
+ CALL_BLIT,
+ CALL_FLUSH_RESOURCE,
+ CALL_CLEAR,
+ CALL_CLEAR_BUFFER,
+ CALL_CLEAR_TEXTURE,
+ CALL_CLEAR_RENDER_TARGET,
+ CALL_CLEAR_DEPTH_STENCIL,
+ CALL_GENERATE_MIPMAP,
+ CALL_GET_QUERY_RESULT_RESOURCE,
+ CALL_TRANSFER_MAP,
+ CALL_TRANSFER_FLUSH_REGION,
+ CALL_TRANSFER_UNMAP,
+ CALL_BUFFER_SUBDATA,
+ CALL_TEXTURE_SUBDATA,
+};
+
+struct call_resource_copy_region
+{
+ struct pipe_resource *dst;
+ unsigned dst_level;
+ unsigned dstx, dsty, dstz;
+ struct pipe_resource *src;
+ unsigned src_level;
+ struct pipe_box src_box;
+};
+
+struct call_clear
+{
+ unsigned buffers;
+ union pipe_color_union color;
+ double depth;
+ unsigned stencil;
+};
+
+struct call_clear_buffer
+{
+ struct pipe_resource *res;
+ unsigned offset;
+ unsigned size;
+ const void *clear_value;
+ int clear_value_size;
+};
+
+struct call_generate_mipmap {
+ struct pipe_resource *res;
+ enum pipe_format format;
+ unsigned base_level;
+ unsigned last_level;
+ unsigned first_layer;
+ unsigned last_layer;
+};
+
+struct call_draw_info {
+ struct pipe_draw_info draw;
+ struct pipe_draw_indirect_info indirect;
+};
+
+struct call_get_query_result_resource {
+ struct pipe_query *query;
+ enum pipe_query_type query_type;
+ boolean wait;
+ enum pipe_query_value_type result_type;
+ int index;
+ struct pipe_resource *resource;
+ unsigned offset;
+};
+
+struct call_transfer_map {
+ struct pipe_transfer *transfer_ptr;
+ struct pipe_transfer transfer;
+ void *ptr;
+};
+
+struct call_transfer_flush_region {
+ struct pipe_transfer *transfer_ptr;
+ struct pipe_transfer transfer;
+ struct pipe_box box;
+};
+
+struct call_transfer_unmap {
+ struct pipe_transfer *transfer_ptr;
+ struct pipe_transfer transfer;
+};
+
+struct call_buffer_subdata {
+ struct pipe_resource *resource;
+ unsigned usage;
+ unsigned offset;
+ unsigned size;
+ const void *data;
+};
+
+struct call_texture_subdata {
+ struct pipe_resource *resource;
+ unsigned level;
+ unsigned usage;
+ struct pipe_box box;
+ const void *data;
+ unsigned stride;
+ unsigned layer_stride;
+};
+
+struct dd_call
+{
+ enum call_type type;
+
+ union {
+ struct call_draw_info draw_vbo;
+ struct pipe_grid_info launch_grid;
+ struct call_resource_copy_region resource_copy_region;
+ struct pipe_blit_info blit;
+ struct pipe_resource *flush_resource;
+ struct call_clear clear;
+ struct call_clear_buffer clear_buffer;
+ struct call_generate_mipmap generate_mipmap;
+ struct call_get_query_result_resource get_query_result_resource;
+ struct call_transfer_map transfer_map;
+ struct call_transfer_flush_region transfer_flush_region;
+ struct call_transfer_unmap transfer_unmap;
+ struct call_buffer_subdata buffer_subdata;
+ struct call_texture_subdata texture_subdata;
+ } info;
+};
+
+struct dd_query
+{
+ unsigned type;
+ struct pipe_query *query;
+};
+
+struct dd_state
+{
+ void *cso;
+
+ union {
+ struct pipe_blend_state blend;
+ struct pipe_depth_stencil_alpha_state dsa;
+ struct pipe_rasterizer_state rs;
+ struct pipe_sampler_state sampler;
+ struct {
+ struct pipe_vertex_element velems[PIPE_MAX_ATTRIBS];
+ unsigned count;
+ } velems;
+ struct pipe_shader_state shader;
+ } state;
+};
+
+struct dd_draw_state
+{
+ struct {
+ struct dd_query *query;
+ bool condition;
+ unsigned mode;
+ } render_cond;
+
+ struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
+
+ unsigned num_so_targets;
+ struct pipe_stream_output_target *so_targets[PIPE_MAX_SO_BUFFERS];
+ unsigned so_offsets[PIPE_MAX_SO_BUFFERS];
+
+ struct dd_state *shaders[PIPE_SHADER_TYPES];
+ struct pipe_constant_buffer constant_buffers[PIPE_SHADER_TYPES][PIPE_MAX_CONSTANT_BUFFERS];
+ struct pipe_sampler_view *sampler_views[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
+ struct dd_state *sampler_states[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
+ struct pipe_image_view shader_images[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_IMAGES];
+ struct pipe_shader_buffer shader_buffers[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_BUFFERS];
+
+ struct dd_state *velems;
+ struct dd_state *rs;
+ struct dd_state *dsa;
+ struct dd_state *blend;
+
+ struct pipe_blend_color blend_color;
+ struct pipe_stencil_ref stencil_ref;
+ unsigned sample_mask;
+ unsigned min_samples;
+ struct pipe_clip_state clip_state;
+ struct pipe_framebuffer_state framebuffer_state;
+ struct pipe_poly_stipple polygon_stipple;
+ struct pipe_scissor_state scissors[PIPE_MAX_VIEWPORTS];
+ struct pipe_viewport_state viewports[PIPE_MAX_VIEWPORTS];
+ float tess_default_levels[6];
+
+ unsigned apitrace_call_number;
+};
+
+struct dd_draw_state_copy
+{
+ struct dd_draw_state base;
+
+ /* dd_draw_state_copy does not reference real CSOs. Instead, it points to
+ * these variables, which serve as storage.
+ */
+ struct dd_query render_cond;
+ struct dd_state shaders[PIPE_SHADER_TYPES];
+ struct dd_state sampler_states[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
+ struct dd_state velems;
+ struct dd_state rs;
+ struct dd_state dsa;
+ struct dd_state blend;
+};
+
+struct dd_draw_record {
+ struct list_head list;
+ struct dd_context *dctx;
+
+ int64_t time_before;
+ int64_t time_after;
+ unsigned draw_call;
+
+ struct pipe_fence_handle *prev_bottom_of_pipe;
+ struct pipe_fence_handle *top_of_pipe;
+ struct pipe_fence_handle *bottom_of_pipe;
+
+ struct dd_call call;
+ struct dd_draw_state_copy draw_state;
+
+ struct util_queue_fence driver_finished;
+ struct u_log_page *log_page;
+};
+
+struct dd_context
+{
+ struct pipe_context base;
+ struct pipe_context *pipe;
+
+ struct dd_draw_state draw_state;
+ unsigned num_draw_calls;
+
+ struct u_log_context log;
+
+ /* Pipelined hang detection.
+ *
+ * Before each draw call, a new dd_draw_record is created; it contains
+ * a copy of all states and holds deferred top-of-pipe and bottom-of-pipe
+ * fences obtained from the driver. After the draw call, the driver's log
+ * page is attached to the record and the record is added into the list
+ * of all records. That's done without knowing whether that draw call is
+ * problematic or not.
+ *
+ * An independent, separate thread loops over the list of records and checks
+ * their fences. Records with signalled fences are freed. On fence timeout,
+ * the thread dumps the records of in-flight draws.
+ */
+ thrd_t thread;
+ mtx_t mutex;
+ cnd_t cond;
+ struct dd_draw_record *record_pending; /* currently inside the driver */
+ struct list_head records; /* oldest record first */
+ unsigned num_records;
+ bool kill_thread;
+ bool api_stalled;
+};
+
+
+struct pipe_context *
+dd_context_create(struct dd_screen *dscreen, struct pipe_context *pipe);
+
+void
+dd_init_draw_functions(struct dd_context *dctx);
+
+void
+dd_thread_join(struct dd_context *dctx);
+int
+dd_thread_main(void *input);
+
+FILE *
+dd_get_file_stream(struct dd_screen *dscreen, unsigned apitrace_call_number);
+
+static inline struct dd_context *
+dd_context(struct pipe_context *pipe)
+{
+ return (struct dd_context *)pipe;
+}
+
+static inline struct dd_screen *
+dd_screen(struct pipe_screen *screen)
+{
+ return (struct dd_screen*)screen;
+}
+
+static inline struct dd_query *
+dd_query(struct pipe_query *query)
+{
+ return (struct dd_query *)query;
+}
+
+static inline struct pipe_query *
+dd_query_unwrap(struct pipe_query *query)
+{
+ if (query) {
+ return dd_query(query)->query;
+ } else {
+ return NULL;
+ }
+}
+
+
+#define CTX_INIT(_member) \
+ dctx->base._member = dctx->pipe->_member ? dd_context_##_member : NULL
+
+#endif /* DD_H_ */
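
For reference, the CTX_INIT macro above only installs a wrapper when the wrapped driver implements the entry point; for example, CTX_INIT(draw_vbo) in dd_init_draw_functions() (earlier in this diff) expands to:

   dctx->base.draw_vbo = dctx->pipe->draw_vbo ? dd_context_draw_vbo : NULL;

so optional hooks such as clear_texture stay NULL when the driver leaves them unset.
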
diff --git a/src/gallium/auxiliary/driver_ddebug/dd_public.h b/src/gallium/auxiliary/driver_ddebug/dd_public.h
new file mode 100644
index 00000000000..e6607655753
--- /dev/null
+++ b/src/gallium/auxiliary/driver_ddebug/dd_public.h
@@ -0,0 +1,36 @@
+/**************************************************************************
+ *
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2010 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef DD_PUBLIC_H_
+#define DD_PUBLIC_H_
+
+struct pipe_screen;
+
+struct pipe_screen *
+ddebug_screen_create(struct pipe_screen *screen);
+
+#endif /* DD_PUBLIC_H_ */
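
ddebug_screen_create() is typically called by a gallium target right after the real screen is created; when GALLIUM_DDEBUG is unset it returns the screen unchanged (see dd_screen.c below). A hypothetical caller, with placeholder names:

   struct pipe_screen *screen = hw_create_screen(dev); /* stand-in for the real driver */
   screen = ddebug_screen_create(screen);              /* wraps it, or returns it as-is */
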
diff --git a/src/gallium/auxiliary/driver_ddebug/dd_screen.c b/src/gallium/auxiliary/driver_ddebug/dd_screen.c
new file mode 100644
index 00000000000..5b2be28a969
--- /dev/null
+++ b/src/gallium/auxiliary/driver_ddebug/dd_screen.c
@@ -0,0 +1,593 @@
+/**************************************************************************
+ *
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "dd_pipe.h"
+#include "dd_public.h"
+#include "util/u_memory.h"
+#include <ctype.h>
+#include <stdio.h>
+
+
+static const char *
+dd_screen_get_name(struct pipe_screen *_screen)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ return screen->get_name(screen);
+}
+
+static const char *
+dd_screen_get_vendor(struct pipe_screen *_screen)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ return screen->get_vendor(screen);
+}
+
+static const char *
+dd_screen_get_device_vendor(struct pipe_screen *_screen)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ return screen->get_device_vendor(screen);
+}
+
+static const void *
+dd_screen_get_compiler_options(struct pipe_screen *_screen,
+ enum pipe_shader_ir ir,
+ enum pipe_shader_type shader)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ return screen->get_compiler_options(screen, ir, shader);
+}
+
+static struct disk_cache *
+dd_screen_get_disk_shader_cache(struct pipe_screen *_screen)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ return screen->get_disk_shader_cache(screen);
+}
+
+static int
+dd_screen_get_param(struct pipe_screen *_screen,
+ enum pipe_cap param)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ return screen->get_param(screen, param);
+}
+
+static float
+dd_screen_get_paramf(struct pipe_screen *_screen,
+ enum pipe_capf param)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ return screen->get_paramf(screen, param);
+}
+
+static int
+dd_screen_get_compute_param(struct pipe_screen *_screen,
+ enum pipe_shader_ir ir_type,
+ enum pipe_compute_cap param,
+ void *ret)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ return screen->get_compute_param(screen, ir_type, param, ret);
+}
+
+static int
+dd_screen_get_shader_param(struct pipe_screen *_screen,
+ enum pipe_shader_type shader,
+ enum pipe_shader_cap param)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ return screen->get_shader_param(screen, shader, param);
+}
+
+static uint64_t
+dd_screen_get_timestamp(struct pipe_screen *_screen)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ return screen->get_timestamp(screen);
+}
+
+static void dd_screen_query_memory_info(struct pipe_screen *_screen,
+ struct pipe_memory_info *info)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ screen->query_memory_info(screen, info);
+}
+
+static struct pipe_context *
+dd_screen_context_create(struct pipe_screen *_screen, void *priv,
+ unsigned flags)
+{
+ struct dd_screen *dscreen = dd_screen(_screen);
+ struct pipe_screen *screen = dscreen->screen;
+
+ flags |= PIPE_CONTEXT_DEBUG;
+
+ return dd_context_create(dscreen,
+ screen->context_create(screen, priv, flags));
+}
+
+static boolean
+dd_screen_is_format_supported(struct pipe_screen *_screen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned sample_count,
+ unsigned tex_usage)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ return screen->is_format_supported(screen, format, target, sample_count,
+ tex_usage);
+}
+
+static boolean
+dd_screen_can_create_resource(struct pipe_screen *_screen,
+ const struct pipe_resource *templat)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ return screen->can_create_resource(screen, templat);
+}
+
+static void
+dd_screen_flush_frontbuffer(struct pipe_screen *_screen,
+ struct pipe_resource *resource,
+ unsigned level, unsigned layer,
+ void *context_private,
+ struct pipe_box *sub_box)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ screen->flush_frontbuffer(screen, resource, level, layer, context_private,
+ sub_box);
+}
+
+static int
+dd_screen_get_driver_query_info(struct pipe_screen *_screen,
+ unsigned index,
+ struct pipe_driver_query_info *info)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ return screen->get_driver_query_info(screen, index, info);
+}
+
+static int
+dd_screen_get_driver_query_group_info(struct pipe_screen *_screen,
+ unsigned index,
+ struct pipe_driver_query_group_info *info)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ return screen->get_driver_query_group_info(screen, index, info);
+}
+
+
+static void
+dd_screen_get_driver_uuid(struct pipe_screen *_screen, char *uuid)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ screen->get_driver_uuid(screen, uuid);
+}
+
+static void
+dd_screen_get_device_uuid(struct pipe_screen *_screen, char *uuid)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ screen->get_device_uuid(screen, uuid);
+}
+
+/********************************************************************
+ * resource
+ */
+
+static struct pipe_resource *
+dd_screen_resource_create(struct pipe_screen *_screen,
+ const struct pipe_resource *templat)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+ struct pipe_resource *res = screen->resource_create(screen, templat);
+
+ if (!res)
+ return NULL;
+ res->screen = _screen;
+ return res;
+}
+
+static struct pipe_resource *
+dd_screen_resource_from_handle(struct pipe_screen *_screen,
+ const struct pipe_resource *templ,
+ struct winsys_handle *handle,
+ unsigned usage)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+ struct pipe_resource *res =
+ screen->resource_from_handle(screen, templ, handle, usage);
+
+ if (!res)
+ return NULL;
+ res->screen = _screen;
+ return res;
+}
+
+static struct pipe_resource *
+dd_screen_resource_from_user_memory(struct pipe_screen *_screen,
+ const struct pipe_resource *templ,
+ void *user_memory)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+ struct pipe_resource *res =
+ screen->resource_from_user_memory(screen, templ, user_memory);
+
+ if (!res)
+ return NULL;
+ res->screen = _screen;
+ return res;
+}
+
+static struct pipe_resource *
+dd_screen_resource_from_memobj(struct pipe_screen *_screen,
+ const struct pipe_resource *templ,
+ struct pipe_memory_object *memobj,
+ uint64_t offset)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+ struct pipe_resource *res =
+ screen->resource_from_memobj(screen, templ, memobj, offset);
+
+ if (!res)
+ return NULL;
+ res->screen = _screen;
+ return res;
+}
+
+static void
+dd_screen_resource_changed(struct pipe_screen *_screen,
+ struct pipe_resource *res)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ screen->resource_changed(screen, res);
+}
+
+static void
+dd_screen_resource_destroy(struct pipe_screen *_screen,
+ struct pipe_resource *res)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ screen->resource_destroy(screen, res);
+}
+
+static boolean
+dd_screen_resource_get_handle(struct pipe_screen *_screen,
+ struct pipe_context *_pipe,
+ struct pipe_resource *resource,
+ struct winsys_handle *handle,
+ unsigned usage)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+ struct pipe_context *pipe = _pipe ? dd_context(_pipe)->pipe : NULL;
+
+ return screen->resource_get_handle(screen, pipe, resource, handle, usage);
+}
+
+static bool
+dd_screen_check_resource_capability(struct pipe_screen *_screen,
+ struct pipe_resource *resource,
+ unsigned bind)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ return screen->check_resource_capability(screen, resource, bind);
+}
+
+
+/********************************************************************
+ * fence
+ */
+
+static void
+dd_screen_fence_reference(struct pipe_screen *_screen,
+ struct pipe_fence_handle **pdst,
+ struct pipe_fence_handle *src)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ screen->fence_reference(screen, pdst, src);
+}
+
+static boolean
+dd_screen_fence_finish(struct pipe_screen *_screen,
+ struct pipe_context *_ctx,
+ struct pipe_fence_handle *fence,
+ uint64_t timeout)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+ struct pipe_context *ctx = _ctx ? dd_context(_ctx)->pipe : NULL;
+
+ return screen->fence_finish(screen, ctx, fence, timeout);
+}
+
+/********************************************************************
+ * memobj
+ */
+
+static struct pipe_memory_object *
+dd_screen_memobj_create_from_handle(struct pipe_screen *_screen,
+ struct winsys_handle *handle,
+ bool dedicated)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ return screen->memobj_create_from_handle(screen, handle, dedicated);
+}
+
+static void
+dd_screen_memobj_destroy(struct pipe_screen *_screen,
+ struct pipe_memory_object *memobj)
+{
+ struct pipe_screen *screen = dd_screen(_screen)->screen;
+
+ screen->memobj_destroy(screen, memobj);
+}
+/********************************************************************
+ * screen
+ */
+
+static void
+dd_screen_destroy(struct pipe_screen *_screen)
+{
+ struct dd_screen *dscreen = dd_screen(_screen);
+ struct pipe_screen *screen = dscreen->screen;
+
+ screen->destroy(screen);
+ FREE(dscreen);
+}
+
+static void
+skip_space(const char **p)
+{
+ while (isspace(**p))
+ (*p)++;
+}
+
+static bool
+match_word(const char **cur, const char *word)
+{
+ size_t len = strlen(word);
+ if (strncmp(*cur, word, len) != 0)
+ return false;
+
+ const char *p = *cur + len;
+ if (*p) {
+ if (!isspace(*p))
+ return false;
+
+ *cur = p + 1;
+ } else {
+ *cur = p;
+ }
+
+ return true;
+}
+
+static bool
+match_uint(const char **cur, unsigned *value)
+{
+ char *end;
+ unsigned v = strtoul(*cur, &end, 0);
+ if (end == *cur || (*end && !isspace(*end)))
+ return false;
+ *cur = end;
+ *value = v;
+ return true;
+}
+
+struct pipe_screen *
+ddebug_screen_create(struct pipe_screen *screen)
+{
+ struct dd_screen *dscreen;
+ const char *option;
+ bool flush = false;
+ bool verbose = false;
+ bool transfers = false;
+ unsigned timeout = 1000;
+ unsigned apitrace_dump_call = 0;
+ enum dd_dump_mode mode = DD_DUMP_ONLY_HANGS;
+
+ option = debug_get_option("GALLIUM_DDEBUG", NULL);
+ if (!option)
+ return screen;
+
+ if (!strcmp(option, "help")) {
+ puts("Gallium driver debugger");
+ puts("");
+ puts("Usage:");
+ puts("");
+ puts(" GALLIUM_DDEBUG=\"[<timeout in ms>] [(always|apitrace <call#)] [flush] [transfers] [verbose]\"");
+ puts(" GALLIUM_DDEBUG_SKIP=[count]");
+ puts("");
+ puts("Dump context and driver information of draw calls into");
+ puts("$HOME/"DD_DIR"/. By default, watch for GPU hangs and only dump information");
+ puts("about draw calls related to the hang.");
+ puts("");
+ puts("<timeout in ms>");
+ puts(" Change the default timeout for GPU hang detection (default=1000ms).");
+ puts(" Setting this to 0 will disable GPU hang detection entirely.");
+ puts("");
+ puts("always");
+ puts(" Dump information about all draw calls.");
+ puts("");
+ puts("transfers");
+ puts(" Also dump and do hang detection on transfers.");
+ puts("");
+ puts("apitrace <call#>");
+ puts(" Dump information about the draw call corresponding to the given");
+ puts(" apitrace call number and exit.");
+ puts("");
+ puts("flush");
+ puts(" Flush after every draw call.");
+ puts("");
+ puts("verbose");
+ puts(" Write additional information to stderr.");
+ puts("");
+ puts("GALLIUM_DDEBUG_SKIP=count");
+ puts(" Skip dumping on the first count draw calls (only relevant with 'always').");
+ puts("");
+ exit(0);
+ }
+
+ for (;;) {
+ skip_space(&option);
+ if (!*option)
+ break;
+
+ if (match_word(&option, "always")) {
+ if (mode == DD_DUMP_APITRACE_CALL) {
+ printf("ddebug: both 'always' and 'apitrace' specified\n");
+ exit(1);
+ }
+
+ mode = DD_DUMP_ALL_CALLS;
+ } else if (match_word(&option, "flush")) {
+ flush = true;
+ } else if (match_word(&option, "transfers")) {
+ transfers = true;
+ } else if (match_word(&option, "verbose")) {
+ verbose = true;
+ } else if (match_word(&option, "apitrace")) {
+ if (mode != DD_DUMP_ONLY_HANGS) {
+ printf("ddebug: 'apitrace' can only appear once and not mixed with 'always'\n");
+ exit(1);
+ }
+
+ if (!match_uint(&option, &apitrace_dump_call)) {
+ printf("ddebug: expected call number after 'apitrace'\n");
+ exit(1);
+ }
+
+ mode = DD_DUMP_APITRACE_CALL;
+ } else if (match_uint(&option, &timeout)) {
+ /* no-op */
+ } else {
+ printf("ddebug: bad options: %s\n", option);
+ exit(1);
+ }
+ }
+
+ dscreen = CALLOC_STRUCT(dd_screen);
+ if (!dscreen)
+ return NULL;
+
+#define SCR_INIT(_member) \
+ dscreen->base._member = screen->_member ? dd_screen_##_member : NULL
+
+ dscreen->base.destroy = dd_screen_destroy;
+ dscreen->base.get_name = dd_screen_get_name;
+ dscreen->base.get_vendor = dd_screen_get_vendor;
+ dscreen->base.get_device_vendor = dd_screen_get_device_vendor;
+ SCR_INIT(get_disk_shader_cache);
+ dscreen->base.get_param = dd_screen_get_param;
+ dscreen->base.get_paramf = dd_screen_get_paramf;
+ dscreen->base.get_compute_param = dd_screen_get_compute_param;
+ dscreen->base.get_shader_param = dd_screen_get_shader_param;
+ dscreen->base.query_memory_info = dd_screen_query_memory_info;
+ /* get_video_param */
+ /* get_compute_param */
+ SCR_INIT(get_timestamp);
+ dscreen->base.context_create = dd_screen_context_create;
+ dscreen->base.is_format_supported = dd_screen_is_format_supported;
+ /* is_video_format_supported */
+ SCR_INIT(can_create_resource);
+ dscreen->base.resource_create = dd_screen_resource_create;
+ dscreen->base.resource_from_handle = dd_screen_resource_from_handle;
+ SCR_INIT(resource_from_memobj);
+ SCR_INIT(resource_from_user_memory);
+ SCR_INIT(check_resource_capability);
+ dscreen->base.resource_get_handle = dd_screen_resource_get_handle;
+ SCR_INIT(resource_changed);
+ dscreen->base.resource_destroy = dd_screen_resource_destroy;
+ SCR_INIT(flush_frontbuffer);
+ SCR_INIT(fence_reference);
+ SCR_INIT(fence_finish);
+ SCR_INIT(memobj_create_from_handle);
+ SCR_INIT(memobj_destroy);
+ SCR_INIT(get_driver_query_info);
+ SCR_INIT(get_driver_query_group_info);
+ SCR_INIT(get_compiler_options);
+ SCR_INIT(get_driver_uuid);
+ SCR_INIT(get_device_uuid);
+
+#undef SCR_INIT
+
+ dscreen->screen = screen;
+ dscreen->timeout_ms = timeout;
+ dscreen->dump_mode = mode;
+ dscreen->flush_always = flush;
+ dscreen->transfers = transfers;
+ dscreen->verbose = verbose;
+ dscreen->apitrace_dump_call = apitrace_dump_call;
+
+ switch (dscreen->dump_mode) {
+ case DD_DUMP_ALL_CALLS:
+ fprintf(stderr, "Gallium debugger active. Logging all calls.\n");
+ break;
+ case DD_DUMP_APITRACE_CALL:
+ fprintf(stderr, "Gallium debugger active. Going to dump an apitrace call.\n");
+ break;
+ default:
+ fprintf(stderr, "Gallium debugger active.\n");
+ break;
+ }
+
+ if (dscreen->timeout_ms > 0)
+ fprintf(stderr, "Hang detection timeout is %ums.\n", dscreen->timeout_ms);
+ else
+ fprintf(stderr, "Hang detection is disabled.\n");
+
+ dscreen->skip_count = debug_get_num_option("GALLIUM_DDEBUG_SKIP", 0);
+ if (dscreen->skip_count > 0) {
+ fprintf(stderr, "Gallium debugger skipping the first %u draw calls.\n",
+ dscreen->skip_count);
+ }
+
+ return &dscreen->base;
+}
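
A few example settings that the parser above accepts (the numbers are illustrative):

   /*
    * GALLIUM_DDEBUG="1000"            hang detection with a 1000 ms timeout (the default)
    * GALLIUM_DDEBUG="always verbose"  dump every draw call and log extra info to stderr
    * GALLIUM_DDEBUG="apitrace 2340"   dump the draw for apitrace call 2340, then exit
    * GALLIUM_DDEBUG="2000 transfers"  2 s timeout, also record and check transfers
    * GALLIUM_DDEBUG_SKIP=50000        with 'always', skip dumping the first 50000 draws
    */
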
diff --git a/src/gallium/auxiliary/driver_ddebug/dd_util.h b/src/gallium/auxiliary/driver_ddebug/dd_util.h
new file mode 100644
index 00000000000..bdfb7cc9163
--- /dev/null
+++ b/src/gallium/auxiliary/driver_ddebug/dd_util.h
@@ -0,0 +1,106 @@
+/**************************************************************************
+ *
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef DD_UTIL_H
+#define DD_UTIL_H
+
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/stat.h>
+
+#include "c99_alloca.h"
+#include "os/os_process.h"
+#include "util/u_atomic.h"
+#include "util/u_debug.h"
+
+/* name of the directory in home */
+#define DD_DIR "ddebug_dumps"
+
+static inline void
+dd_get_debug_filename_and_mkdir(char *buf, size_t buflen, bool verbose)
+{
+ static unsigned index;
+ char proc_name[128], dir[256];
+
+ if (!os_get_process_name(proc_name, sizeof(proc_name))) {
+ fprintf(stderr, "dd: can't get the process name\n");
+ strcpy(proc_name, "unknown");
+ }
+
+ snprintf(dir, sizeof(dir), "%s/"DD_DIR, debug_get_option("HOME", "."));
+
+ if (mkdir(dir, 0774) && errno != EEXIST)
+ fprintf(stderr, "dd: can't create a directory (%i)\n", errno);
+
+ snprintf(buf, buflen, "%s/%s_%u_%08u", dir, proc_name, getpid(),
+ p_atomic_inc_return(&index) - 1);
+
+ if (verbose)
+ fprintf(stderr, "dd: dumping to file %s\n", buf);
+}
+
+static inline FILE *
+dd_get_debug_file(bool verbose)
+{
+ char name[512];
+ FILE *f;
+
+ dd_get_debug_filename_and_mkdir(name, sizeof(name), verbose);
+ f = fopen(name, "w");
+ if (!f) {
+ fprintf(stderr, "dd: can't open file %s\n", name);
+ return NULL;
+ }
+
+ return f;
+}
+
+static inline void
+dd_parse_apitrace_marker(const char *string, int len, unsigned *call_number)
+{
+ unsigned num;
+ char *s;
+
+ if (len <= 0)
+ return;
+
+ /* Make it zero-terminated. */
+ s = alloca(len + 1);
+ memcpy(s, string, len);
+ s[len] = 0;
+
+ /* Parse the number. */
+ errno = 0;
+ num = strtol(s, NULL, 10);
+ if (errno)
+ return;
+
+ *call_number = num;
+}
+
+#endif /* DD_UTIL_H */
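
Given the format string in dd_get_debug_filename_and_mkdir() above, dump files land in $HOME/ddebug_dumps/ and are named <process>_<pid>_<8-digit counter>, e.g. (all values illustrative):

   /home/user/ddebug_dumps/glxgears_1234_00000007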