summary refs log tree commit diff stats
path: root/src/intel/tools
diff options
context:
space:
mode:
authorFrancisco Jerez <[email protected]>2019-01-18 12:51:57 -0800
committerFrancisco Jerez <[email protected]>2019-02-11 12:27:22 -0800
commit374eb3cd6f87347e73ec35b247be947e0aba11c4 (patch)
tree976280297e4759abd37c6b7cc3b2cbd3b7b0ad63 /src/intel/tools
parente404c6879d57bffac580936ef420f50648e55f7b (diff)
intel/dump_gpu: Disambiguate between BOs from different GEM handle spaces.
This fixes a rather astonishing problem that came up while debugging an issue in the Vulkan CTS. Apparently the Vulkan CTS framework has the tendency to create multiple VkDevices, each one with a separate DRM device FD and therefore a disjoint GEM buffer object handle space. Because the intel_dump_gpu tool wasn't making any distinction between buffers from the different handle spaces, it was confusing the instruction state pools from both devices, which happened to have the exact same GEM handle and PPGTT virtual address, but completely different shader contents. This was causing the simulator to believe that the vertex pipeline was executing a fragment shader, which didn't end up well. Reviewed-by: Lionel Landwerlin <[email protected]>
Diffstat (limited to 'src/intel/tools')
-rw-r--r--src/intel/tools/intel_dump_gpu.c41
1 file changed, 23 insertions(+), 18 deletions(-)
diff --git a/src/intel/tools/intel_dump_gpu.c b/src/intel/tools/intel_dump_gpu.c
index ffe49b10108..19e054c894c 100644
--- a/src/intel/tools/intel_dump_gpu.c
+++ b/src/intel/tools/intel_dump_gpu.c
@@ -58,6 +58,7 @@ static FILE *output_file = NULL;
static int verbose = 0;
static bool device_override;
+#define MAX_FD_COUNT 64
#define MAX_BO_COUNT 64 * 1024
struct bo {
@@ -94,12 +95,13 @@ fail_if(int cond, const char *format, ...)
}
static struct bo *
-get_bo(uint32_t handle)
+get_bo(unsigned fd, uint32_t handle)
{
struct bo *bo;
fail_if(handle >= MAX_BO_COUNT, "bo handle too large\n");
- bo = &bos[handle];
+ fail_if(fd >= MAX_FD_COUNT, "bo fd too large\n");
+ bo = &bos[handle + fd * MAX_BO_COUNT];
return bo;
}
@@ -115,7 +117,7 @@ static uint32_t device = 0;
static struct aub_file aub_file;
static void *
-relocate_bo(struct bo *bo, const struct drm_i915_gem_execbuffer2 *execbuffer2,
+relocate_bo(int fd, struct bo *bo, const struct drm_i915_gem_execbuffer2 *execbuffer2,
const struct drm_i915_gem_exec_object2 *obj)
{
const struct drm_i915_gem_exec_object2 *exec_objects =
@@ -137,7 +139,7 @@ relocate_bo(struct bo *bo, const struct drm_i915_gem_execbuffer2 *execbuffer2,
handle = relocs[i].target_handle;
aub_write_reloc(&devinfo, ((char *)relocated) + relocs[i].offset,
- get_bo(handle)->offset + relocs[i].delta);
+ get_bo(fd, handle)->offset + relocs[i].delta);
}
return relocated;
@@ -226,7 +228,7 @@ dump_execbuffer2(int fd, struct drm_i915_gem_execbuffer2 *execbuffer2)
for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
obj = &exec_objects[i];
- bo = get_bo(obj->handle);
+ bo = get_bo(fd, obj->handle);
/* If bo->size == 0, this means they passed us an invalid
* buffer. The kernel will reject it and so should we.
@@ -262,13 +264,13 @@ dump_execbuffer2(int fd, struct drm_i915_gem_execbuffer2 *execbuffer2)
batch_index = (execbuffer2->flags & I915_EXEC_BATCH_FIRST) ? 0 :
execbuffer2->buffer_count - 1;
- batch_bo = get_bo(exec_objects[batch_index].handle);
+ batch_bo = get_bo(fd, exec_objects[batch_index].handle);
for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
obj = &exec_objects[i];
- bo = get_bo(obj->handle);
+ bo = get_bo(fd, obj->handle);
if (obj->relocation_count > 0)
- data = relocate_bo(bo, execbuffer2, obj);
+ data = relocate_bo(fd, bo, execbuffer2, obj);
else
data = bo->map;
@@ -306,11 +308,12 @@ dump_execbuffer2(int fd, struct drm_i915_gem_execbuffer2 *execbuffer2)
}
static void
-add_new_bo(int handle, uint64_t size, void *map)
+add_new_bo(unsigned fd, int handle, uint64_t size, void *map)
{
- struct bo *bo = &bos[handle];
+ struct bo *bo = &bos[handle + fd * MAX_BO_COUNT];
fail_if(handle >= MAX_BO_COUNT, "bo handle out of range\n");
+ fail_if(fd >= MAX_FD_COUNT, "bo fd out of range\n");
fail_if(size == 0, "bo size is invalid\n");
bo->size = size;
@@ -318,9 +321,9 @@ add_new_bo(int handle, uint64_t size, void *map)
}
static void
-remove_bo(int handle)
+remove_bo(int fd, int handle)
{
- struct bo *bo = get_bo(handle);
+ struct bo *bo = get_bo(fd, handle);
if (bo->map && !IS_USERPTR(bo->map))
munmap(bo->map, bo->size);
@@ -383,7 +386,7 @@ maybe_init(void)
}
fclose(config);
- bos = calloc(MAX_BO_COUNT, sizeof(bos[0]));
+ bos = calloc(MAX_FD_COUNT * MAX_BO_COUNT, sizeof(bos[0]));
fail_if(bos == NULL, "out of memory\n");
}
@@ -455,7 +458,7 @@ ioctl(int fd, unsigned long request, ...)
ret = libc_ioctl(fd, request, argp);
if (ret == 0)
- add_new_bo(create->handle, create->size, NULL);
+ add_new_bo(fd, create->handle, create->size, NULL);
return ret;
}
@@ -465,15 +468,16 @@ ioctl(int fd, unsigned long request, ...)
ret = libc_ioctl(fd, request, argp);
if (ret == 0)
- add_new_bo(userptr->handle, userptr->user_size,
+ add_new_bo(fd, userptr->handle, userptr->user_size,
(void *) (uintptr_t) (userptr->user_ptr | USERPTR_FLAG));
+
return ret;
}
case DRM_IOCTL_GEM_CLOSE: {
struct drm_gem_close *close = argp;
- remove_bo(close->handle);
+ remove_bo(fd, close->handle);
return libc_ioctl(fd, request, argp);
}
@@ -483,7 +487,7 @@ ioctl(int fd, unsigned long request, ...)
ret = libc_ioctl(fd, request, argp);
if (ret == 0)
- add_new_bo(open->handle, open->size, NULL);
+ add_new_bo(fd, open->handle, open->size, NULL);
return ret;
}
@@ -497,7 +501,8 @@ ioctl(int fd, unsigned long request, ...)
size = lseek(prime->fd, 0, SEEK_END);
fail_if(size == -1, "failed to get prime bo size\n");
- add_new_bo(prime->handle, size, NULL);
+ add_new_bo(fd, prime->handle, size, NULL);
+
}
return ret;