author     Nicolai Hähnle <[email protected]>  2017-02-13 12:51:12 +0100
committer  Nicolai Hähnle <[email protected]>  2017-04-05 10:37:19 +0200
commit     47e59a7e36351de75ed0539e693bbf3727b44084 (patch)
tree       3e9675852ab1fb98f890eeec2544b79c0ba08529 /src/gallium
parent     0baee15596d51df5fe0f0edd4b48bd500c26e2cd (diff)
winsys/amdgpu: sparse buffer debugging helpers
Reviewed-by: Marek Olšák <[email protected]>
Diffstat (limited to 'src/gallium')
-rw-r--r--  src/gallium/winsys/amdgpu/drm/amdgpu_bo.c | 61
1 file changed, 61 insertions, 0 deletions
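
The patch adds a compile-time switch, DEBUG_SPARSE_COMMITS, plus a sparse_dump() helper that amdgpu_bo_sparse_commit() calls while holding the commit lock, as shown in the diff below. A minimal sketch of the same compile-time gating pattern (debug_dump() and commit_path() here are hypothetical stand-ins, not code from the patch):

    #include <stdio.h>

    /* Flip to 1 and rebuild to get the verbose dumps; there is no runtime knob. */
    #define DEBUG_SPARSE_COMMITS 0

    #if DEBUG_SPARSE_COMMITS
    static void debug_dump(const char *caller)
    {
       fprintf(stderr, "sparse commit reached from %s\n", caller);
    }
    #endif

    static void commit_path(void)
    {
    #if DEBUG_SPARSE_COMMITS
       debug_dump(__func__);   /* compiled out entirely when the switch is 0 */
    #endif
    }

    int main(void)
    {
       commit_path();
       return 0;
    }

Because the check is a preprocessor conditional rather than a runtime flag, the helper adds no overhead to release builds.
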
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index dd0603ad3ae..6bdcce53dc8 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -38,6 +38,8 @@
#include <stdio.h>
#include <inttypes.h>
+/* Set to 1 for verbose output showing committed sparse buffer ranges. */
+#define DEBUG_SPARSE_COMMITS 0
struct amdgpu_sparse_backing_chunk {
uint32_t begin, end;
@@ -575,6 +577,61 @@ void amdgpu_bo_slab_free(void *priv, struct pb_slab *pslab)
FREE(slab);
}
+#if DEBUG_SPARSE_COMMITS
+static void
+sparse_dump(struct amdgpu_winsys_bo *bo, const char *func)
+{
+ fprintf(stderr, "%s: %p (size=%"PRIu64", num_va_pages=%u) @ %s\n"
+ "Commitments:\n",
+ __func__, bo, bo->base.size, bo->u.sparse.num_va_pages, func);
+
+ struct amdgpu_sparse_backing *span_backing = NULL;
+ uint32_t span_first_backing_page = 0;
+ uint32_t span_first_va_page = 0;
+ uint32_t va_page = 0;
+
+ for (;;) {
+ struct amdgpu_sparse_backing *backing = 0;
+ uint32_t backing_page = 0;
+
+ if (va_page < bo->u.sparse.num_va_pages) {
+ backing = bo->u.sparse.commitments[va_page].backing;
+ backing_page = bo->u.sparse.commitments[va_page].page;
+ }
+
+ if (span_backing &&
+ (backing != span_backing ||
+ backing_page != span_first_backing_page + (va_page - span_first_va_page))) {
+ fprintf(stderr, " %u..%u: backing=%p:%u..%u\n",
+ span_first_va_page, va_page - 1, span_backing,
+ span_first_backing_page,
+ span_first_backing_page + (va_page - span_first_va_page) - 1);
+
+ span_backing = NULL;
+ }
+
+ if (va_page >= bo->u.sparse.num_va_pages)
+ break;
+
+ if (backing && !span_backing) {
+ span_backing = backing;
+ span_first_backing_page = backing_page;
+ span_first_va_page = va_page;
+ }
+
+ va_page++;
+ }
+
+ fprintf(stderr, "Backing:\n");
+
+ list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
+ fprintf(stderr, " %p (size=%"PRIu64")\n", backing, backing->bo->base.size);
+ for (unsigned i = 0; i < backing->num_chunks; ++i)
+ fprintf(stderr, " %u..%u\n", backing->chunks[i].begin, backing->chunks[i].end);
+ }
+}
+#endif
+
/*
* Attempt to allocate the given number of backing pages. Fewer pages may be
* allocated (depending on the fragmentation of existing backing buffers),
@@ -869,6 +926,10 @@ amdgpu_bo_sparse_commit(struct pb_buffer *buf, uint64_t offset, uint64_t size,
mtx_lock(&bo->u.sparse.commit_lock);
+#if DEBUG_SPARSE_COMMITS
+ sparse_dump(bo, __func__);
+#endif
+
if (commit) {
while (va_page < end_va_page) {
uint32_t span_va_page;
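
For context on the loop in sparse_dump() above: it walks the VA pages and coalesces consecutive pages whose backing pages are also consecutive into a single printed span, closing the span when the backing buffer changes, the mapping stops being contiguous, or the end of the buffer is reached. A simplified, self-contained sketch of that run-coalescing idea, using a hypothetical page_map[] array of backing-page indices (-1 meaning uncommitted) in place of the real commitment structures:

    #include <stdio.h>

    /* page_map[i] is the backing page index for VA page i, or -1 if uncommitted. */
    static void dump_spans(const int *page_map, unsigned num_va_pages)
    {
       int span_active = 0;
       unsigned span_first_va = 0;
       int span_first_backing = 0;

       for (unsigned va = 0; ; va++) {
          int backing = (va < num_va_pages) ? page_map[va] : -1;

          /* Close the current span when the mapping stops being contiguous. */
          if (span_active &&
              (backing < 0 ||
               backing != span_first_backing + (int)(va - span_first_va))) {
             printf("  %u..%u -> backing %d..%d\n",
                    span_first_va, va - 1,
                    span_first_backing,
                    span_first_backing + (int)(va - 1 - span_first_va));
             span_active = 0;
          }

          if (va >= num_va_pages)
             break;

          /* Open a new span at the first committed page after a gap. */
          if (backing >= 0 && !span_active) {
             span_active = 1;
             span_first_va = va;
             span_first_backing = backing;
          }
       }
    }

    int main(void)
    {
       const int map[] = { -1, 4, 5, 6, -1, 10, 11 };
       dump_spans(map, sizeof(map) / sizeof(map[0]));   /* prints spans 1..3 and 5..6 */
       return 0;
    }

The real helper prints the same kind of "first..last: backing=..." ranges, but keyed on the (backing buffer, page) pairs taken from bo->u.sparse.commitments.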