diff options
author | Chris Wilson <[email protected]> | 2017-07-11 16:54:25 +0100 |
---|---|---|
committer | Kenneth Graunke <[email protected]> | 2017-07-11 14:07:47 -0700 |
commit | cead51a0c63df1613f9f6400657597679126024c (patch) | |
tree | d04a7951078bbef5f0fb3146454316fd110624e7 | |
parent | 314879f7fec07cedb5263681173a22d522a8ac9a (diff) |
i965: Use VALGRIND_MAKE_MEM_x in place of MALLOCLIKE/FREELIKE
Valgrind doesn't actually implement VALGRIND_FREELIKE_BLOCK as the
exact inverse of VALGRIND_MALLOCLIKE_BLOCK. It makes the block
inaccessible, but still leaves it defined in its allocation tracker i.e.
it will report the mmap as lost despite the call to FREELIKE!
Instead of treating the mmap as an allocation, treat it as changing the
access bits upon the memory, i.e. that it becomes defined (because
the buffer objects always contain valid content from the user's
perspective) upon mmap and inaccessible upon munmap. This makes memcheck
happy without leaving it thinking there is a very large leak.
Finally for consistency, we treat all the mmap/munmap paths the same
even though valgrind can intercept the regular mmap used for GTT. We
could move this in the drm_mmap/drm_munmap macros, but that quickly
looks ugly given the desire for those to support different OSes, but I
didn't try that hard!
Reviewed-by: Kenneth Graunke <[email protected]>
-rw-r--r-- | src/mesa/drivers/dri/i965/brw_bufmgr.c | 34 |
1 files changed, 27 insertions, 7 deletions
diff --git a/src/mesa/drivers/dri/i965/brw_bufmgr.c b/src/mesa/drivers/dri/i965/brw_bufmgr.c index 26f194e1923..98a75dd4a68 100644 --- a/src/mesa/drivers/dri/i965/brw_bufmgr.c +++ b/src/mesa/drivers/dri/i965/brw_bufmgr.c @@ -78,6 +78,16 @@ #define VG(x) #endif +/* VALGRIND_FREELIKE_BLOCK unfortunately does not actually undo the earlier + * VALGRIND_MALLOCLIKE_BLOCK but instead leaves vg convinced the memory is + * leaked. All because it does not call VG(cli_free) from its + * VG_USERREQ__FREELIKE_BLOCK handler. Instead of treating the memory like + * and allocation, we mark it available for use upon mmapping and remove + * it upon unmapping. + */ +#define VG_DEFINED(ptr, size) VG(VALGRIND_MAKE_MEM_DEFINED(ptr, size)) +#define VG_NOACCESS(ptr, size) VG(VALGRIND_MAKE_MEM_NOACCESS(ptr, size)) + #define memclear(s) memset(&s, 0, sizeof(s)) #define FILE_DEBUG_FLAG DEBUG_BUFMGR @@ -519,14 +529,15 @@ bo_free(struct brw_bo *bo) int ret; if (bo->map_cpu) { - VG(VALGRIND_FREELIKE_BLOCK(bo->map_cpu, 0)); + VG_NOACCESS(bo->map_cpu, bo->size); drm_munmap(bo->map_cpu, bo->size); } if (bo->map_wc) { - VG(VALGRIND_FREELIKE_BLOCK(bo->map_wc, 0)); + VG_NOACCESS(bo->map_wc, bo->size); drm_munmap(bo->map_wc, bo->size); } if (bo->map_gtt) { + VG_NOACCESS(bo->map_gtt, bo->size); drm_munmap(bo->map_gtt, bo->size); } @@ -692,14 +703,16 @@ brw_bo_map_cpu(struct brw_context *brw, struct brw_bo *bo, unsigned flags) __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno)); return NULL; } - VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1)); map = (void *) (uintptr_t) mmap_arg.addr_ptr; + VG_DEFINED(map, bo->size); if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) { - VG(VALGRIND_FREELIKE_BLOCK(map, 0)); + VG_NOACCESS(map, bo->size); drm_munmap(map, bo->size); } } + assert(bo->map_cpu); + DBG("brw_bo_map_cpu: %d (%s) -> %p, ", bo->gem_handle, bo->name, bo->map_cpu); print_flags(flags); @@ -750,9 +763,7 @@ brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo, unsigned 
flags) return NULL; } - /* and mmap it. We don't need to use VALGRIND_MALLOCLIKE_BLOCK - * because Valgrind will already intercept this mmap call. - */ + /* and mmap it. */ map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED, bufmgr->fd, mmap_arg.offset); if (map == MAP_FAILED) { @@ -761,10 +772,19 @@ brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo, unsigned flags) return NULL; } + /* We don't need to use VALGRIND_MALLOCLIKE_BLOCK because Valgrind will + * already intercept this mmap call. However, for consistency between + * all the mmap paths, we mark the pointer as defined now and mark it + * as inaccessible afterwards. + */ + VG_DEFINED(map, bo->size); + if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map)) { + VG_NOACCESS(map, bo->size); drm_munmap(map, bo->size); } } + assert(bo->map_gtt); DBG("bo_map_gtt: %d (%s) -> %p, ", bo->gem_handle, bo->name, bo->map_gtt); print_flags(flags); |