Diffstat (limited to 'src/gallium/winsys')
 -rw-r--r--  src/gallium/winsys/radeon/drm/radeon_drm_bo.c      | 63
 -rw-r--r--  src/gallium/winsys/radeon/drm/radeon_drm_winsys.c  |  9
 -rw-r--r--  src/gallium/winsys/radeon/drm/radeon_drm_winsys.h  | 11
 3 files changed, 47 insertions(+), 36 deletions(-)
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
index 7aef238c847..bbfe5cc7884 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
@@ -198,7 +198,8 @@ static enum radeon_bo_domain radeon_bo_get_initial_domain(
return get_valid_domain(args.value);
}
-static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
+static uint64_t radeon_bomgr_find_va(const struct radeon_info *info,
+ struct radeon_vm_heap *heap,
uint64_t size, uint64_t alignment)
{
struct radeon_bo_va_hole *hole, *n;
@@ -207,11 +208,11 @@ static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
/* All VM address space holes will implicitly start aligned to the
* size alignment, so we don't need to sanitize the alignment here
*/
- size = align(size, rws->info.gart_page_size);
+ size = align(size, info->gart_page_size);
- mtx_lock(&rws->bo_va_mutex);
+ mtx_lock(&heap->mutex);
/* first look for a hole */
- LIST_FOR_EACH_ENTRY_SAFE(hole, n, &rws->va_holes, list) {
+ LIST_FOR_EACH_ENTRY_SAFE(hole, n, &heap->holes, list) {
offset = hole->offset;
waste = offset % alignment;
waste = waste ? alignment - waste : 0;
@@ -223,7 +224,7 @@ static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
offset = hole->offset;
list_del(&hole->list);
FREE(hole);
- mtx_unlock(&rws->bo_va_mutex);
+ mtx_unlock(&heap->mutex);
return offset;
}
if ((hole->size - waste) > size) {
@@ -235,46 +236,47 @@ static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
}
hole->size -= (size + waste);
hole->offset += size + waste;
- mtx_unlock(&rws->bo_va_mutex);
+ mtx_unlock(&heap->mutex);
return offset;
}
if ((hole->size - waste) == size) {
hole->size = waste;
- mtx_unlock(&rws->bo_va_mutex);
+ mtx_unlock(&heap->mutex);
return offset;
}
}
- offset = rws->va_offset;
+ offset = heap->start;
waste = offset % alignment;
waste = waste ? alignment - waste : 0;
if (waste) {
n = CALLOC_STRUCT(radeon_bo_va_hole);
n->size = waste;
n->offset = offset;
- list_add(&n->list, &rws->va_holes);
+ list_add(&n->list, &heap->holes);
}
offset += waste;
- rws->va_offset += size + waste;
- mtx_unlock(&rws->bo_va_mutex);
+ heap->start += size + waste;
+ mtx_unlock(&heap->mutex);
return offset;
}
-static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
+static void radeon_bomgr_free_va(const struct radeon_info *info,
+ struct radeon_vm_heap *heap,
uint64_t va, uint64_t size)
{
struct radeon_bo_va_hole *hole = NULL;
- size = align(size, rws->info.gart_page_size);
+ size = align(size, info->gart_page_size);
- mtx_lock(&rws->bo_va_mutex);
- if ((va + size) == rws->va_offset) {
- rws->va_offset = va;
+ mtx_lock(&heap->mutex);
+ if ((va + size) == heap->start) {
+ heap->start = va;
/* Delete uppermost hole if it reaches the new top */
- if (!LIST_IS_EMPTY(&rws->va_holes)) {
- hole = container_of(rws->va_holes.next, hole, list);
+ if (!LIST_IS_EMPTY(&heap->holes)) {
+ hole = container_of(heap->holes.next, hole, list);
if ((hole->offset + hole->size) == va) {
- rws->va_offset = hole->offset;
+ heap->start = hole->offset;
list_del(&hole->list);
FREE(hole);
}
@@ -282,20 +284,20 @@ static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
} else {
struct radeon_bo_va_hole *next;
- hole = container_of(&rws->va_holes, hole, list);
- LIST_FOR_EACH_ENTRY(next, &rws->va_holes, list) {
+ hole = container_of(&heap->holes, hole, list);
+ LIST_FOR_EACH_ENTRY(next, &heap->holes, list) {
if (next->offset < va)
break;
hole = next;
}
- if (&hole->list != &rws->va_holes) {
+ if (&hole->list != &heap->holes) {
/* Grow upper hole if it's adjacent */
if (hole->offset == (va + size)) {
hole->offset = va;
hole->size += size;
/* Merge lower hole if it's adjacent */
- if (next != hole && &next->list != &rws->va_holes &&
+ if (next != hole && &next->list != &heap->holes &&
(next->offset + next->size) == va) {
next->size += hole->size;
list_del(&hole->list);
@@ -306,7 +308,7 @@ static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
}
/* Grow lower hole if it's adjacent */
- if (next != hole && &next->list != &rws->va_holes &&
+ if (next != hole && &next->list != &heap->holes &&
(next->offset + next->size) == va) {
next->size += size;
goto out;
@@ -323,7 +325,7 @@ static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
}
}
out:
- mtx_unlock(&rws->bo_va_mutex);
+ mtx_unlock(&heap->mutex);
}
void radeon_bo_destroy(struct pb_buffer *_buf)
@@ -368,7 +370,7 @@ void radeon_bo_destroy(struct pb_buffer *_buf)
}
}
- radeon_bomgr_free_va(rws, bo->va, bo->base.size);
+ radeon_bomgr_free_va(&rws->info, &rws->vm64, bo->va, bo->base.size);
}
/* Close object. */
@@ -658,7 +660,8 @@ static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
unsigned va_gap_size;
va_gap_size = rws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
- bo->va = radeon_bomgr_find_va(rws, size + va_gap_size, alignment);
+ bo->va = radeon_bomgr_find_va(&rws->info, &rws->vm64,
+ size + va_gap_size, alignment);
va.handle = bo->handle;
va.vm_id = 0;
@@ -1059,7 +1062,8 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
if (ws->info.has_virtual_memory) {
struct drm_radeon_gem_va va;
- bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);
+ bo->va = radeon_bomgr_find_va(&ws->info, &ws->vm64,
+ bo->base.size, 1 << 20);
va.handle = bo->handle;
va.operation = RADEON_VA_MAP;
@@ -1202,7 +1206,8 @@ done:
if (ws->info.has_virtual_memory && !bo->va) {
struct drm_radeon_gem_va va;
- bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);
+ bo->va = radeon_bomgr_find_va(&ws->info, &ws->vm64,
+ bo->base.size, 1 << 20);
va.handle = bo->handle;
va.operation = RADEON_VA_MAP;
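
For orientation: the hunks above factor the winsys-wide VA bookkeeping
(va_offset, va_holes, bo_va_mutex) into a caller-supplied struct
radeon_vm_heap, with the existing 64-bit address space becoming the first
instance, rws->vm64. Below is a minimal standalone sketch of the same
first-fit allocator pattern; it is NOT the Mesa code. It assumes C11 with
pthreads in place of Mesa's mtx_t, a hand-rolled singly linked list instead
of the LIST_* helpers, power-of-two alignments, and a deliberately
simplified free path (the real radeon_bomgr_free_va coalesces holes; a
second sketch at the end of this page covers that). All names here
(vm_heap, heap_find_va, heap_free_va) are illustrative.

/*
 * Standalone sketch of the allocator pattern above -- not the Mesa code.
 */
#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct va_hole {
   struct va_hole *next;
   uint64_t offset;
   uint64_t size;
};

/* Mirrors struct radeon_vm_heap: one lock, a bump pointer, a hole list. */
struct vm_heap {
   pthread_mutex_t mutex;
   uint64_t start;        /* lowest never-allocated address */
   struct va_hole *holes; /* freed ranges below 'start' */
};

static uint64_t align64(uint64_t v, uint64_t a)
{
   return (v + a - 1) & ~(a - 1); /* 'a' must be a power of two */
}

/* First-fit: reuse a hole when one is big enough, else bump 'start'. */
static uint64_t heap_find_va(struct vm_heap *heap, uint64_t size,
                             uint64_t alignment)
{
   pthread_mutex_lock(&heap->mutex);

   for (struct va_hole **p = &heap->holes; *p; p = &(*p)->next) {
      struct va_hole *hole = *p;
      uint64_t offset = align64(hole->offset, alignment);
      uint64_t waste = offset - hole->offset;

      if (hole->size >= waste + size) {
         /* Shrink the hole from the front and drop it when empty.
          * (The real code keeps the 'waste' prefix as its own hole.) */
         hole->offset += waste + size;
         hole->size -= waste + size;
         if (!hole->size) {
            *p = hole->next;
            free(hole);
         }
         pthread_mutex_unlock(&heap->mutex);
         return offset;
      }
   }

   uint64_t offset = align64(heap->start, alignment);
   heap->start = offset + size;
   pthread_mutex_unlock(&heap->mutex);
   return offset;
}

/* Simplified free: records the range as a hole, no merging. */
static void heap_free_va(struct vm_heap *heap, uint64_t va, uint64_t size)
{
   struct va_hole *hole = calloc(1, sizeof(*hole));
   hole->offset = va;
   hole->size = size;
   pthread_mutex_lock(&heap->mutex);
   hole->next = heap->holes;
   heap->holes = hole;
   pthread_mutex_unlock(&heap->mutex);
}

int main(void)
{
   struct vm_heap heap = {
      .mutex = PTHREAD_MUTEX_INITIALIZER,
      .start = 0x10000, /* hypothetical heap base */
   };
   uint64_t a = heap_find_va(&heap, 4096, 4096);
   uint64_t b = heap_find_va(&heap, 8192, 65536);
   heap_free_va(&heap, a, 4096);
   uint64_t c = heap_find_va(&heap, 4096, 4096); /* reuses a's hole */
   printf("a=0x%" PRIx64 " b=0x%" PRIx64 " c=0x%" PRIx64 "\n", a, b, c);
   return 0;
}
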
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
index 617507ccd23..4fe36dc3f25 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
@@ -553,7 +553,7 @@ static void radeon_winsys_destroy(struct radeon_winsys *rws)
util_hash_table_destroy(ws->bo_handles);
util_hash_table_destroy(ws->bo_vas);
mtx_destroy(&ws->bo_handles_mutex);
- mtx_destroy(&ws->bo_va_mutex);
+ mtx_destroy(&ws->vm64.mutex);
mtx_destroy(&ws->bo_fence_lock);
if (ws->fd >= 0)
@@ -816,10 +816,11 @@ radeon_drm_winsys_create(int fd, const struct pipe_screen_config *config,
ws->bo_handles = util_hash_table_create(handle_hash, handle_compare);
ws->bo_vas = util_hash_table_create(handle_hash, handle_compare);
(void) mtx_init(&ws->bo_handles_mutex, mtx_plain);
- (void) mtx_init(&ws->bo_va_mutex, mtx_plain);
+ (void) mtx_init(&ws->vm64.mutex, mtx_plain);
(void) mtx_init(&ws->bo_fence_lock, mtx_plain);
- ws->va_offset = ws->va_start;
- list_inithead(&ws->va_holes);
+ list_inithead(&ws->vm64.holes);
+
+ ws->vm64.start = ws->va_start;
/* TTM aligns the BO size to the CPU page size */
ws->info.gart_page_size = sysconf(_SC_PAGESIZE);
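
The initialization hunk above maps onto the sketch's illustrative types
roughly as follows (Mesa uses mtx_init and list_inithead; heap_init is a
hypothetical helper, not a Mesa function):

/* Sketch of per-heap setup corresponding to radeon_drm_winsys_create(). */
static void heap_init(struct vm_heap *heap, uint64_t va_start)
{
   pthread_mutex_init(&heap->mutex, NULL);
   heap->holes = NULL; /* list_inithead(&ws->vm64.holes) in Mesa */
   heap->start = va_start;
}
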
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.h b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.h
index dd6e19a946e..c65f5cb7108 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.h
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.h
@@ -45,6 +45,12 @@ enum radeon_generation {
#define RADEON_SLAB_MIN_SIZE_LOG2 9
#define RADEON_SLAB_MAX_SIZE_LOG2 14
+struct radeon_vm_heap {
+ mtx_t mutex;
+ uint64_t start;
+ struct list_head holes;
+};
+
struct radeon_drm_winsys {
struct radeon_winsys base;
struct pipe_reference reference;
@@ -76,11 +82,10 @@ struct radeon_drm_winsys {
/* List of buffer virtual memory ranges. Protected by bo_handles_mutex. */
struct util_hash_table *bo_vas;
mtx_t bo_handles_mutex;
- mtx_t bo_va_mutex;
mtx_t bo_fence_lock;
- uint64_t va_offset;
- struct list_head va_holes;
+ struct radeon_vm_heap vm64;
+
bool check_vm;
struct radeon_surface_manager *surf_man;
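
Continuing the sketch from earlier on this page, here is what the merging
logic of radeon_bomgr_free_va looks like in the same illustrative terms.
It replaces the simplified heap_free_va above and, like the Mesa code,
keeps the hole list sorted by descending offset (an invariant this
function maintains on insert); the merge with the hole above the freed
range is left out for brevity.

/* Coalescing free, mirroring the cases in radeon_bomgr_free_va:
 * shrink 'start', grow an adjacent hole, or insert a new hole. */
static void heap_free_va_coalescing(struct vm_heap *heap,
                                    uint64_t va, uint64_t size)
{
   pthread_mutex_lock(&heap->mutex);

   if (va + size == heap->start) {
      /* Range touches the bump pointer: give it back directly, then
       * fold in the topmost hole if it now reaches 'start'. */
      heap->start = va;
      struct va_hole *top = heap->holes;
      if (top && top->offset + top->size == heap->start) {
         heap->start = top->offset;
         heap->holes = top->next;
         free(top);
      }
   } else {
      /* Find the first hole at or below 'va' (list is descending). */
      struct va_hole **p = &heap->holes;
      while (*p && (*p)->offset > va)
         p = &(*p)->next;

      struct va_hole *lower = *p;
      if (lower && lower->offset + lower->size == va) {
         lower->size += size; /* grow the lower hole upward */
      } else {
         struct va_hole *hole = calloc(1, sizeof(*hole));
         hole->offset = va;
         hole->size = size;
         hole->next = lower;
         *p = hole;
      }
      /* A fuller version would also merge with the hole just above
       * 'va + size', as the Mesa code does; omitted for brevity. */
   }

   pthread_mutex_unlock(&heap->mutex);
}
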