author    Samuel Pitoiset <[email protected]>    2020-04-28 13:10:56 +0200
committer Marge Bot <[email protected]>       2020-04-28 21:03:26 +0000
commit    5832f2b8a34fc5ca50fa0cf590539f2b8c3322f6 (patch)
tree      6c0fa38eaa70cdf29d5fe721af73ef97fd5ba365 /src
parent    32035cca3fcc1bb49cc75751d8ba324175afb14a (diff)
radv: track memory heaps usage if overallocation is explicitly disallowed
By default, RADV supports overallocation in the sense that it doesn't reject an allocation if the target heap is full. With VK_AMD_memory_overallocation_behavior, apps can disable overallocation; the driver then has to account for all allocations explicitly made by the application and reject new allocations once the heap is full.

Signed-off-by: Samuel Pitoiset <[email protected]>
Reviewed-by: Bas Nieuwenhuizen <[email protected]>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4785>
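For context, the application-side opt-in looks roughly like this. This is a minimal sketch, not part of the commit; it assumes physical_device, queue_info and the enabled extension list are set up elsewhere and that the implementation advertises VK_AMD_memory_overallocation_behavior:

    /* Hypothetical application code: chain the overallocation behavior into
     * VkDeviceCreateInfo so radv_CreateDevice() sees the structure below. */
    VkDeviceMemoryOverallocationCreateInfoAMD overalloc_info = {
        .sType = VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD,
        .overallocationBehavior = VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD,
    };

    VkDeviceCreateInfo device_info = {
        .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
        .pNext = &overalloc_info,
        .queueCreateInfoCount = 1,
        .pQueueCreateInfos = &queue_info,
    };

    VkDevice device;
    VkResult result = vkCreateDevice(physical_device, &device_info, NULL, &device);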
Diffstat (limited to 'src')
-rw-r--r--   src/amd/vulkan/radv_device.c  | 41
-rw-r--r--   src/amd/vulkan/radv_private.h |  7
2 files changed, 48 insertions, 0 deletions
diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c
index dd1178025b5..cacd47f269f 100644
--- a/src/amd/vulkan/radv_device.c
+++ b/src/amd/vulkan/radv_device.c
@@ -2887,6 +2887,7 @@ VkResult radv_CreateDevice(
bool keep_shader_info = false;
bool robust_buffer_access = false;
+ bool overallocation_disallowed = false;
/* Check enabled features */
if (pCreateInfo->pEnabledFeatures) {
@@ -2912,6 +2913,12 @@ VkResult radv_CreateDevice(
robust_buffer_access = true;
break;
}
+ case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD: {
+ const VkDeviceMemoryOverallocationCreateInfoAMD *overallocation = (const void *)ext;
+ if (overallocation->overallocationBehavior == VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD)
+ overallocation_disallowed = true;
+ break;
+ }
default:
break;
}
@@ -2962,6 +2969,9 @@ VkResult radv_CreateDevice(
mtx_init(&device->shader_slab_mutex, mtx_plain);
list_inithead(&device->shader_slabs);
+ device->overallocation_disallowed = overallocation_disallowed;
+ mtx_init(&device->overallocation_mutex, mtx_plain);
+
radv_bo_list_init(&device->bo_list);
for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
@@ -5050,6 +5060,12 @@ static void radv_free_memory(struct radv_device *device,
#endif
if (mem->bo) {
+ if (device->overallocation_disallowed) {
+ mtx_lock(&device->overallocation_mutex);
+ device->allocated_memory_size[mem->heap_index] -= mem->alloc_size;
+ mtx_unlock(&device->overallocation_mutex);
+ }
+
radv_bo_list_remove(device, mem->bo);
device->ws->buffer_destroy(mem->bo);
mem->bo = NULL;
@@ -5159,6 +5175,9 @@ static VkResult radv_alloc_memory(struct radv_device *device,
}
} else {
uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
+ uint32_t heap_index;
+
+ heap_index = device->physical_device->memory_properties.memoryTypes[pAllocateInfo->memoryTypeIndex].heapIndex;
domain = device->physical_device->memory_domains[pAllocateInfo->memoryTypeIndex];
flags = device->physical_device->memory_flags[pAllocateInfo->memoryTypeIndex];
@@ -5169,13 +5188,35 @@ static VkResult radv_alloc_memory(struct radv_device *device,
}
}
+ if (device->overallocation_disallowed) {
+ uint64_t total_size =
+ device->physical_device->memory_properties.memoryHeaps[heap_index].size;
+
+ mtx_lock(&device->overallocation_mutex);
+ if (device->allocated_memory_size[heap_index] + alloc_size > total_size) {
+ mtx_unlock(&device->overallocation_mutex);
+ result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ goto fail;
+ }
+ device->allocated_memory_size[heap_index] += alloc_size;
+ mtx_unlock(&device->overallocation_mutex);
+ }
+
mem->bo = device->ws->buffer_create(device->ws, alloc_size, device->physical_device->rad_info.max_alignment,
domain, flags, priority);
if (!mem->bo) {
+ if (device->overallocation_disallowed) {
+ mtx_lock(&device->overallocation_mutex);
+ device->allocated_memory_size[heap_index] -= alloc_size;
+ mtx_unlock(&device->overallocation_mutex);
+ }
result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
goto fail;
}
+
+ mem->heap_index = heap_index;
+ mem->alloc_size = alloc_size;
}
if (!wsi_info) {
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index f677de69437..ec4b45235c6 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -860,6 +860,11 @@ struct radv_device {
void *thread_trace_ptr;
uint32_t thread_trace_buffer_size;
int thread_trace_start_frame;
+
+ /* Overallocation. */
+ bool overallocation_disallowed;
+ uint64_t allocated_memory_size[VK_MAX_MEMORY_HEAPS];
+ mtx_t overallocation_mutex;
};
struct radv_device_memory {
@@ -867,6 +872,8 @@ struct radv_device_memory {
/* for dedicated allocations */
struct radv_image *image;
struct radv_buffer *buffer;
+ uint32_t heap_index;
+ uint64_t alloc_size;
void * map;
void * user_ptr;
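From the application's point of view, the effect of the tracking above is that vkAllocateMemory on an exhausted heap now fails instead of silently overallocating. A minimal sketch of the resulting behavior, assuming device, size and memory_type_index are defined elsewhere (hypothetical application code, not part of the commit):

    VkMemoryAllocateInfo alloc_info = {
        .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
        .allocationSize = size,               /* rounded up to 4096 by radv_alloc_memory() */
        .memoryTypeIndex = memory_type_index, /* selects the heap whose counter is checked */
    };

    VkDeviceMemory memory;
    VkResult res = vkAllocateMemory(device, &alloc_info, NULL, &memory);
    if (res == VK_ERROR_OUT_OF_DEVICE_MEMORY) {
        /* With overallocation disallowed, the per-heap counter has reached the
         * heap size; free other allocations or fall back to a memory type from
         * a different heap before retrying. */
    }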