author     Bas Nieuwenhuizen <[email protected]>    2020-06-20 21:12:01 +0200
committer  Marge Bot <[email protected]>             2020-06-24 13:00:02 +0000
commit     a5cb88eea45c02eb398117b2c52f1ef1626e2204
tree       e20b617fb7bafd74922cff076f3f772da3937326 /src
parent     04765e6a9a43b831edd25a51a85e81f1390f36a8
radv: Handle mmap failures.
This can happen if we have too many mmaps active in the process.
CC: <[email protected]>
Reviewed-by: Samuel Pitoiset <[email protected]>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5578>
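Background, not part of the commit: a Linux process can exhaust its allowance of memory mappings (typically bounded by the vm.max_map_count sysctl), at which point mmap() fails and returns MAP_FAILED; the radv winsys buffer_map() then comes back NULL, which the new checks below translate into VK_ERROR_OUT_OF_DEVICE_MEMORY. A minimal standalone sketch of the underlying failure mode, using plain POSIX mmap rather than the winsys wrapper:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* Anonymous 4 KiB mapping; mmap() signals failure by returning
	 * MAP_FAILED, e.g. once the process already has too many mappings. */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	munmap(p, 4096);
	return 0;
}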
Diffstat (limited to 'src')
-rw-r--r--  src/amd/vulkan/radv_descriptor_set.c  37
-rw-r--r--  src/amd/vulkan/radv_device.c          22
-rw-r--r--  src/amd/vulkan/radv_shader.c          17
3 files changed, 61 insertions, 15 deletions
diff --git a/src/amd/vulkan/radv_descriptor_set.c b/src/amd/vulkan/radv_descriptor_set.c
index 8d1343d2d8b..21b9de72efb 100644
--- a/src/amd/vulkan/radv_descriptor_set.c
+++ b/src/amd/vulkan/radv_descriptor_set.c
@@ -629,6 +629,23 @@ radv_descriptor_set_destroy(struct radv_device *device,
 	vk_free2(&device->vk.alloc, NULL, set);
 }
 
+static void radv_destroy_descriptor_pool(struct radv_device *device,
+                                         const VkAllocationCallbacks *pAllocator,
+                                         struct radv_descriptor_pool *pool)
+{
+	if (!pool->host_memory_base) {
+		for(int i = 0; i < pool->entry_count; ++i) {
+			radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
+		}
+	}
+
+	if (pool->bo)
+		device->ws->buffer_destroy(pool->bo);
+
+	vk_object_base_finish(&pool->base);
+	vk_free2(&device->vk.alloc, pAllocator, pool);
+}
+
 VkResult radv_CreateDescriptorPool(
 	VkDevice                                    _device,
 	const VkDescriptorPoolCreateInfo*           pCreateInfo,
@@ -721,7 +738,15 @@ VkResult radv_CreateDescriptorPool(
 		                                     RADEON_FLAG_READ_ONLY |
 		                                     RADEON_FLAG_32BIT,
 		                                     RADV_BO_PRIORITY_DESCRIPTOR);
+		if (!pool->bo) {
+			radv_destroy_descriptor_pool(device, pAllocator, pool);
+			return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+		}
 		pool->mapped_ptr = (uint8_t*)device->ws->buffer_map(pool->bo);
+		if (!pool->mapped_ptr) {
+			radv_destroy_descriptor_pool(device, pAllocator, pool);
+			return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+		}
 	}
 	pool->size = bo_size;
 	pool->max_entry_count = pCreateInfo->maxSets;
@@ -741,17 +766,7 @@ void radv_DestroyDescriptorPool(
 	if (!pool)
 		return;
 
-	if (!pool->host_memory_base) {
-		for(int i = 0; i < pool->entry_count; ++i) {
-			radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
-		}
-	}
-
-	if (pool->bo)
-		device->ws->buffer_destroy(pool->bo);
-
-	vk_object_base_finish(&pool->base);
-	vk_free2(&device->vk.alloc, pAllocator, pool);
+	radv_destroy_descriptor_pool(device, pAllocator, pool);
 }
 
 VkResult radv_ResetDescriptorPool(
diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c
index 1db64cb2ac9..d16235f7ce1 100644
--- a/src/amd/vulkan/radv_device.c
+++ b/src/amd/vulkan/radv_device.c
@@ -3012,6 +3012,8 @@ static VkResult radv_device_init_border_color(struct radv_device *device)
 	device->border_color_data.colors_gpu_ptr =
 		device->ws->buffer_map(device->border_color_data.bo);
+	if (!device->border_color_data.colors_gpu_ptr)
+		return vk_error(device->physical_device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
 	pthread_mutex_init(&device->border_color_data.mutex, NULL);
 
 	return VK_SUCCESS;
@@ -4097,6 +4099,8 @@ radv_get_preamble_cs(struct radv_queue *queue,
 
 	if (descriptor_bo != queue->descriptor_bo) {
 		uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo);
+		if (!map)
+			goto fail;
 
 		if (scratch_bo) {
 			uint64_t scratch_va = radv_buffer_get_va(scratch_bo);
@@ -6362,7 +6366,14 @@ radv_SignalSemaphore(VkDevice _device,
 	return VK_SUCCESS;
 }
 
-
+static void radv_destroy_event(struct radv_device *device,
+                               const VkAllocationCallbacks* pAllocator,
+                               struct radv_event *event)
+{
+	device->ws->buffer_destroy(event->bo);
+	vk_object_base_finish(&event->base);
+	vk_free2(&device->vk.alloc, pAllocator, event);
+}
 
 VkResult radv_CreateEvent(
 	VkDevice                                    _device,
@@ -6390,6 +6401,10 @@ VkResult radv_CreateEvent(
 	}
 
 	event->map = (uint64_t*)device->ws->buffer_map(event->bo);
+	if (!event->map) {
+		radv_destroy_event(device, pAllocator, event);
+		return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+	}
 
 	*pEvent = radv_event_to_handle(event);
 
@@ -6406,9 +6421,8 @@ void radv_DestroyEvent(
 	if (!event)
 		return;
 
-	device->ws->buffer_destroy(event->bo);
-	vk_object_base_finish(&event->base);
-	vk_free2(&device->vk.alloc, pAllocator, event);
+
+	radv_destroy_event(device, pAllocator, event);
 }
 
 VkResult radv_GetEventStatus(
diff --git a/src/amd/vulkan/radv_shader.c b/src/amd/vulkan/radv_shader.c
index f928ad2be4c..6cc82f0903a 100644
--- a/src/amd/vulkan/radv_shader.c
+++ b/src/amd/vulkan/radv_shader.c
@@ -667,7 +667,18 @@ radv_alloc_shader_memory(struct radv_device *device,
 	                                     (device->physical_device->rad_info.cpdma_prefetch_writes_memory ?
 	                                             0 : RADEON_FLAG_READ_ONLY),
 	                                     RADV_BO_PRIORITY_SHADER);
+	if (!slab->bo) {
+		free(slab);
+		return NULL;
+	}
+
 	slab->ptr = (char*)device->ws->buffer_map(slab->bo);
+	if (!slab->ptr) {
+		device->ws->buffer_destroy(slab->bo);
+		free(slab);
+		return NULL;
+	}
+
 	list_inithead(&slab->shaders);
 
 	mtx_lock(&device->shader_slab_mutex);
@@ -1012,6 +1023,12 @@ radv_shader_variant_create(struct radv_device *device,
 	}
 
 	void *dest_ptr = radv_alloc_shader_memory(device, variant);
+	if (!dest_ptr) {
+		if (binary->type == RADV_BINARY_TYPE_RTLD)
+			ac_rtld_close(&rtld_binary);
+		free(variant);
+		return NULL;
+	}
 
 	if (binary->type == RADV_BINARY_TYPE_RTLD) {
 		struct radv_shader_binary_rtld* bin = (struct radv_shader_binary_rtld *)binary;
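The recurring shape of the fix above is to factor teardown into a single helper (radv_destroy_descriptor_pool, radv_destroy_event) that both the Create* error paths and the public Destroy* entry points call. A rough standalone sketch of that pattern, with hypothetical demo_* names and a plain POSIX mapping standing in for the radv winsys BO:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

/* Hypothetical object owning a mapped region, loosely mirroring how a
 * radv descriptor pool or event owns a mapped BO. */
struct demo_obj {
	void  *map;
	size_t size;
};

/* One destroy helper shared by the create error path and the destroy
 * entry point, so neither path leaks the mapping or the struct. */
static void demo_destroy(struct demo_obj *obj)
{
	if (obj->map)
		munmap(obj->map, obj->size);
	free(obj);
}

static struct demo_obj *demo_create(size_t size)
{
	struct demo_obj *obj = calloc(1, sizeof(*obj));
	if (!obj)
		return NULL;

	obj->size = size;
	obj->map = mmap(NULL, size, PROT_READ | PROT_WRITE,
	                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (obj->map == MAP_FAILED) {
		obj->map = NULL;   /* keep the shared cleanup path valid */
		demo_destroy(obj); /* same helper the destroy path uses */
		return NULL;
	}
	return obj;
}

int main(void)
{
	struct demo_obj *obj = demo_create(4096);
	if (!obj) {
		fprintf(stderr, "demo_create failed\n");
		return 1;
	}
	demo_destroy(obj);
	return 0;
}

In the sketch, resetting obj->map to NULL before calling the shared helper keeps the cleanup valid for a half-constructed object; the radv helper makes the analogous check on pool->bo.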