author     Chad Versace <[email protected]>  2017-09-12 14:05:08 -0700
committer  Chad Versace <[email protected]>  2017-10-17 23:46:06 -0700
commit     9775894f102535a79186985124087ac859b5ca44
tree       c47baa18a31f78334f82b4ed275e7c9373b7ebe7 /src/intel/vulkan
parent     fbf39fd7c33d56271d79bdbe039605588c8d84db
anv: Move size check from anv_bo_cache_import() to caller (v2)
This change prepares for VK_ANDROID_native_buffer. When the user imports
a gralloc handle into a VkImage using VK_ANDROID_native_buffer, the user
provides no size. The driver must infer the size from the internals of
the gralloc buffer.

The patch is essentially a refactor patch, but it does change behavior
in some edge cases, described below.

In what follows, the "nominal size" of the bo refers to anv_bo::size,
which may not match the bo's "actual size" according to the kernel.

Post-patch, the nominal size of the bo returned from
anv_bo_cache_import() is always the size of the imported dma-buf
according to lseek().

Pre-patch, the bo's nominal size was difficult to predict. If the
imported dma-buf's gem handle was not resident in the cache, then the
bo's nominal size was align(VkMemoryAllocateInfo::allocationSize, 4096).
If it *was* resident, then the bo's nominal size was whatever the cache
returned. As a consequence, the first cache insert decided the bo's
nominal size, which could be significantly smaller than the dma-buf's
actual size, as the nominal size was determined by
VkMemoryAllocateInfo::allocationSize and not lseek().

I believe this patch cleans up that messy behavior. For an imported or
exported VkDeviceMemory, anv_bo::size should now be the true size of
the bo, if I correctly understand the problem (which I possibly don't).

v2:
  - Preserve behavior of aligning size to 4096 before checking. [for
    jekstrand]
  - Check size with < instead of <=, to match behavior of commit
    c0a4f56 "anv: bo_cache: allow importing a BO larger than needed".
    [for chadv]
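
For illustration only (not part of the patch): a minimal sketch of the
caller-side check this commit introduces, using hypothetical helpers
align_to_page() and imported_bo_large_enough(). The bo's nominal size now
comes from the cache (via lseek() on the dma-buf), and each caller compares
it against its own page-aligned requirement, mirroring the new code in
anv_AllocateMemory() below.

/* Illustration only, not driver code. Hypothetical helpers that mirror
 * the caller-side check anv_AllocateMemory() gains in this patch. */
#include <stdbool.h>
#include <stdint.h>

static inline uint64_t
align_to_page(uint64_t size)
{
   /* The kernel is going to give us whole pages anyway. */
   return (size + 4095) & ~UINT64_C(4095);
}

/* Accept the import only if the bo the cache handed back is at least as
 * large as the page-aligned allocation size. The failure check uses <,
 * not <=, so a bo larger than needed is still accepted, matching commit
 * c0a4f56. */
static inline bool
imported_bo_large_enough(uint64_t bo_size, uint64_t allocation_size)
{
   return bo_size >= align_to_page(allocation_size);
}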
Diffstat (limited to 'src/intel/vulkan')
-rw-r--r--  src/intel/vulkan/anv_allocator.c | 21
-rw-r--r--  src/intel/vulkan/anv_device.c    | 25
-rw-r--r--  src/intel/vulkan/anv_intel.c     | 14
-rw-r--r--  src/intel/vulkan/anv_private.h   |  2
-rw-r--r--  src/intel/vulkan/anv_queue.c     |  7
5 files changed, 46 insertions, 23 deletions
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index 401cea40e61..27eedb53aa7 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -1272,13 +1272,10 @@ anv_bo_cache_alloc(struct anv_device *device,
VkResult
anv_bo_cache_import(struct anv_device *device,
struct anv_bo_cache *cache,
- int fd, uint64_t size, struct anv_bo **bo_out)
+ int fd, struct anv_bo **bo_out)
{
pthread_mutex_lock(&cache->mutex);
- /* The kernel is going to give us whole pages anyway */
- size = align_u64(size, 4096);
-
uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
if (!gem_handle) {
pthread_mutex_unlock(&cache->mutex);
@@ -1287,22 +1284,10 @@ anv_bo_cache_import(struct anv_device *device,
struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
if (bo) {
- if (bo->bo.size != size) {
- pthread_mutex_unlock(&cache->mutex);
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
- }
__sync_fetch_and_add(&bo->refcount, 1);
} else {
- /* For security purposes, we reject BO imports where the size does not
- * match exactly. This prevents a malicious client from passing a
- * buffer to a trusted client, lying about the size, and telling the
- * trusted client to try and texture from an image that goes
- * out-of-bounds. This sort of thing could lead to GPU hangs or worse
- * in the trusted client. The trusted client can protect itself against
- * this sort of attack but only if it can trust the buffer size.
- */
- off_t import_size = lseek(fd, 0, SEEK_END);
- if (import_size == (off_t)-1 || import_size < size) {
+ off_t size = lseek(fd, 0, SEEK_END);
+ if (size == (off_t)-1) {
anv_gem_close(device, gem_handle);
pthread_mutex_unlock(&cache->mutex);
return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
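
Aside (not driver code): a standalone sketch of the size query the import
path now relies on. For a dma-buf fd, seeking to the end reports the
buffer's size as the kernel sees it, and that value becomes the bo's
nominal size.

#include <inttypes.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Print the size of a dma-buf (or any seekable fd). Returns -1 on error,
 * e.g. if the fd does not support lseek(). */
int
print_dma_buf_size(int fd)
{
   off_t size = lseek(fd, 0, SEEK_END);
   if (size == (off_t)-1) {
      perror("lseek");
      return -1;
   }
   printf("dma-buf size: %" PRIu64 " bytes\n", (uint64_t)size);
   return 0;
}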
diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
index 1634b5158cc..546ed2d0ca5 100644
--- a/src/intel/vulkan/anv_device.c
+++ b/src/intel/vulkan/anv_device.c
@@ -1543,11 +1543,32 @@ VkResult anv_AllocateMemory(
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
result = anv_bo_cache_import(device, &device->bo_cache,
- fd_info->fd, pAllocateInfo->allocationSize,
- &mem->bo);
+ fd_info->fd, &mem->bo);
if (result != VK_SUCCESS)
goto fail;
+ VkDeviceSize aligned_alloc_size =
+ align_u64(pAllocateInfo->allocationSize, 4096);
+
+ /* For security purposes, we reject importing the bo if it's smaller
+ * than the requested allocation size. This prevents a malicious client
+ * from passing a buffer to a trusted client, lying about the size, and
+ * telling the trusted client to try and texture from an image that goes
+ * out-of-bounds. This sort of thing could lead to GPU hangs or worse
+ * in the trusted client. The trusted client can protect itself against
+ * this sort of attack but only if it can trust the buffer size.
+ */
+ if (mem->bo->size < aligned_alloc_size) {
+ result = vk_errorf(device->instance, device,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR,
+ "aligned allocationSize too large for "
+ "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR: "
+ "%"PRIu64"B > %"PRIu64"B",
+ aligned_alloc_size, mem->bo->size);
+ anv_bo_cache_release(device, &device->bo_cache, mem->bo);
+ goto fail;
+ }
+
/* From the Vulkan spec:
*
* "Importing memory from a file descriptor transfers ownership of
diff --git a/src/intel/vulkan/anv_intel.c b/src/intel/vulkan/anv_intel.c
index d6bad44091a..885888e82d8 100644
--- a/src/intel/vulkan/anv_intel.c
+++ b/src/intel/vulkan/anv_intel.c
@@ -76,10 +76,22 @@ VkResult anv_CreateDmaBufImageINTEL(
image = anv_image_from_handle(image_h);
result = anv_bo_cache_import(device, &device->bo_cache,
- pCreateInfo->fd, image->size, &mem->bo);
+ pCreateInfo->fd, &mem->bo);
if (result != VK_SUCCESS)
goto fail_import;
+ VkDeviceSize aligned_image_size = align_u64(image->size, 4096);
+
+ if (mem->bo->size < aligned_image_size) {
+ result = vk_errorf(device->instance, device,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR,
+ "dma-buf too small for image in "
+ "vkCreateDmaBufImageINTEL: %"PRIu64"B < "PRIu64"B",
+ mem->bo->size, aligned_image_size);
+ anv_bo_cache_release(device, &device->bo_cache, mem->bo);
+ goto fail_import;
+ }
+
if (device->instance->physicalDevice.supports_48bit_addresses)
mem->bo->flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 8af3f5c69e1..61e23282b02 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -718,7 +718,7 @@ VkResult anv_bo_cache_alloc(struct anv_device *device,
uint64_t size, struct anv_bo **bo);
VkResult anv_bo_cache_import(struct anv_device *device,
struct anv_bo_cache *cache,
- int fd, uint64_t size, struct anv_bo **bo);
+ int fd, struct anv_bo **bo);
VkResult anv_bo_cache_export(struct anv_device *device,
struct anv_bo_cache *cache,
struct anv_bo *bo_in, int *fd_out);
diff --git a/src/intel/vulkan/anv_queue.c b/src/intel/vulkan/anv_queue.c
index 7e675e2274d..c6b2e01c628 100644
--- a/src/intel/vulkan/anv_queue.c
+++ b/src/intel/vulkan/anv_queue.c
@@ -1023,10 +1023,15 @@ VkResult anv_ImportSemaphoreFdKHR(
new_impl.type = ANV_SEMAPHORE_TYPE_BO;
VkResult result = anv_bo_cache_import(device, &device->bo_cache,
- fd, 4096, &new_impl.bo);
+ fd, &new_impl.bo);
if (result != VK_SUCCESS)
return result;
+ if (new_impl.bo->size < 4096) {
+ anv_bo_cache_release(device, &device->bo_cache, new_impl.bo);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ }
+
/* If we're going to use this as a fence, we need to *not* have the
* EXEC_OBJECT_ASYNC bit set.
*/