author     Caio Marcelo de Oliveira Filho <[email protected]>   2019-03-01 13:15:31 -0800
committer  Caio Marcelo de Oliveira Filho <[email protected]>   2019-03-05 12:59:50 -0800
commit     69cc6272fbc1991d83b9e739acf5d464e8e905c6 (patch)
tree       705927cd936ccadaa3444c465e0b6de1c6aea2fb /src/intel/vulkan/anv_allocator.c
parent     5c655c47db76fd972beed11b8e3c4f5c590d1d44 (diff)
anv: Implement VK_EXT_external_memory_host
v2: Ignore the import if handleType == 0. (Jason)

Reviewed-by: Lionel Landwerlin <[email protected]>
Reviewed-by: Jason Ekstrand <[email protected]>
Diffstat (limited to 'src/intel/vulkan/anv_allocator.c')
-rw-r--r--   src/intel/vulkan/anv_allocator.c   60
1 file changed, 60 insertions(+), 0 deletions(-)
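For context, a hypothetical application-side sketch of the path this commit enables: the application asks which memory types can back a host pointer with vkGetMemoryHostPointerPropertiesEXT, then chains VkImportMemoryHostPointerInfoEXT into vkAllocateMemory; inside anv that import eventually reaches the new anv_bo_cache_import_host_ptr() in the diff below. The helper name import_host_allocation and the 4096-byte alignment are illustrative assumptions (real code must honor minImportedHostPointerAlignment from VkPhysicalDeviceExternalMemoryHostPropertiesEXT); none of this is part of the commit itself.

/* Hypothetical usage sketch, not part of this commit.  Assumes the device was
 * created with VK_EXT_external_memory_host enabled and that 4096 satisfies
 * minImportedHostPointerAlignment.
 */
#define _POSIX_C_SOURCE 200112L
#include <stdlib.h>
#include <vulkan/vulkan.h>

static VkDeviceMemory
import_host_allocation(VkDevice device, VkDeviceSize size)
{
   /* Both the pointer and the allocation size must be multiples of the
    * minimum import alignment; 4096 is only a placeholder here.
    */
   void *host_ptr = NULL;
   if (posix_memalign(&host_ptr, 4096, size) != 0)
      return VK_NULL_HANDLE;

   /* Ask the driver which memory types can back this host pointer. */
   PFN_vkGetMemoryHostPointerPropertiesEXT get_props =
      (PFN_vkGetMemoryHostPointerPropertiesEXT)
      vkGetDeviceProcAddr(device, "vkGetMemoryHostPointerPropertiesEXT");
   VkMemoryHostPointerPropertiesEXT props = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT,
   };
   if (get_props == NULL ||
       get_props(device, VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT,
                 host_ptr, &props) != VK_SUCCESS) {
      free(host_ptr);
      return VK_NULL_HANDLE;
   }

   /* Chain the import struct into vkAllocateMemory.  A handleType of 0 here
    * would mean "no import", which is the case the v2 note above ignores.
    */
   const VkImportMemoryHostPointerInfoEXT import_info = {
      .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT,
      .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT,
      .pHostPointer = host_ptr,
   };
   const VkMemoryAllocateInfo alloc_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &import_info,
      .allocationSize = size,
      /* Picking the lowest supported memory type is a simplification. */
      .memoryTypeIndex = (uint32_t)__builtin_ctz(props.memoryTypeBits),
   };

   VkDeviceMemory memory = VK_NULL_HANDLE;
   if (vkAllocateMemory(device, &alloc_info, NULL, &memory) != VK_SUCCESS) {
      free(host_ptr);
      return VK_NULL_HANDLE;
   }
   return memory;
}

Note that the host allocation must stay valid for as long as the VkDeviceMemory object that imports it exists.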
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index 6ed5634002c..dd967165bbb 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -1712,6 +1712,66 @@ anv_bo_cache_alloc(struct anv_device *device,
}
VkResult
+anv_bo_cache_import_host_ptr(struct anv_device *device,
+                             struct anv_bo_cache *cache,
+                             void *host_ptr, uint32_t size,
+                             uint64_t bo_flags, struct anv_bo **bo_out)
+{
+   assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
+   assert((bo_flags & ANV_BO_EXTERNAL) == 0);
+
+   uint32_t gem_handle = anv_gem_userptr(device, host_ptr, size);
+   if (!gem_handle)
+      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+
+   pthread_mutex_lock(&cache->mutex);
+
+   struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
+   if (bo) {
+      /* VK_EXT_external_memory_host doesn't require us to handle importing
+       * the same pointer twice at the same time, but we don't get in the way.
+       * If the kernel gives us the same gem_handle, only succeed if the flags
+       * match.
+       */
+      if (bo_flags != bo->bo.flags) {
+         pthread_mutex_unlock(&cache->mutex);
+         return vk_errorf(device->instance, NULL,
+                          VK_ERROR_INVALID_EXTERNAL_HANDLE,
+                          "same host pointer imported two different ways");
+      }
+      __sync_fetch_and_add(&bo->refcount, 1);
+   } else {
+      bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
+                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+      if (!bo) {
+         anv_gem_close(device, gem_handle);
+         pthread_mutex_unlock(&cache->mutex);
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
+
+      bo->refcount = 1;
+
+      anv_bo_init(&bo->bo, gem_handle, size);
+      bo->bo.flags = bo_flags;
+
+      if (!anv_vma_alloc(device, &bo->bo)) {
+         anv_gem_close(device, bo->bo.gem_handle);
+         pthread_mutex_unlock(&cache->mutex);
+         vk_free(&device->alloc, bo);
+         return vk_errorf(device->instance, NULL,
+                          VK_ERROR_OUT_OF_DEVICE_MEMORY,
+                          "failed to allocate virtual address for BO");
+      }
+
+      _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
+   }
+
+   pthread_mutex_unlock(&cache->mutex);
+   *bo_out = &bo->bo;
+
+   return VK_SUCCESS;
+}
+
+VkResult
anv_bo_cache_import(struct anv_device *device,
                    struct anv_bo_cache *cache,
                    int fd, uint64_t bo_flags,