author    Bas Nieuwenhuizen <[email protected]>    2018-11-26 16:26:36 +0100
committer Bas Nieuwenhuizen <[email protected]>    2018-12-04 01:21:38 +0100
commit    3bf48741e128b60f6430b32cc47197f62075b1e9 (patch)
tree      982b7486f44926a48a04f52442899beb21ec1dcd /src/amd
parent    51091b3e1f212be956f91ac5214191c14e83ac59 (diff)
radv/android: Use buffer metadata to determine scanout compat.
These days we don't always allocate scanout compatible textures anymore. That
does mean we have to fix the radv android WSI though.

Fixes: b1444c9ccb0 "radv: Implement VK_ANDROID_native_buffer."
Acked-by: Samuel Pitoiset <[email protected]>
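For illustration only, here is a minimal, self-contained sketch of the check this
commit introduces, written directly against libdrm_amdgpu rather than radv's winsys
abstraction. The helper name bo_is_scanout_compatible and the is_gfx9_or_later
parameter are hypothetical stand-ins for radv's chip_class >= GFX9 check;
amdgpu_bo_query_info() and AMDGPU_TILING_GET() are the same libdrm entry points the
patch itself uses.

/* Illustrative sketch only (not part of the commit): derive scanout
 * compatibility from a BO's tiling metadata queried via libdrm_amdgpu. */
#include <stdbool.h>
#include <stdint.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

static bool bo_is_scanout_compatible(amdgpu_bo_handle bo, bool is_gfx9_or_later)
{
	struct amdgpu_bo_info info = {0};

	/* Ask the kernel for the metadata the exporter stored on the BO. */
	if (amdgpu_bo_query_info(bo, &info))
		return false; /* be conservative if the query fails */

	uint64_t tiling = info.metadata.tiling_info;

	if (is_gfx9_or_later) {
		/* GFX9+: linear (0) and the display swizzle modes
		 * (mode % 4 == 2) can be scanned out. */
		unsigned swizzle = AMDGPU_TILING_GET(tiling, SWIZZLE_MODE);
		return swizzle == 0 || swizzle % 4 == 2;
	}

	/* Pre-GFX9: micro tile mode 0 is DISPLAY, i.e. scanout capable. */
	return AMDGPU_TILING_GET(tiling, MICRO_TILE_MODE) == 0;
}

radv cannot call libdrm directly from the WSI code, which is why the actual patch
below routes the query through a new buffer_get_metadata winsys hook and only then
creates the VkImage with the matching scanout flag.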
Diffstat (limited to 'src/amd')
-rw-r--r--  src/amd/vulkan/radv_android.c                   73
-rw-r--r--  src/amd/vulkan/radv_radeon_winsys.h              2
-rw-r--r--  src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c   53
3 files changed, 93 insertions, 35 deletions
diff --git a/src/amd/vulkan/radv_android.c b/src/amd/vulkan/radv_android.c
index 93799b87b8f..1a4425f26a5 100644
--- a/src/amd/vulkan/radv_android.c
+++ b/src/amd/vulkan/radv_android.c
@@ -110,27 +110,6 @@ radv_image_from_gralloc(VkDevice device_h,
struct radv_bo *bo = NULL;
VkResult result;
- VkImageCreateInfo updated_base_info = *base_info;
-
- VkExternalMemoryImageCreateInfo external_memory_info = {
- .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
- .pNext = updated_base_info.pNext,
- .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
- };
-
- updated_base_info.pNext = &external_memory_info;
-
- result = radv_image_create(device_h,
- &(struct radv_image_create_info) {
- .vk_info = &updated_base_info,
- .scanout = true,
- .no_metadata_planes = true},
- alloc,
- &image_h);
-
- if (result != VK_SUCCESS)
- return result;
-
if (gralloc_info->handle->numFds != 1) {
return vk_errorf(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR,
"VkNativeBufferANDROID::handle::numFds is %d, "
@@ -143,23 +122,14 @@ radv_image_from_gralloc(VkDevice device_h,
*/
int dma_buf = gralloc_info->handle->data[0];
- image = radv_image_from_handle(image_h);
-
VkDeviceMemory memory_h;
- const VkMemoryDedicatedAllocateInfoKHR ded_alloc = {
- .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
- .pNext = NULL,
- .buffer = VK_NULL_HANDLE,
- .image = image_h
- };
-
const VkImportMemoryFdInfoKHR import_info = {
.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
- .pNext = &ded_alloc,
.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,
.fd = dup(dma_buf),
};
+
/* Find the first VRAM memory type, or GART for PRIME images. */
int memory_type_index = -1;
for (int i = 0; i < device->physical_device->memory_properties.memoryTypeCount; ++i) {
@@ -178,14 +148,49 @@ radv_image_from_gralloc(VkDevice device_h,
&(VkMemoryAllocateInfo) {
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.pNext = &import_info,
- .allocationSize = image->size,
+ /* Max buffer size, unused for imports */
+ .allocationSize = 0x7FFFFFFF,
.memoryTypeIndex = memory_type_index,
},
alloc,
&memory_h);
if (result != VK_SUCCESS)
+ return result;
+
+ struct radeon_bo_metadata md;
+ device->ws->buffer_get_metadata(radv_device_memory_from_handle(memory_h)->bo, &md);
+
+ bool is_scanout;
+ if (device->physical_device->rad_info.chip_class >= GFX9) {
+ /* Copied from radeonsi, but is hacky so should be cleaned up. */
+ is_scanout = md.u.gfx9.swizzle_mode == 0 || md.u.gfx9.swizzle_mode % 4 == 2;
+ } else {
+ is_scanout = md.u.legacy.scanout;
+ }
+
+ VkImageCreateInfo updated_base_info = *base_info;
+
+ VkExternalMemoryImageCreateInfo external_memory_info = {
+ .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
+ .pNext = updated_base_info.pNext,
+ .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+ };
+
+ updated_base_info.pNext = &external_memory_info;
+
+ result = radv_image_create(device_h,
+ &(struct radv_image_create_info) {
+ .vk_info = &updated_base_info,
+ .scanout = is_scanout,
+ .no_metadata_planes = true},
+ alloc,
+ &image_h);
+
+ if (result != VK_SUCCESS)
goto fail_create_image;
+ image = radv_image_from_handle(image_h);
+
radv_BindImageMemory(device_h, image_h, memory_h, 0);
image->owned_memory = memory_h;
@@ -195,9 +200,7 @@ radv_image_from_gralloc(VkDevice device_h,
return VK_SUCCESS;
fail_create_image:
-fail_size:
- radv_DestroyImage(device_h, image_h, alloc);
-
+ radv_FreeMemory(device_h, memory_h, alloc);
return result;
}
diff --git a/src/amd/vulkan/radv_radeon_winsys.h b/src/amd/vulkan/radv_radeon_winsys.h
index 7977d46229e..e9d541ab150 100644
--- a/src/amd/vulkan/radv_radeon_winsys.h
+++ b/src/amd/vulkan/radv_radeon_winsys.h
@@ -223,6 +223,8 @@ struct radeon_winsys {
void (*buffer_set_metadata)(struct radeon_winsys_bo *bo,
struct radeon_bo_metadata *md);
+ void (*buffer_get_metadata)(struct radeon_winsys_bo *bo,
+ struct radeon_bo_metadata *md);
void (*buffer_virtual_bind)(struct radeon_winsys_bo *parent,
uint64_t offset, uint64_t size,
diff --git a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c
index 482cf0f6659..ec126bfc7cb 100644
--- a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c
+++ b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c
@@ -540,6 +540,21 @@ radv_amdgpu_winsys_get_fd(struct radeon_winsys *_ws,
return true;
}
+static unsigned eg_tile_split(unsigned tile_split)
+{
+ switch (tile_split) {
+ case 0: tile_split = 64; break;
+ case 1: tile_split = 128; break;
+ case 2: tile_split = 256; break;
+ case 3: tile_split = 512; break;
+ default:
+ case 4: tile_split = 1024; break;
+ case 5: tile_split = 2048; break;
+ case 6: tile_split = 4096; break;
+ }
+ return tile_split;
+}
+
static unsigned radv_eg_tile_split_rev(unsigned eg_tile_split)
{
switch (eg_tile_split) {
@@ -593,6 +608,43 @@ radv_amdgpu_winsys_bo_set_metadata(struct radeon_winsys_bo *_bo,
amdgpu_bo_set_metadata(bo->bo, &metadata);
}
+static void
+radv_amdgpu_winsys_bo_get_metadata(struct radeon_winsys_bo *_bo,
+ struct radeon_bo_metadata *md)
+{
+ struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
+ struct amdgpu_bo_info info = {0};
+
+ int r = amdgpu_bo_query_info(bo->bo, &info);
+ if (r)
+ return;
+
+ uint64_t tiling_flags = info.metadata.tiling_info;
+
+ if (bo->ws->info.chip_class >= GFX9) {
+ md->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
+ } else {
+ md->u.legacy.microtile = RADEON_LAYOUT_LINEAR;
+ md->u.legacy.macrotile = RADEON_LAYOUT_LINEAR;
+
+ if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
+ md->u.legacy.macrotile = RADEON_LAYOUT_TILED;
+ else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
+ md->u.legacy.microtile = RADEON_LAYOUT_TILED;
+
+ md->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
+ md->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
+ md->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
+ md->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
+ md->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
+ md->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+ md->u.legacy.scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
+ }
+
+ md->size_metadata = info.metadata.size_metadata;
+ memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
+}
+
void radv_amdgpu_bo_init_functions(struct radv_amdgpu_winsys *ws)
{
ws->base.buffer_create = radv_amdgpu_winsys_bo_create;
@@ -603,5 +655,6 @@ void radv_amdgpu_bo_init_functions(struct radv_amdgpu_winsys *ws)
ws->base.buffer_from_fd = radv_amdgpu_winsys_bo_from_fd;
ws->base.buffer_get_fd = radv_amdgpu_winsys_get_fd;
ws->base.buffer_set_metadata = radv_amdgpu_winsys_bo_set_metadata;
+ ws->base.buffer_get_metadata = radv_amdgpu_winsys_bo_get_metadata;
ws->base.buffer_virtual_bind = radv_amdgpu_winsys_bo_virtual_bind;
}