author     Bas Nieuwenhuizen <[email protected]>   2018-08-08 22:23:57 +0000
committer  Chia-I Wu <[email protected]>           2019-03-11 10:01:15 -0700
commit     26380b3a9f8fd513dc4da86798f3c15191914fc2 (patch)
tree       1749fb5529155b2f5175a54bd1e011b4f9a73386 /src/freedreno
parent     d086d16b8165244db53f20dbf60c921f4bb38f38 (diff)
turnip: Add driver skeleton (v2)

meson files have been updated, autotools and android still need updating.
Only build tested.

v2 (chadv):
  - Rebase onto master.
  - Fix build breakage in Python scripts.
  - Drop the WSI code. The internal WSI apis have changed recently, and
    will likely change again before the driver goes upstream. To avoid
    unnecessary rebase work, let's drop the WSI code and re-add it when
    we're ready to really use WSI.

(olv, after rebase) do not enable freedreno by default on ARM
Diffstat (limited to 'src/freedreno')
-rw-r--r--  src/freedreno/meson.build | 4
-rw-r--r--  src/freedreno/vulkan/Android.mk | 166
-rw-r--r--  src/freedreno/vulkan/Makefile.am | 200
-rw-r--r--  src/freedreno/vulkan/Makefile.sources | 93
-rw-r--r--  src/freedreno/vulkan/meson.build | 127
-rw-r--r--  src/freedreno/vulkan/tu_android.c | 390
-rw-r--r--  src/freedreno/vulkan/tu_cmd_buffer.c | 936
-rw-r--r--  src/freedreno/vulkan/tu_descriptor_set.c | 565
-rw-r--r--  src/freedreno/vulkan/tu_descriptor_set.h | 102
-rw-r--r--  src/freedreno/vulkan/tu_device.c | 1839
-rw-r--r--  src/freedreno/vulkan/tu_entrypoints_gen.py | 506
-rw-r--r--  src/freedreno/vulkan/tu_extensions.py | 275
-rw-r--r--  src/freedreno/vulkan/tu_formats.c | 410
-rw-r--r--  src/freedreno/vulkan/tu_icd.py | 47
-rw-r--r--  src/freedreno/vulkan/tu_image.c | 243
-rw-r--r--  src/freedreno/vulkan/tu_meta_blit.c | 38
-rw-r--r--  src/freedreno/vulkan/tu_meta_buffer.c | 28
-rw-r--r--  src/freedreno/vulkan/tu_meta_clear.c | 53
-rw-r--r--  src/freedreno/vulkan/tu_meta_copy.c | 113
-rw-r--r--  src/freedreno/vulkan/tu_meta_resolve.c | 40
-rw-r--r--  src/freedreno/vulkan/tu_pass.c | 414
-rw-r--r--  src/freedreno/vulkan/tu_pipeline.c | 113
-rw-r--r--  src/freedreno/vulkan/tu_pipeline_cache.c | 424
-rw-r--r--  src/freedreno/vulkan/tu_private.h | 1221
-rw-r--r--  src/freedreno/vulkan/tu_query.c | 123
-rw-r--r--  src/freedreno/vulkan/tu_util.c | 115
-rw-r--r--  src/freedreno/vulkan/tu_util.h | 11
-rw-r--r--  src/freedreno/vulkan/vk_format.h | 545
-rw-r--r--  src/freedreno/vulkan/vk_format_layout.csv | 188
-rw-r--r--  src/freedreno/vulkan/vk_format_parse.py | 388
-rw-r--r--  src/freedreno/vulkan/vk_format_table.py | 173
31 files changed, 9890 insertions, 0 deletions
diff --git a/src/freedreno/meson.build b/src/freedreno/meson.build
index a3db4b1622b..3f77b1d933e 100644
--- a/src/freedreno/meson.build
+++ b/src/freedreno/meson.build
@@ -22,3 +22,7 @@ inc_freedreno = include_directories(['.', './registers'])
subdir('drm')
subdir('ir3')
+
+if with_freedreno_vk
+ subdir('vulkan')
+endif
diff --git a/src/freedreno/vulkan/Android.mk b/src/freedreno/vulkan/Android.mk
new file mode 100644
index 00000000000..f8df7a27f93
--- /dev/null
+++ b/src/freedreno/vulkan/Android.mk
@@ -0,0 +1,166 @@
+# Copyright © 2018 Advanced Micro Devices, Inc.
+# Copyright © 2018 Mauro Rossi [email protected]
+
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+LOCAL_PATH := $(call my-dir)
+
+# get VULKAN_FILES and VULKAN_GENERATED_FILES
+include $(LOCAL_PATH)/Makefile.sources
+
+# The gallium includes are for the util/u_math.h include from main/macros.h
+
+TU_COMMON_INCLUDES := \
+ $(MESA_TOP)/include \
+ $(MESA_TOP)/src/ \
+ $(MESA_TOP)/src/vulkan/wsi \
+ $(MESA_TOP)/src/vulkan/util \
+ $(MESA_TOP)/src/amd \
+ $(MESA_TOP)/src/amd/common \
+ $(MESA_TOP)/src/compiler \
+ $(MESA_TOP)/src/mapi \
+ $(MESA_TOP)/src/mesa \
+ $(MESA_TOP)/src/mesa/drivers/dri/common \
+ $(MESA_TOP)/src/gallium/auxiliary \
+ $(MESA_TOP)/src/gallium/include \
+ frameworks/native/vulkan/include
+
+TU_SHARED_LIBRARIES := libdrm_amdgpu
+
+ifeq ($(filter $(MESA_ANDROID_MAJOR_VERSION), 4 5 6 7),)
+TU_SHARED_LIBRARIES += libnativewindow
+endif
+
+#
+# libmesa_tu_common
+#
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := libmesa_tu_common
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+
+intermediates := $(call local-generated-sources-dir)
+
+LOCAL_SRC_FILES := \
+ $(VULKAN_FILES)
+
+LOCAL_CFLAGS += -DFORCE_BUILD_AMDGPU # instructs LLVM to declare LLVMInitializeAMDGPU* functions
+
+$(call mesa-build-with-llvm)
+
+LOCAL_C_INCLUDES := \
+ $(TU_COMMON_INCLUDES) \
+ $(call generated-sources-dir-for,STATIC_LIBRARIES,libmesa_amd_common,,) \
+ $(call generated-sources-dir-for,STATIC_LIBRARIES,libmesa_nir,,)/nir \
+ $(call generated-sources-dir-for,STATIC_LIBRARIES,libmesa_tu_common,,) \
+ $(call generated-sources-dir-for,STATIC_LIBRARIES,libmesa_vulkan_util,,)/util
+
+LOCAL_WHOLE_STATIC_LIBRARIES := \
+ libmesa_vulkan_util
+
+LOCAL_GENERATED_SOURCES += $(intermediates)/tu_entrypoints.c
+LOCAL_GENERATED_SOURCES += $(intermediates)/tu_entrypoints.h
+LOCAL_GENERATED_SOURCES += $(intermediates)/tu_extensions.c
+LOCAL_GENERATED_SOURCES += $(intermediates)/tu_extensions.h
+LOCAL_GENERATED_SOURCES += $(intermediates)/vk_format_table.c
+
+TU_ENTRYPOINTS_SCRIPT := $(MESA_TOP)/src/amd/vulkan/tu_entrypoints_gen.py
+TU_EXTENSIONS_SCRIPT := $(MESA_TOP)/src/amd/vulkan/tu_extensions.py
+VK_FORMAT_TABLE_SCRIPT := $(MESA_TOP)/src/amd/vulkan/vk_format_table.py
+VK_FORMAT_PARSE_SCRIPT := $(MESA_TOP)/src/amd/vulkan/vk_format_parse.py
+
+vulkan_api_xml = $(MESA_TOP)/src/vulkan/registry/vk.xml
+vk_format_layout_csv = $(MESA_TOP)/src/amd/vulkan/vk_format_layout.csv
+
+$(intermediates)/tu_entrypoints.c: $(TU_ENTRYPOINTS_SCRIPT) \
+ $(TU_EXTENSIONS_SCRIPT) \
+ $(vulkan_api_xml)
+ @mkdir -p $(dir $@)
+ $(MESA_PYTHON2) $(TU_ENTRYPOINTS_SCRIPT) \
+ --xml $(vulkan_api_xml) \
+ --outdir $(dir $@)
+
+$(intermediates)/tu_entrypoints.h: $(intermediates)/tu_entrypoints.c
+
+$(intermediates)/tu_extensions.c: $(TU_EXTENSIONS_SCRIPT) $(vulkan_api_xml)
+ @mkdir -p $(dir $@)
+ $(MESA_PYTHON2) $(TU_EXTENSIONS_SCRIPT) \
+ --xml $(vulkan_api_xml) \
+ --out-c $@ \
+ --out-h $(addsuffix .h,$(basename $@))
+
+$(intermediates)/tu_extensions.h: $(intermediates)/tu_extensions.c
+
+$(intermediates)/vk_format_table.c: $(VK_FORMAT_TABLE_SCRIPT) \
+ $(VK_FORMAT_PARSE_SCRIPT) \
+ $(vk_format_layout_csv)
+ @mkdir -p $(dir $@)
+ $(MESA_PYTHON2) $(VK_FORMAT_TABLE_SCRIPT) $(vk_format_layout_csv) > $@
+
+LOCAL_SHARED_LIBRARIES += $(TU_SHARED_LIBRARIES)
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+ $(MESA_TOP)/src/amd/vulkan \
+ $(intermediates)
+
+include $(MESA_COMMON_MK)
+include $(BUILD_STATIC_LIBRARY)
+
+#
+# vulkan.tu
+#
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := vulkan.tu
+LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+LOCAL_PROPRIETARY_MODULE := true
+LOCAL_MODULE_RELATIVE_PATH := hw
+
+LOCAL_LDFLAGS += -Wl,--build-id=sha1
+
+LOCAL_SRC_FILES := \
+ $(VULKAN_ANDROID_FILES)
+
+LOCAL_CFLAGS += -DFORCE_BUILD_AMDGPU # instructs LLVM to declare LLVMInitializeAMDGPU* functions
+
+$(call mesa-build-with-llvm)
+
+LOCAL_C_INCLUDES := \
+ $(TU_COMMON_INCLUDES) \
+ $(call generated-sources-dir-for,STATIC_LIBRARIES,libmesa_tu_common,,)
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+ $(MESA_TOP)/src/amd/vulkan \
+ $(intermediates)
+
+LOCAL_WHOLE_STATIC_LIBRARIES := \
+ libmesa_util \
+ libmesa_nir \
+ libmesa_glsl \
+ libmesa_compiler \
+ libmesa_amdgpu_addrlib \
+ libmesa_amd_common \
+ libmesa_tu_common
+
+LOCAL_SHARED_LIBRARIES += $(TU_SHARED_LIBRARIES) libz libsync liblog
+
+include $(MESA_COMMON_MK)
+include $(BUILD_SHARED_LIBRARY)
diff --git a/src/freedreno/vulkan/Makefile.am b/src/freedreno/vulkan/Makefile.am
new file mode 100644
index 00000000000..0d4739b3f16
--- /dev/null
+++ b/src/freedreno/vulkan/Makefile.am
@@ -0,0 +1,200 @@
+# Copyright © 2016 Red Hat
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+include Makefile.sources
+
+noinst_HEADERS = \
+ $(top_srcdir)/include/vulkan/vk_platform.h \
+ $(top_srcdir)/include/vulkan/vulkan_core.h \
+ $(top_srcdir)/include/vulkan/vulkan_wayland.h \
+ $(top_srcdir)/include/vulkan/vulkan_xcb.h \
+ $(top_srcdir)/include/vulkan/vulkan_xlib.h \
+ $(top_srcdir)/include/vulkan/vulkan.h
+
+lib_LTLIBRARIES = libvulkan_radeon.la
+
+# The gallium includes are for the util/u_math.h include from main/macros.h
+
+AM_CPPFLAGS = \
+ -I$(top_srcdir)/include \
+ -I$(top_builddir)/src \
+ -I$(top_srcdir)/src \
+ -I$(top_srcdir)/src/vulkan/wsi \
+ -I$(top_builddir)/src/vulkan/util \
+ -I$(top_srcdir)/src/vulkan/util \
+ -I$(top_srcdir)/src/amd \
+ -I$(top_srcdir)/src/amd/common \
+ -I$(top_builddir)/src/compiler \
+ -I$(top_builddir)/src/compiler/nir \
+ -I$(top_srcdir)/src/compiler \
+ -I$(top_srcdir)/src/mapi \
+ -I$(top_srcdir)/src/mesa \
+ -I$(top_srcdir)/src/mesa/drivers/dri/common \
+ -I$(top_srcdir)/src/gallium/auxiliary \
+ -I$(top_srcdir)/src/gallium/include \
+ $(AMDGPU_CFLAGS) \
+ $(VALGRIND_CFLAGS) \
+ $(DEFINES)
+
+AM_CFLAGS = \
+ $(VISIBILITY_CFLAGS) \
+ $(PTHREAD_CFLAGS) \
+ $(LLVM_CFLAGS)
+
+AM_CXXFLAGS = \
+ $(VISIBILITY_CXXFLAGS) \
+ $(LLVM_CXXFLAGS)
+
+VULKAN_SOURCES = \
+ $(VULKAN_GENERATED_FILES) \
+ $(VULKAN_FILES)
+
+VULKAN_LIB_DEPS = \
+ libvulkan_common.la \
+ $(top_builddir)/src/vulkan/libvulkan_util.la \
+ $(top_builddir)/src/vulkan/libvulkan_wsi.la \
+ $(top_builddir)/src/amd/common/libamd_common.la \
+ $(top_builddir)/src/amd/addrlib/libamdgpu_addrlib.la \
+ $(top_builddir)/src/compiler/nir/libnir.la \
+ $(top_builddir)/src/util/libmesautil.la \
+ $(LLVM_LIBS) \
+ $(LIBELF_LIBS) \
+ $(PTHREAD_LIBS) \
+ $(AMDGPU_LIBS) \
+ $(LIBDRM_LIBS) \
+ $(PTHREAD_LIBS) \
+ $(DLOPEN_LIBS) \
+ -lm
+
+if HAVE_PLATFORM_DRM
+AM_CPPFLAGS += \
+ -DVK_USE_PLATFORM_DISPLAY_KHR
+
+VULKAN_SOURCES += $(VULKAN_WSI_DISPLAY_FILES)
+endif
+
+if HAVE_XLIB_LEASE
+AM_CPPFLAGS += \
+ -DVK_USE_PLATFORM_XLIB_XRANDR_EXT \
+ $(XCB_RANDR_CFLAGS) \
+ $(XLIB_RANDR_CFLAGS)
+
+VULKAN_LIB_DEPS += $(XCB_RANDR_LIBS)
+endif
+
+if HAVE_PLATFORM_X11
+AM_CPPFLAGS += \
+ $(XCB_DRI3_CFLAGS) \
+ -DVK_USE_PLATFORM_XCB_KHR \
+ -DVK_USE_PLATFORM_XLIB_KHR
+
+VULKAN_SOURCES += $(VULKAN_WSI_X11_FILES)
+
+VULKAN_LIB_DEPS += $(XCB_DRI3_LIBS)
+endif
+
+
+if HAVE_PLATFORM_WAYLAND
+AM_CPPFLAGS += \
+ $(WAYLAND_CLIENT_CFLAGS) \
+ -DVK_USE_PLATFORM_WAYLAND_KHR
+
+VULKAN_SOURCES += $(VULKAN_WSI_WAYLAND_FILES)
+
+VULKAN_LIB_DEPS += \
+ $(WAYLAND_CLIENT_LIBS)
+endif
+
+if HAVE_PLATFORM_ANDROID
+AM_CPPFLAGS += $(ANDROID_CPPFLAGS)
+AM_CFLAGS += $(ANDROID_CFLAGS)
+VULKAN_LIB_DEPS += $(ANDROID_LIBS)
+VULKAN_SOURCES += $(VULKAN_ANDROID_FILES)
+endif
+
+noinst_LTLIBRARIES = libvulkan_common.la
+libvulkan_common_la_SOURCES = $(VULKAN_SOURCES)
+
+nodist_EXTRA_libvulkan_radeon_la_SOURCES = dummy.cpp
+libvulkan_radeon_la_SOURCES = $(VULKAN_GEM_FILES)
+
+vulkan_api_xml = $(top_srcdir)/src/vulkan/registry/vk.xml
+
+tu_entrypoints.c: tu_entrypoints_gen.py tu_extensions.py $(vulkan_api_xml)
+ $(MKDIR_GEN)
+ $(AM_V_GEN)$(PYTHON2) $(srcdir)/tu_entrypoints_gen.py \
+ --xml $(vulkan_api_xml) \
+ --outdir $(builddir)
+tu_entrypoints.h: tu_entrypoints.c
+
+tu_extensions.c: tu_extensions.py \
+ $(vulkan_api_xml)
+ $(MKDIR_GEN)
+ $(AM_V_GEN)$(PYTHON2) $(srcdir)/tu_extensions.py \
+ --xml $(vulkan_api_xml) \
+ --out-c tu_extensions.c \
+ --out-h tu_extensions.h
+tu_extensions.h: tu_extensions.c
+
+vk_format_table.c: vk_format_table.py \
+ vk_format_parse.py \
+ vk_format_layout.csv
+ $(PYTHON2) $(srcdir)/vk_format_table.py $(srcdir)/vk_format_layout.csv > $@
+
+BUILT_SOURCES = $(VULKAN_GENERATED_FILES)
+CLEANFILES = $(BUILT_SOURCES) dev_icd.json radeon_icd.@host_cpu@.json
+EXTRA_DIST = \
+ $(top_srcdir)/include/vulkan/vk_icd.h \
+ tu_entrypoints_gen.py \
+ tu_extensions.py \
+ tu_icd.py \
+ vk_format_layout.csv \
+ vk_format_parse.py \
+ vk_format_table.py \
+ meson.build
+
+libvulkan_radeon_la_LIBADD = $(VULKAN_LIB_DEPS)
+
+libvulkan_radeon_la_LDFLAGS = \
+ -shared \
+ -module \
+ -no-undefined \
+ -avoid-version \
+ $(BSYMBOLIC) \
+ $(LLVM_LDFLAGS) \
+ $(GC_SECTIONS) \
+ $(LD_NO_UNDEFINED)
+
+
+icdconfdir = @VULKAN_ICD_INSTALL_DIR@
+icdconf_DATA = radeon_icd.@host_cpu@.json
+# The following is used for development purposes, by setting VK_ICD_FILENAMES.
+noinst_DATA = dev_icd.json
+
+dev_icd.json : tu_extensions.py tu_icd.py
+ $(AM_V_GEN)$(PYTHON2) $(srcdir)/tu_icd.py \
+ --lib-path="${abs_top_builddir}/${LIB_DIR}" --out $@
+
+radeon_icd.@host_cpu@.json : tu_extensions.py tu_icd.py
+ $(AM_V_GEN)$(PYTHON2) $(srcdir)/tu_icd.py \
+ --lib-path="${libdir}" --out $@
+
+include $(top_srcdir)/install-lib-links.mk
diff --git a/src/freedreno/vulkan/Makefile.sources b/src/freedreno/vulkan/Makefile.sources
new file mode 100644
index 00000000000..8d64ab65e76
--- /dev/null
+++ b/src/freedreno/vulkan/Makefile.sources
@@ -0,0 +1,93 @@
+# Copyright © 2016 Red Hat
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+TU_WS_AMDGPU_FILES := \
+ winsys/amdgpu/tu_amdgpu_bo.c \
+ winsys/amdgpu/tu_amdgpu_bo.h \
+ winsys/amdgpu/tu_amdgpu_cs.c \
+ winsys/amdgpu/tu_amdgpu_cs.h \
+ winsys/amdgpu/tu_amdgpu_surface.c \
+ winsys/amdgpu/tu_amdgpu_surface.h \
+ winsys/amdgpu/tu_amdgpu_winsys.c \
+ winsys/amdgpu/tu_amdgpu_winsys.h \
+ winsys/amdgpu/tu_amdgpu_winsys_public.h
+
+VULKAN_FILES := \
+ tu_cmd_buffer.c \
+ tu_cs.h \
+ tu_debug.c \
+ tu_debug.h \
+ tu_device.c \
+ tu_descriptor_set.c \
+ tu_descriptor_set.h \
+ tu_formats.c \
+ tu_image.c \
+ tu_meta.c \
+ tu_meta.h \
+ tu_meta_blit.c \
+ tu_meta_blit2d.c \
+ tu_meta_buffer.c \
+ tu_meta_bufimage.c \
+ tu_meta_clear.c \
+ tu_meta_copy.c \
+ tu_meta_decompress.c \
+ tu_meta_fast_clear.c \
+ tu_meta_resolve.c \
+ tu_meta_resolve_cs.c \
+ tu_meta_resolve_fs.c \
+ tu_nir_to_llvm.c \
+ tu_llvm_helper.cpp \
+ tu_pass.c \
+ tu_pipeline.c \
+ tu_pipeline_cache.c \
+ tu_private.h \
+ tu_radeon_winsys.h \
+ tu_shader.c \
+ tu_shader_info.c \
+ tu_shader.h \
+ tu_shader_helper.h \
+ tu_query.c \
+ tu_util.c \
+ tu_util.h \
+ tu_wsi.c \
+ si_cmd_buffer.c \
+ vk_format.h \
+ $(TU_WS_AMDGPU_FILES)
+
+VULKAN_ANDROID_FILES := \
+ tu_android.c
+
+VULKAN_WSI_WAYLAND_FILES := \
+ tu_wsi_wayland.c
+
+VULKAN_WSI_X11_FILES := \
+ tu_wsi_x11.c
+
+VULKAN_WSI_DISPLAY_FILES := \
+ tu_wsi_display.c
+
+VULKAN_GENERATED_FILES := \
+ tu_entrypoints.c \
+ tu_entrypoints.h \
+ tu_extensions.c \
+ tu_extensions.h \
+ vk_format_table.c
+
diff --git a/src/freedreno/vulkan/meson.build b/src/freedreno/vulkan/meson.build
new file mode 100644
index 00000000000..b7de6bd5bb8
--- /dev/null
+++ b/src/freedreno/vulkan/meson.build
@@ -0,0 +1,127 @@
+# Copyright © 2017 Intel Corporation
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+tu_entrypoints = custom_target(
+ 'tu_entrypoints.[ch]',
+ input : ['tu_entrypoints_gen.py', vk_api_xml],
+ output : ['tu_entrypoints.h', 'tu_entrypoints.c'],
+ command : [
+ prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--outdir',
+ meson.current_build_dir()
+ ],
+ depend_files : files('tu_extensions.py'),
+)
+
+tu_extensions_c = custom_target(
+ 'tu_extensions.c',
+ input : ['tu_extensions.py', vk_api_xml],
+ output : ['tu_extensions.c', 'tu_extensions.h'],
+ command : [
+ prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--out-c', '@OUTPUT0@',
+ '--out-h', '@OUTPUT1@'
+ ],
+)
+
+vk_format_table_c = custom_target(
+ 'vk_format_table.c',
+ input : ['vk_format_table.py', 'vk_format_layout.csv'],
+ output : 'vk_format_table.c',
+ command : [prog_python, '@INPUT@'],
+ depend_files : files('vk_format_parse.py'),
+ capture : true,
+)
+
+libtu_files = files(
+ 'tu_cmd_buffer.c',
+ 'tu_device.c',
+ 'tu_descriptor_set.c',
+ 'tu_descriptor_set.h',
+ 'tu_formats.c',
+ 'tu_image.c',
+ 'tu_meta_blit.c',
+ 'tu_meta_buffer.c',
+ 'tu_meta_clear.c',
+ 'tu_meta_copy.c',
+ 'tu_meta_resolve.c',
+ 'tu_pass.c',
+ 'tu_pipeline.c',
+ 'tu_pipeline_cache.c',
+ 'tu_private.h',
+ 'tu_query.c',
+ 'tu_util.c',
+ 'tu_util.h',
+ 'vk_format.h',
+)
+
+tu_deps = []
+tu_flags = []
+
+libvulkan_freedreno = shared_library(
+ 'vulkan_freedreno',
+ [libtu_files, tu_entrypoints, tu_extensions_c, vk_format_table_c],
+ include_directories : [
+ inc_common, inc_compiler, inc_vulkan_util,
+ ],
+ link_with : [
+ libvulkan_util,
+ libmesa_util,
+ ],
+ dependencies : [
+ dep_dl,
+ dep_elf,
+ dep_libdrm,
+ dep_llvm,
+ dep_m,
+ dep_thread,
+ dep_valgrind,
+ idep_nir,
+ ],
+ c_args : [c_vis_args, no_override_init_args, tu_flags],
+ link_args : [ld_args_bsymbolic, ld_args_gc_sections],
+ install : true,
+)
+
+freedreno_icd = custom_target(
+ 'freedreno_icd',
+ input : 'tu_icd.py',
+ output : 'freedreno_icd.@0@.json'.format(host_machine.cpu()),
+ command : [
+ prog_python, '@INPUT@',
+ '--lib-path', join_paths(get_option('prefix'), get_option('libdir')),
+ '--out', '@OUTPUT@',
+ ],
+ depend_files : files('tu_extensions.py'),
+ build_by_default : true,
+ install_dir : with_vulkan_icd_dir,
+ install : true,
+)
+
+tu_dev_icd = custom_target(
+ 'tu_dev_icd',
+ input : 'tu_icd.py',
+ output : 'dev_icd.json',
+ command : [
+ prog_python, '@INPUT@', '--lib-path', meson.current_build_dir(),
+ '--out', '@OUTPUT@'
+ ],
+ depend_files : files('tu_extensions.py'),
+ build_by_default : true,
+ install : false,
+)
diff --git a/src/freedreno/vulkan/tu_android.c b/src/freedreno/vulkan/tu_android.c
new file mode 100644
index 00000000000..fbc1bf84b84
--- /dev/null
+++ b/src/freedreno/vulkan/tu_android.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright © 2017, Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <hardware/gralloc.h>
+#include <hardware/hardware.h>
+#include <hardware/hwvulkan.h>
+#include <libsync.h>
+#include <vulkan/vk_android_native_buffer.h>
+#include <vulkan/vk_icd.h>
+
+#include "tu_private.h"
+
+static int
+tu_hal_open(const struct hw_module_t *mod,
+ const char *id,
+ struct hw_device_t **dev);
+static int
+tu_hal_close(struct hw_device_t *dev);
+
+static void UNUSED
+static_asserts(void)
+{
+ STATIC_ASSERT(HWVULKAN_DISPATCH_MAGIC == ICD_LOADER_MAGIC);
+}
+
+PUBLIC struct hwvulkan_module_t HAL_MODULE_INFO_SYM = {
+ .common =
+ {
+ .tag = HARDWARE_MODULE_TAG,
+ .module_api_version = HWVULKAN_MODULE_API_VERSION_0_1,
+ .hal_api_version = HARDWARE_MAKE_API_VERSION(1, 0),
+ .id = HWVULKAN_HARDWARE_MODULE_ID,
+ .name = "AMD Vulkan HAL",
+ .author = "Google",
+ .methods =
+ &(hw_module_methods_t){
+ .open = tu_hal_open,
+ },
+ },
+};
+
+/* If any bits in test_mask are set, then unset them and return true. */
+static inline bool
+unmask32(uint32_t *inout_mask, uint32_t test_mask)
+{
+ uint32_t orig_mask = *inout_mask;
+ *inout_mask &= ~test_mask;
+ return *inout_mask != orig_mask;
+}
+
+static int
+tu_hal_open(const struct hw_module_t *mod,
+ const char *id,
+ struct hw_device_t **dev)
+{
+ assert(mod == &HAL_MODULE_INFO_SYM.common);
+ assert(strcmp(id, HWVULKAN_DEVICE_0) == 0);
+
+ hwvulkan_device_t *hal_dev = malloc(sizeof(*hal_dev));
+ if (!hal_dev)
+ return -1;
+
+ *hal_dev = (hwvulkan_device_t){
+ .common =
+ {
+ .tag = HARDWARE_DEVICE_TAG,
+ .version = HWVULKAN_DEVICE_API_VERSION_0_1,
+ .module = &HAL_MODULE_INFO_SYM.common,
+ .close = tu_hal_close,
+ },
+ .EnumerateInstanceExtensionProperties =
+ tu_EnumerateInstanceExtensionProperties,
+ .CreateInstance = tu_CreateInstance,
+ .GetInstanceProcAddr = tu_GetInstanceProcAddr,
+ };
+
+ *dev = &hal_dev->common;
+ return 0;
+}
+
+static int
+tu_hal_close(struct hw_device_t *dev)
+{
+ /* hwvulkan.h claims that hw_device_t::close() is never called. */
+ return -1;
+}
+
+VkResult
+tu_image_from_gralloc(VkDevice device_h,
+ const VkImageCreateInfo *base_info,
+ const VkNativeBufferANDROID *gralloc_info,
+ const VkAllocationCallbacks *alloc,
+ VkImage *out_image_h)
+
+{
+ TU_FROM_HANDLE(tu_device, device, device_h);
+ VkImage image_h = VK_NULL_HANDLE;
+ struct tu_image *image = NULL;
+ struct tu_bo *bo = NULL;
+ VkResult result;
+
+ result = tu_image_create(
+ device_h,
+ &(struct tu_image_create_info){
+ .vk_info = base_info, .scanout = true, .no_metadata_planes = true },
+ alloc,
+ &image_h);
+
+ if (result != VK_SUCCESS)
+ return result;
+
+ if (gralloc_info->handle->numFds != 1) {
+ return vk_errorf(device->instance,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR,
+ "VkNativeBufferANDROID::handle::numFds is %d, "
+ "expected 1",
+ gralloc_info->handle->numFds);
+ }
+
+ /* Do not close the gralloc handle's dma_buf. The lifetime of the dma_buf
+ * must exceed that of the gralloc handle, and we do not own the gralloc
+ * handle.
+ */
+ int dma_buf = gralloc_info->handle->data[0];
+
+ image = tu_image_from_handle(image_h);
+
+ VkDeviceMemory memory_h;
+
+ const VkMemoryDedicatedAllocateInfoKHR ded_alloc = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
+ .pNext = NULL,
+ .buffer = VK_NULL_HANDLE,
+ .image = image_h
+ };
+
+ const VkImportMemoryFdInfoKHR import_info = {
+ .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
+ .pNext = &ded_alloc,
+ .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,
+ .fd = dup(dma_buf),
+ };
+ /* Find the first VRAM memory type, or GART for PRIME images. */
+ int memory_type_index = -1;
+ for (int i = 0;
+ i < device->physical_device->memory_properties.memoryTypeCount;
+ ++i) {
+ bool is_local =
+ !!(device->physical_device->memory_properties.memoryTypes[i]
+ .propertyFlags &
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
+ if (is_local) {
+ memory_type_index = i;
+ break;
+ }
+ }
+
+ /* fallback */
+ if (memory_type_index == -1)
+ memory_type_index = 0;
+
+ result =
+ tu_AllocateMemory(device_h,
+ &(VkMemoryAllocateInfo){
+ .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ .pNext = &import_info,
+ .allocationSize = image->size,
+ .memoryTypeIndex = memory_type_index,
+ },
+ alloc,
+ &memory_h);
+ if (result != VK_SUCCESS)
+ goto fail_create_image;
+
+ tu_BindImageMemory(device_h, image_h, memory_h, 0);
+
+ image->owned_memory = memory_h;
+ /* Don't clobber the out-parameter until success is certain. */
+ *out_image_h = image_h;
+
+ return VK_SUCCESS;
+
+fail_create_image:
+fail_size:
+ tu_DestroyImage(device_h, image_h, alloc);
+
+ return result;
+}
+
+VkResult
+tu_GetSwapchainGrallocUsageANDROID(VkDevice device_h,
+ VkFormat format,
+ VkImageUsageFlags imageUsage,
+ int *grallocUsage)
+{
+ TU_FROM_HANDLE(tu_device, device, device_h);
+ struct tu_physical_device *phys_dev = device->physical_device;
+ VkPhysicalDevice phys_dev_h = tu_physical_device_to_handle(phys_dev);
+ VkResult result;
+
+ *grallocUsage = 0;
+
+ /* WARNING: Android Nougat's libvulkan.so hardcodes the VkImageUsageFlags
+ * returned to applications via
+ * VkSurfaceCapabilitiesKHR::supportedUsageFlags.
+ * The relevant code in libvulkan/swapchain.cpp contains this fun comment:
+ *
+ * TODO(jessehall): I think these are right, but haven't thought hard
+ * about it. Do we need to query the driver for support of any of
+ * these?
+ *
+ * Any disagreement between this function and the hardcoded
+ * VkSurfaceCapabilitiesKHR::supportedUsageFlags causes tests
+ * dEQP-VK.wsi.android.swapchain.*.image_usage to fail.
+ */
+
+ const VkPhysicalDeviceImageFormatInfo2KHR image_format_info = {
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR,
+ .format = format,
+ .type = VK_IMAGE_TYPE_2D,
+ .tiling = VK_IMAGE_TILING_OPTIMAL,
+ .usage = imageUsage,
+ };
+
+ VkImageFormatProperties2KHR image_format_props = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR,
+ };
+
+ /* Check that requested format and usage are supported. */
+ result = tu_GetPhysicalDeviceImageFormatProperties2(
+ phys_dev_h, &image_format_info, &image_format_props);
+ if (result != VK_SUCCESS) {
+ return vk_errorf(device->instance,
+ result,
+ "tu_GetPhysicalDeviceImageFormatProperties2 failed "
+ "inside %s",
+ __func__);
+ }
+
+ if (unmask32(&imageUsage,
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT))
+ *grallocUsage |= GRALLOC_USAGE_HW_RENDER;
+
+ if (unmask32(&imageUsage,
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_STORAGE_BIT |
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT))
+ *grallocUsage |= GRALLOC_USAGE_HW_TEXTURE;
+
+ /* All VkImageUsageFlags not explicitly checked here are unsupported for
+ * gralloc swapchains.
+ */
+ if (imageUsage != 0) {
+ return vk_errorf(device->instance,
+ VK_ERROR_FORMAT_NOT_SUPPORTED,
+ "unsupported VkImageUsageFlags(0x%x) for gralloc "
+ "swapchain",
+ imageUsage);
+ }
+
+ /*
+ * FINISHME: Advertise all display-supported formats. Mostly
+ * DRM_FORMAT_ARGB2101010 and DRM_FORMAT_ABGR2101010, but need to check
+ * what we need for 30-bit colors.
+ */
+ if (format == VK_FORMAT_B8G8R8A8_UNORM ||
+ format == VK_FORMAT_B5G6R5_UNORM_PACK16) {
+ *grallocUsage |= GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_HW_COMPOSER |
+ GRALLOC_USAGE_EXTERNAL_DISP;
+ }
+
+ if (*grallocUsage == 0)
+ return VK_ERROR_FORMAT_NOT_SUPPORTED;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_AcquireImageANDROID(VkDevice device,
+ VkImage image_h,
+ int nativeFenceFd,
+ VkSemaphore semaphore,
+ VkFence fence)
+{
+ VkResult semaphore_result = VK_SUCCESS, fence_result = VK_SUCCESS;
+
+ if (semaphore != VK_NULL_HANDLE) {
+ int semaphore_fd =
+ nativeFenceFd >= 0 ? dup(nativeFenceFd) : nativeFenceFd;
+ semaphore_result = tu_ImportSemaphoreFdKHR(
+ device,
+ &(VkImportSemaphoreFdInfoKHR){
+ .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
+ .flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR,
+ .fd = semaphore_fd,
+ .semaphore = semaphore,
+ });
+ }
+
+ if (fence != VK_NULL_HANDLE) {
+ int fence_fd = nativeFenceFd >= 0 ? dup(nativeFenceFd) : nativeFenceFd;
+ fence_result = tu_ImportFenceFdKHR(
+ device,
+ &(VkImportFenceFdInfoKHR){
+ .sType = VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR,
+ .flags = VK_FENCE_IMPORT_TEMPORARY_BIT_KHR,
+ .fd = fence_fd,
+ .fence = fence,
+ });
+ }
+
+ close(nativeFenceFd);
+
+ if (semaphore_result != VK_SUCCESS)
+ return semaphore_result;
+ return fence_result;
+}
+
+VkResult
+tu_QueueSignalReleaseImageANDROID(VkQueue _queue,
+ uint32_t waitSemaphoreCount,
+ const VkSemaphore *pWaitSemaphores,
+ VkImage image,
+ int *pNativeFenceFd)
+{
+ TU_FROM_HANDLE(tu_queue, queue, _queue);
+ VkResult result = VK_SUCCESS;
+
+ if (waitSemaphoreCount == 0) {
+ if (pNativeFenceFd)
+ *pNativeFenceFd = -1;
+ return VK_SUCCESS;
+ }
+
+ int fd = -1;
+
+ for (uint32_t i = 0; i < waitSemaphoreCount; ++i) {
+ int tmp_fd;
+ result = tu_GetSemaphoreFdKHR(
+ tu_device_to_handle(queue->device),
+ &(VkSemaphoreGetFdInfoKHR){
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
+ .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR,
+ .semaphore = pWaitSemaphores[i],
+ },
+ &tmp_fd);
+ if (result != VK_SUCCESS) {
+ if (fd >= 0)
+ close(fd);
+ return result;
+ }
+
+ if (fd < 0)
+ fd = tmp_fd;
+ else if (tmp_fd >= 0) {
+ sync_accumulate("tu", &fd, tmp_fd);
+ close(tmp_fd);
+ }
+ }
+
+ if (pNativeFenceFd) {
+ *pNativeFenceFd = fd;
+ } else if (fd >= 0) {
+ close(fd);
+ /* We still need to do the exports, to reset the semaphores, but
+ * otherwise we don't wait on them. */
+ }
+ return VK_SUCCESS;
+}
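
The unmask32() helper above drives the usage translation in
tu_GetSwapchainGrallocUsageANDROID(): each call peels one group of
VkImageUsageFlags off the requested mask, so any bits still set at the end
are by construction unsupported and cause the request to be rejected. A
minimal standalone sketch of that contract, using illustrative bit values
rather than real Vulkan flags:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Same contract as unmask32() in tu_android.c: clear the bits of
     * test_mask from *inout_mask and return true iff any were set. */
    static bool
    unmask32(uint32_t *inout_mask, uint32_t test_mask)
    {
       uint32_t orig_mask = *inout_mask;
       *inout_mask &= ~test_mask;
       return *inout_mask != orig_mask;
    }

    int
    main(void)
    {
       uint32_t usage = 0x09;                 /* illustrative: bits 0 and 3 requested */

       assert(unmask32(&usage, 0x01 | 0x02)); /* bit 0 was set: peeled, returns true */
       assert(usage == 0x08);                 /* only bit 3 remains */
       assert(!unmask32(&usage, 0x04));       /* bit 2 was never set: returns false */
       assert(usage != 0);                    /* a leftover bit like this is what the
                                               * driver reports as unsupported */
       return 0;
    }
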
diff --git a/src/freedreno/vulkan/tu_cmd_buffer.c b/src/freedreno/vulkan/tu_cmd_buffer.c
new file mode 100644
index 00000000000..030e1112811
--- /dev/null
+++ b/src/freedreno/vulkan/tu_cmd_buffer.c
@@ -0,0 +1,936 @@
+/*
+ * Copyright © 2016 Red Hat.
+ * Copyright © 2016 Bas Nieuwenhuizen
+ *
+ * based in part on anv driver which is:
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "tu_private.h"
+#include "vk_format.h"
+
+const struct tu_dynamic_state default_dynamic_state = {
+ .viewport =
+ {
+ .count = 0,
+ },
+ .scissor =
+ {
+ .count = 0,
+ },
+ .line_width = 1.0f,
+ .depth_bias =
+ {
+ .bias = 0.0f,
+ .clamp = 0.0f,
+ .slope = 0.0f,
+ },
+ .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
+ .depth_bounds =
+ {
+ .min = 0.0f,
+ .max = 1.0f,
+ },
+ .stencil_compare_mask =
+ {
+ .front = ~0u,
+ .back = ~0u,
+ },
+ .stencil_write_mask =
+ {
+ .front = ~0u,
+ .back = ~0u,
+ },
+ .stencil_reference =
+ {
+ .front = 0u,
+ .back = 0u,
+ },
+};
+
+static void
+tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
+ const struct tu_dynamic_state *src)
+{
+ struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
+ uint32_t copy_mask = src->mask;
+ uint32_t dest_mask = 0;
+
+ /* Make sure to copy the number of viewports/scissors because they can
+ * only be specified at pipeline creation time.
+ */
+ dest->viewport.count = src->viewport.count;
+ dest->scissor.count = src->scissor.count;
+ dest->discard_rectangle.count = src->discard_rectangle.count;
+
+ if (copy_mask & TU_DYNAMIC_VIEWPORT) {
+ if (memcmp(&dest->viewport.viewports,
+ &src->viewport.viewports,
+ src->viewport.count * sizeof(VkViewport))) {
+ typed_memcpy(dest->viewport.viewports,
+ src->viewport.viewports,
+ src->viewport.count);
+ dest_mask |= TU_DYNAMIC_VIEWPORT;
+ }
+ }
+
+ if (copy_mask & TU_DYNAMIC_SCISSOR) {
+ if (memcmp(&dest->scissor.scissors,
+ &src->scissor.scissors,
+ src->scissor.count * sizeof(VkRect2D))) {
+ typed_memcpy(
+ dest->scissor.scissors, src->scissor.scissors, src->scissor.count);
+ dest_mask |= TU_DYNAMIC_SCISSOR;
+ }
+ }
+
+ if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
+ if (dest->line_width != src->line_width) {
+ dest->line_width = src->line_width;
+ dest_mask |= TU_DYNAMIC_LINE_WIDTH;
+ }
+ }
+
+ if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
+ if (memcmp(
+ &dest->depth_bias, &src->depth_bias, sizeof(src->depth_bias))) {
+ dest->depth_bias = src->depth_bias;
+ dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
+ }
+ }
+
+ if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
+ if (memcmp(&dest->blend_constants,
+ &src->blend_constants,
+ sizeof(src->blend_constants))) {
+ typed_memcpy(dest->blend_constants, src->blend_constants, 4);
+ dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
+ }
+ }
+
+ if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
+ if (memcmp(&dest->depth_bounds,
+ &src->depth_bounds,
+ sizeof(src->depth_bounds))) {
+ dest->depth_bounds = src->depth_bounds;
+ dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
+ }
+ }
+
+ if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
+ if (memcmp(&dest->stencil_compare_mask,
+ &src->stencil_compare_mask,
+ sizeof(src->stencil_compare_mask))) {
+ dest->stencil_compare_mask = src->stencil_compare_mask;
+ dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
+ }
+ }
+
+ if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
+ if (memcmp(&dest->stencil_write_mask,
+ &src->stencil_write_mask,
+ sizeof(src->stencil_write_mask))) {
+ dest->stencil_write_mask = src->stencil_write_mask;
+ dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
+ }
+ }
+
+ if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
+ if (memcmp(&dest->stencil_reference,
+ &src->stencil_reference,
+ sizeof(src->stencil_reference))) {
+ dest->stencil_reference = src->stencil_reference;
+ dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
+ }
+ }
+
+ if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
+ if (memcmp(&dest->discard_rectangle.rectangles,
+ &src->discard_rectangle.rectangles,
+ src->discard_rectangle.count * sizeof(VkRect2D))) {
+ typed_memcpy(dest->discard_rectangle.rectangles,
+ src->discard_rectangle.rectangles,
+ src->discard_rectangle.count);
+ dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
+ }
+ }
+}
+
+static VkResult
+tu_create_cmd_buffer(struct tu_device *device,
+ struct tu_cmd_pool *pool,
+ VkCommandBufferLevel level,
+ VkCommandBuffer *pCommandBuffer)
+{
+ struct tu_cmd_buffer *cmd_buffer;
+ cmd_buffer = vk_zalloc(
+ &pool->alloc, sizeof(*cmd_buffer), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (cmd_buffer == NULL)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ cmd_buffer->device = device;
+ cmd_buffer->pool = pool;
+ cmd_buffer->level = level;
+
+ if (pool) {
+ list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
+ cmd_buffer->queue_family_index = pool->queue_family_index;
+
+ } else {
+ /* Init the pool_link so we can safely call list_del when we destroy
+ * the command buffer
+ */
+ list_inithead(&cmd_buffer->pool_link);
+ cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
+ }
+
+ *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);
+
+ list_inithead(&cmd_buffer->upload.list);
+
+ return VK_SUCCESS;
+}
+
+static void
+tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
+{
+ list_del(&cmd_buffer->pool_link);
+
+ for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
+ free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
+
+ vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
+}
+
+static VkResult
+tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
+{
+ cmd_buffer->record_result = VK_SUCCESS;
+
+ for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
+ cmd_buffer->descriptors[i].dirty = 0;
+ cmd_buffer->descriptors[i].valid = 0;
+ cmd_buffer->descriptors[i].push_dirty = false;
+ }
+
+ cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;
+
+ return cmd_buffer->record_result;
+}
+
+VkResult
+tu_AllocateCommandBuffers(VkDevice _device,
+ const VkCommandBufferAllocateInfo *pAllocateInfo,
+ VkCommandBuffer *pCommandBuffers)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);
+
+ VkResult result = VK_SUCCESS;
+ uint32_t i;
+
+ for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
+
+ if (!list_empty(&pool->free_cmd_buffers)) {
+ struct tu_cmd_buffer *cmd_buffer = list_first_entry(
+ &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);
+
+ list_del(&cmd_buffer->pool_link);
+ list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
+
+ result = tu_reset_cmd_buffer(cmd_buffer);
+ cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ cmd_buffer->level = pAllocateInfo->level;
+
+ pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
+ } else {
+ result = tu_create_cmd_buffer(
+ device, pool, pAllocateInfo->level, &pCommandBuffers[i]);
+ }
+ if (result != VK_SUCCESS)
+ break;
+ }
+
+ if (result != VK_SUCCESS) {
+ tu_FreeCommandBuffers(
+ _device, pAllocateInfo->commandPool, i, pCommandBuffers);
+
+ /* From the Vulkan 1.0.66 spec:
+ *
+ * "vkAllocateCommandBuffers can be used to create multiple
+ * command buffers. If the creation of any of those command
+ * buffers fails, the implementation must destroy all
+ * successfully created command buffer objects from this
+ * command, set all entries of the pCommandBuffers array to
+ * NULL and return the error."
+ */
+ memset(pCommandBuffers,
+ 0,
+ sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
+ }
+
+ return result;
+}
+
+void
+tu_FreeCommandBuffers(VkDevice device,
+ VkCommandPool commandPool,
+ uint32_t commandBufferCount,
+ const VkCommandBuffer *pCommandBuffers)
+{
+ for (uint32_t i = 0; i < commandBufferCount; i++) {
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
+
+ if (cmd_buffer) {
+ if (cmd_buffer->pool) {
+ list_del(&cmd_buffer->pool_link);
+ list_addtail(&cmd_buffer->pool_link,
+ &cmd_buffer->pool->free_cmd_buffers);
+ } else
+ tu_cmd_buffer_destroy(cmd_buffer);
+ }
+ }
+}
+
+VkResult
+tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
+ VkCommandBufferResetFlags flags)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+ return tu_reset_cmd_buffer(cmd_buffer);
+}
+
+VkResult
+tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
+ const VkCommandBufferBeginInfo *pBeginInfo)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+ VkResult result = VK_SUCCESS;
+
+ if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
+ /* If the command buffer has already been reset with
+ * vkResetCommandBuffer, no need to do it again.
+ */
+ result = tu_reset_cmd_buffer(cmd_buffer);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+
+ memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
+ cmd_buffer->usage_flags = pBeginInfo->flags;
+
+ /* setup initial configuration into command buffer */
+ if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
+ switch (cmd_buffer->queue_family_index) {
+ case TU_QUEUE_GENERAL:
+ /* init */
+ break;
+ default:
+ break;
+ }
+ }
+
+ cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;
+
+ return result;
+}
+
+void
+tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
+ uint32_t firstBinding,
+ uint32_t bindingCount,
+ const VkBuffer *pBuffers,
+ const VkDeviceSize *pOffsets)
+{
+}
+
+void
+tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ VkIndexType indexType)
+{
+}
+
+void
+tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout _layout,
+ uint32_t firstSet,
+ uint32_t descriptorSetCount,
+ const VkDescriptorSet *pDescriptorSets,
+ uint32_t dynamicOffsetCount,
+ const uint32_t *pDynamicOffsets)
+{
+}
+
+void
+tu_CmdPushConstants(VkCommandBuffer commandBuffer,
+ VkPipelineLayout layout,
+ VkShaderStageFlags stageFlags,
+ uint32_t offset,
+ uint32_t size,
+ const void *pValues)
+{
+}
+
+VkResult
+tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+
+ cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;
+
+ return cmd_buffer->record_result;
+}
+
+void
+tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipeline _pipeline)
+{
+}
+
+void
+tu_CmdSetViewport(VkCommandBuffer commandBuffer,
+ uint32_t firstViewport,
+ uint32_t viewportCount,
+ const VkViewport *pViewports)
+{
+}
+
+void
+tu_CmdSetScissor(VkCommandBuffer commandBuffer,
+ uint32_t firstScissor,
+ uint32_t scissorCount,
+ const VkRect2D *pScissors)
+{
+}
+
+void
+tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
+{
+}
+
+void
+tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
+ float depthBiasConstantFactor,
+ float depthBiasClamp,
+ float depthBiasSlopeFactor)
+{
+}
+
+void
+tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
+ const float blendConstants[4])
+{
+}
+
+void
+tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
+ float minDepthBounds,
+ float maxDepthBounds)
+{
+}
+
+void
+tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ uint32_t compareMask)
+{
+}
+
+void
+tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ uint32_t writeMask)
+{
+}
+
+void
+tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ uint32_t reference)
+{
+}
+
+void
+tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
+ uint32_t commandBufferCount,
+ const VkCommandBuffer *pCmdBuffers)
+{
+}
+
+VkResult
+tu_CreateCommandPool(VkDevice _device,
+ const VkCommandPoolCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkCommandPool *pCmdPool)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ struct tu_cmd_pool *pool;
+
+ pool = vk_alloc2(&device->alloc,
+ pAllocator,
+ sizeof(*pool),
+ 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (pool == NULL)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ if (pAllocator)
+ pool->alloc = *pAllocator;
+ else
+ pool->alloc = device->alloc;
+
+ list_inithead(&pool->cmd_buffers);
+ list_inithead(&pool->free_cmd_buffers);
+
+ pool->queue_family_index = pCreateInfo->queueFamilyIndex;
+
+ *pCmdPool = tu_cmd_pool_to_handle(pool);
+
+ return VK_SUCCESS;
+}
+
+void
+tu_DestroyCommandPool(VkDevice _device,
+ VkCommandPool commandPool,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
+
+ if (!pool)
+ return;
+
+ list_for_each_entry_safe(
+ struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers, pool_link)
+ {
+ tu_cmd_buffer_destroy(cmd_buffer);
+ }
+
+ list_for_each_entry_safe(
+ struct tu_cmd_buffer, cmd_buffer, &pool->free_cmd_buffers, pool_link)
+ {
+ tu_cmd_buffer_destroy(cmd_buffer);
+ }
+
+ vk_free2(&device->alloc, pAllocator, pool);
+}
+
+VkResult
+tu_ResetCommandPool(VkDevice device,
+ VkCommandPool commandPool,
+ VkCommandPoolResetFlags flags)
+{
+ TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
+ VkResult result;
+
+ list_for_each_entry(
+ struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers, pool_link)
+ {
+ result = tu_reset_cmd_buffer(cmd_buffer);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+
+ return VK_SUCCESS;
+}
+
+void
+tu_TrimCommandPool(VkDevice device,
+ VkCommandPool commandPool,
+ VkCommandPoolTrimFlagsKHR flags)
+{
+ TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
+
+ if (!pool)
+ return;
+
+ list_for_each_entry_safe(
+ struct tu_cmd_buffer, cmd_buffer, &pool->free_cmd_buffers, pool_link)
+ {
+ tu_cmd_buffer_destroy(cmd_buffer);
+ }
+}
+
+void
+tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo *pRenderPassBegin,
+ VkSubpassContents contents)
+{
+}
+
+void
+tu_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo *pRenderPassBeginInfo,
+ const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
+{
+ tu_CmdBeginRenderPass(
+ commandBuffer, pRenderPassBeginInfo, pSubpassBeginInfo->contents);
+}
+
+void
+tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
+{
+}
+
+void
+tu_CmdNextSubpass2KHR(VkCommandBuffer commandBuffer,
+ const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
+ const VkSubpassEndInfoKHR *pSubpassEndInfo)
+{
+ tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
+}
+
+struct tu_draw_info
+{
+ /**
+ * Number of vertices.
+ */
+ uint32_t count;
+
+ /**
+ * Index of the first vertex.
+ */
+ int32_t vertex_offset;
+
+ /**
+ * First instance id.
+ */
+ uint32_t first_instance;
+
+ /**
+ * Number of instances.
+ */
+ uint32_t instance_count;
+
+ /**
+ * First index (indexed draws only).
+ */
+ uint32_t first_index;
+
+ /**
+ * Whether it's an indexed draw.
+ */
+ bool indexed;
+
+ /**
+ * Indirect draw parameters resource.
+ */
+ struct tu_buffer *indirect;
+ uint64_t indirect_offset;
+ uint32_t stride;
+
+ /**
+ * Draw count parameters resource.
+ */
+ struct tu_buffer *count_buffer;
+ uint64_t count_buffer_offset;
+};
+
+static void
+tu_draw(struct tu_cmd_buffer *cmd_buffer, const struct tu_draw_info *info)
+{
+}
+
+void
+tu_CmdDraw(VkCommandBuffer commandBuffer,
+ uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstVertex,
+ uint32_t firstInstance)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+ struct tu_draw_info info = {};
+
+ info.count = vertexCount;
+ info.instance_count = instanceCount;
+ info.first_instance = firstInstance;
+ info.vertex_offset = firstVertex;
+
+ tu_draw(cmd_buffer, &info);
+}
+
+void
+tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
+ uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t vertexOffset,
+ uint32_t firstInstance)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+ struct tu_draw_info info = {};
+
+ info.indexed = true;
+ info.count = indexCount;
+ info.instance_count = instanceCount;
+ info.first_index = firstIndex;
+ info.vertex_offset = vertexOffset;
+ info.first_instance = firstInstance;
+
+ tu_draw(cmd_buffer, &info);
+}
+
+void
+tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
+ VkBuffer _buffer,
+ VkDeviceSize offset,
+ uint32_t drawCount,
+ uint32_t stride)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+ TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
+ struct tu_draw_info info = {};
+
+ info.count = drawCount;
+ info.indirect = buffer;
+ info.indirect_offset = offset;
+ info.stride = stride;
+
+ tu_draw(cmd_buffer, &info);
+}
+
+void
+tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
+ VkBuffer _buffer,
+ VkDeviceSize offset,
+ uint32_t drawCount,
+ uint32_t stride)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+ TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
+ struct tu_draw_info info = {};
+
+ info.indexed = true;
+ info.count = drawCount;
+ info.indirect = buffer;
+ info.indirect_offset = offset;
+ info.stride = stride;
+
+ tu_draw(cmd_buffer, &info);
+}
+
+struct tu_dispatch_info
+{
+ /**
+ * Determine the layout of the grid (in block units) to be used.
+ */
+ uint32_t blocks[3];
+
+ /**
+ * A starting offset for the grid. If unaligned is set, the offset
+ * must still be aligned.
+ */
+ uint32_t offsets[3];
+ /**
+ * Whether it's an unaligned compute dispatch.
+ */
+ bool unaligned;
+
+ /**
+ * Indirect compute parameters resource.
+ */
+ struct tu_buffer *indirect;
+ uint64_t indirect_offset;
+};
+
+static void
+tu_dispatch(struct tu_cmd_buffer *cmd_buffer,
+ const struct tu_dispatch_info *info)
+{
+}
+
+void
+tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
+ uint32_t base_x,
+ uint32_t base_y,
+ uint32_t base_z,
+ uint32_t x,
+ uint32_t y,
+ uint32_t z)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+ struct tu_dispatch_info info = {};
+
+ info.blocks[0] = x;
+ info.blocks[1] = y;
+ info.blocks[2] = z;
+
+ info.offsets[0] = base_x;
+ info.offsets[1] = base_y;
+ info.offsets[2] = base_z;
+ tu_dispatch(cmd_buffer, &info);
+}
+
+void
+tu_CmdDispatch(VkCommandBuffer commandBuffer,
+ uint32_t x,
+ uint32_t y,
+ uint32_t z)
+{
+ tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
+}
+
+void
+tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
+ VkBuffer _buffer,
+ VkDeviceSize offset)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+ TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
+ struct tu_dispatch_info info = {};
+
+ info.indirect = buffer;
+ info.indirect_offset = offset;
+
+ tu_dispatch(cmd_buffer, &info);
+}
+
+void
+tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
+{
+}
+
+void
+tu_CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
+ const VkSubpassEndInfoKHR *pSubpassEndInfo)
+{
+ tu_CmdEndRenderPass(commandBuffer);
+}
+
+struct tu_barrier_info
+{
+ uint32_t eventCount;
+ const VkEvent *pEvents;
+ VkPipelineStageFlags srcStageMask;
+};
+
+static void
+tu_barrier(struct tu_cmd_buffer *cmd_buffer,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier *pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier *pImageMemoryBarriers,
+ const struct tu_barrier_info *info)
+{
+}
+
+void
+tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags destStageMask,
+ VkBool32 byRegion,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier *pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier *pImageMemoryBarriers)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+ struct tu_barrier_info info;
+
+ info.eventCount = 0;
+ info.pEvents = NULL;
+ info.srcStageMask = srcStageMask;
+
+ tu_barrier(cmd_buffer,
+ memoryBarrierCount,
+ pMemoryBarriers,
+ bufferMemoryBarrierCount,
+ pBufferMemoryBarriers,
+ imageMemoryBarrierCount,
+ pImageMemoryBarriers,
+ &info);
+}
+
+static void
+write_event(struct tu_cmd_buffer *cmd_buffer,
+ struct tu_event *event,
+ VkPipelineStageFlags stageMask,
+ unsigned value)
+{
+}
+
+void
+tu_CmdSetEvent(VkCommandBuffer commandBuffer,
+ VkEvent _event,
+ VkPipelineStageFlags stageMask)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+ TU_FROM_HANDLE(tu_event, event, _event);
+
+ write_event(cmd_buffer, event, stageMask, 1);
+}
+
+void
+tu_CmdResetEvent(VkCommandBuffer commandBuffer,
+ VkEvent _event,
+ VkPipelineStageFlags stageMask)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+ TU_FROM_HANDLE(tu_event, event, _event);
+
+ write_event(cmd_buffer, event, stageMask, 0);
+}
+
+void
+tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
+ uint32_t eventCount,
+ const VkEvent *pEvents,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier *pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier *pImageMemoryBarriers)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+ struct tu_barrier_info info;
+
+ info.eventCount = eventCount;
+ info.pEvents = pEvents;
+ info.srcStageMask = 0;
+
+ tu_barrier(cmd_buffer,
+ memoryBarrierCount,
+ pMemoryBarriers,
+ bufferMemoryBarrierCount,
+ pBufferMemoryBarriers,
+ imageMemoryBarrierCount,
+ pImageMemoryBarriers,
+ &info);
+}
+
+void
+tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
+{
+ /* No-op */
+}
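
tu_bind_dynamic_state() above applies one pattern per state group: compare
the incoming value against the shadow copy in cmd_buffer->state.dynamic,
copy only when it differs, and record the change in a dirty mask so later
state emission can skip untouched groups. A condensed sketch of that
pattern, with hypothetical names rather than the driver's actual types:

    #include <stdint.h>
    #include <string.h>

    enum {
       DIRTY_LINE_WIDTH      = 1u << 0,
       DIRTY_BLEND_CONSTANTS = 1u << 1,
    };

    struct dynamic_state {
       float line_width;
       float blend_constants[4];
    };

    /* Copy the requested groups of src into dest and return a mask of the
     * groups that actually changed -- the compare/copy/flag pattern used
     * throughout tu_bind_dynamic_state(). */
    static uint32_t
    bind_dynamic_state(struct dynamic_state *dest,
                       const struct dynamic_state *src,
                       uint32_t copy_mask)
    {
       uint32_t dirty = 0;

       if ((copy_mask & DIRTY_LINE_WIDTH) &&
           dest->line_width != src->line_width) {
          dest->line_width = src->line_width;
          dirty |= DIRTY_LINE_WIDTH;
       }

       if ((copy_mask & DIRTY_BLEND_CONSTANTS) &&
           memcmp(dest->blend_constants, src->blend_constants,
                  sizeof(src->blend_constants)) != 0) {
          memcpy(dest->blend_constants, src->blend_constants,
                 sizeof(src->blend_constants));
          dirty |= DIRTY_BLEND_CONSTANTS;
       }

       return dirty; /* emission re-emits only these groups */
    }

    int
    main(void)
    {
       struct dynamic_state cur  = { .line_width = 1.0f };
       struct dynamic_state next = { .line_width = 2.0f };
       uint32_t dirty = bind_dynamic_state(&cur, &next, DIRTY_LINE_WIDTH);
       return dirty == DIRTY_LINE_WIDTH ? 0 : 1;
    }
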
diff --git a/src/freedreno/vulkan/tu_descriptor_set.c b/src/freedreno/vulkan/tu_descriptor_set.c
new file mode 100644
index 00000000000..a9dd6df1727
--- /dev/null
+++ b/src/freedreno/vulkan/tu_descriptor_set.c
@@ -0,0 +1,565 @@
+/*
+ * Copyright © 2016 Red Hat.
+ * Copyright © 2016 Bas Nieuwenhuizen
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <assert.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "tu_private.h"
+#include "util/mesa-sha1.h"
+#include "vk_util.h"
+
+static int
+binding_compare(const void *av, const void *bv)
+{
+ const VkDescriptorSetLayoutBinding *a =
+ (const VkDescriptorSetLayoutBinding *)av;
+ const VkDescriptorSetLayoutBinding *b =
+ (const VkDescriptorSetLayoutBinding *)bv;
+
+ return (a->binding < b->binding) ? -1 : (a->binding > b->binding) ? 1 : 0;
+}
+
+static VkDescriptorSetLayoutBinding *
+create_sorted_bindings(const VkDescriptorSetLayoutBinding *bindings,
+ unsigned count)
+{
+ VkDescriptorSetLayoutBinding *sorted_bindings =
+ malloc(count * sizeof(VkDescriptorSetLayoutBinding));
+ if (!sorted_bindings)
+ return NULL;
+
+ memcpy(
+ sorted_bindings, bindings, count * sizeof(VkDescriptorSetLayoutBinding));
+
+ qsort(sorted_bindings,
+ count,
+ sizeof(VkDescriptorSetLayoutBinding),
+ binding_compare);
+
+ return sorted_bindings;
+}
+
+VkResult
+tu_CreateDescriptorSetLayout(
+ VkDevice _device,
+ const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkDescriptorSetLayout *pSetLayout)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ struct tu_descriptor_set_layout *set_layout;
+
+ assert(pCreateInfo->sType ==
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
+ const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT *variable_flags =
+ vk_find_struct_const(pCreateInfo->pNext,
+ DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT);
+
+ uint32_t max_binding = 0;
+ uint32_t immutable_sampler_count = 0;
+ for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
+ max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
+ if (pCreateInfo->pBindings[j].pImmutableSamplers)
+ immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
+ }
+
+ uint32_t samplers_offset =
+ sizeof(struct tu_descriptor_set_layout) +
+ (max_binding + 1) * sizeof(set_layout->binding[0]);
+ size_t size =
+ samplers_offset + immutable_sampler_count * 4 * sizeof(uint32_t);
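+ /* Resulting single-allocation layout (sketch):
+ *   [struct tu_descriptor_set_layout]
+ *   [binding[0] .. binding[max_binding]]
+ *   [immutable sampler words, 4 dwords each]   <- samplers_offset
+ */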
+
+ set_layout = vk_alloc2(
+ &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!set_layout)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ set_layout->flags = pCreateInfo->flags;
+
+ /* We just allocate all the samplers at the end of the struct */
+ uint32_t *samplers = (uint32_t *)&set_layout->binding[max_binding + 1];
+
+ VkDescriptorSetLayoutBinding *bindings =
+ create_sorted_bindings(pCreateInfo->pBindings, pCreateInfo->bindingCount);
+ if (!bindings) {
+ vk_free2(&device->alloc, pAllocator, set_layout);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ set_layout->binding_count = max_binding + 1;
+ set_layout->shader_stages = 0;
+ set_layout->dynamic_shader_stages = 0;
+ set_layout->has_immutable_samplers = false;
+ set_layout->size = 0;
+
+ memset(
+ set_layout->binding, 0, size - sizeof(struct tu_descriptor_set_layout));
+
+ uint32_t buffer_count = 0;
+ uint32_t dynamic_offset_count = 0;
+
+ for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
+ const VkDescriptorSetLayoutBinding *binding = bindings + j;
+ uint32_t b = binding->binding;
+ uint32_t alignment;
+ unsigned binding_buffer_count = 0;
+
+ switch (binding->descriptorType) {
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ assert(!(pCreateInfo->flags &
+ VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
+ set_layout->binding[b].dynamic_offset_count = 1;
+ set_layout->dynamic_shader_stages |= binding->stageFlags;
+ set_layout->binding[b].size = 0;
+ binding_buffer_count = 1;
+ alignment = 1;
+ break;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+ set_layout->binding[b].size = 16;
+ binding_buffer_count = 1;
+ alignment = 16;
+ break;
+ case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+ /* main descriptor + fmask descriptor */
+ set_layout->binding[b].size = 64;
+ binding_buffer_count = 1;
+ alignment = 32;
+ break;
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ /* main descriptor + fmask descriptor + sampler */
+ set_layout->binding[b].size = 96;
+ binding_buffer_count = 1;
+ alignment = 32;
+ break;
+ case VK_DESCRIPTOR_TYPE_SAMPLER:
+ set_layout->binding[b].size = 16;
+ alignment = 16;
+ break;
+ default:
+ unreachable("unknown descriptor type\n");
+ break;
+ }
+
+ set_layout->size = align(set_layout->size, alignment);
+ set_layout->binding[b].type = binding->descriptorType;
+ set_layout->binding[b].array_size = binding->descriptorCount;
+ set_layout->binding[b].offset = set_layout->size;
+ set_layout->binding[b].buffer_offset = buffer_count;
+ set_layout->binding[b].dynamic_offset_offset = dynamic_offset_count;
+
+ if (variable_flags && binding->binding < variable_flags->bindingCount &&
+ (variable_flags->pBindingFlags[binding->binding] &
+ VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)) {
+ assert(!binding->pImmutableSamplers); /* It is ill-defined how many
+ samplers would be valid here */
+ assert(binding->binding == max_binding);
+
+ set_layout->has_variable_descriptors = true;
+ }
+
+ if (binding->pImmutableSamplers) {
+ set_layout->binding[b].immutable_samplers_offset = samplers_offset;
+ set_layout->has_immutable_samplers = true;
+ }
+
+ set_layout->size +=
+ binding->descriptorCount * set_layout->binding[b].size;
+ buffer_count += binding->descriptorCount * binding_buffer_count;
+ dynamic_offset_count +=
+ binding->descriptorCount * set_layout->binding[b].dynamic_offset_count;
+ set_layout->shader_stages |= binding->stageFlags;
+ }
+
+ free(bindings);
+
+ set_layout->buffer_count = buffer_count;
+ set_layout->dynamic_offset_count = dynamic_offset_count;
+
+ *pSetLayout = tu_descriptor_set_layout_to_handle(set_layout);
+
+ return VK_SUCCESS;
+}
+
+void
+tu_DestroyDescriptorSetLayout(VkDevice _device,
+ VkDescriptorSetLayout _set_layout,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_descriptor_set_layout, set_layout, _set_layout);
+
+ if (!set_layout)
+ return;
+
+ vk_free2(&device->alloc, pAllocator, set_layout);
+}
+
+void
+tu_GetDescriptorSetLayoutSupport(
+ VkDevice device,
+ const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
+ VkDescriptorSetLayoutSupport *pSupport)
+{
+ VkDescriptorSetLayoutBinding *bindings =
+ create_sorted_bindings(pCreateInfo->pBindings, pCreateInfo->bindingCount);
+ if (!bindings) {
+ pSupport->supported = false;
+ return;
+ }
+
+ const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT *variable_flags =
+ vk_find_struct_const(pCreateInfo->pNext,
+ DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT);
+ VkDescriptorSetVariableDescriptorCountLayoutSupportEXT *variable_count =
+ vk_find_struct(
+ (void *)pCreateInfo->pNext,
+ DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT);
+ if (variable_count) {
+ variable_count->maxVariableDescriptorCount = 0;
+ }
+
+ bool supported = true;
+ uint64_t size = 0;
+ for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
+ const VkDescriptorSetLayoutBinding *binding = bindings + i;
+
+ uint64_t descriptor_size = 0;
+ uint64_t descriptor_alignment = 1;
+ switch (binding->descriptorType) {
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ break;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+ descriptor_size = 16;
+ descriptor_alignment = 16;
+ break;
+ case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+ descriptor_size = 64;
+ descriptor_alignment = 32;
+ break;
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ descriptor_size = 96;
+ descriptor_alignment = 32;
+ break;
+ case VK_DESCRIPTOR_TYPE_SAMPLER:
+ descriptor_size = 16;
+ descriptor_alignment = 16;
+ break;
+ default:
+ unreachable("unknown descriptor type\n");
+ break;
+ }
+
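+ /* align_u64() wrapping to zero here means the running size overflowed. */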
+ if (size && !align_u64(size, descriptor_alignment)) {
+ supported = false;
+ }
+ size = align_u64(size, descriptor_alignment);
+
+ uint64_t max_count = UINT64_MAX;
+ if (descriptor_size)
+ max_count = (UINT64_MAX - size) / descriptor_size;
+
+ if (max_count < binding->descriptorCount) {
+ supported = false;
+ }
+ if (variable_flags && binding->binding < variable_flags->bindingCount &&
+ variable_count &&
+ (variable_flags->pBindingFlags[binding->binding] &
+ VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)) {
+ variable_count->maxVariableDescriptorCount =
+ MIN2(UINT32_MAX, max_count);
+ }
+ size += binding->descriptorCount * descriptor_size;
+ }
+
+ free(bindings);
+
+ pSupport->supported = supported;
+}
+
+/*
+ * Pipeline layouts. These have nothing to do with the pipeline. They are
+ * just multiple descriptor set layouts pasted together.
+ */
+
+VkResult
+tu_CreatePipelineLayout(VkDevice _device,
+ const VkPipelineLayoutCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkPipelineLayout *pPipelineLayout)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ struct tu_pipeline_layout *layout;
+ struct mesa_sha1 ctx;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);
+
+ layout = vk_alloc2(&device->alloc,
+ pAllocator,
+ sizeof(*layout),
+ 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (layout == NULL)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ layout->num_sets = pCreateInfo->setLayoutCount;
+
+ unsigned dynamic_offset_count = 0;
+
+ _mesa_sha1_init(&ctx);
+ for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
+ TU_FROM_HANDLE(
+ tu_descriptor_set_layout, set_layout, pCreateInfo->pSetLayouts[set]);
+ layout->set[set].layout = set_layout;
+
+ layout->set[set].dynamic_offset_start = dynamic_offset_count;
+ for (uint32_t b = 0; b < set_layout->binding_count; b++) {
+ dynamic_offset_count += set_layout->binding[b].array_size *
+ set_layout->binding[b].dynamic_offset_count;
+ if (set_layout->binding[b].immutable_samplers_offset)
+ _mesa_sha1_update(
+ &ctx,
+ tu_immutable_samplers(set_layout, set_layout->binding + b),
+ set_layout->binding[b].array_size * 4 * sizeof(uint32_t));
+ }
+ _mesa_sha1_update(&ctx,
+ set_layout->binding,
+ sizeof(set_layout->binding[0]) *
+ set_layout->binding_count);
+ }
+
+ layout->dynamic_offset_count = dynamic_offset_count;
+ layout->push_constant_size = 0;
+
+ for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
+ const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
+ layout->push_constant_size =
+ MAX2(layout->push_constant_size, range->offset + range->size);
+ }
+
+ layout->push_constant_size = align(layout->push_constant_size, 16);
+ _mesa_sha1_update(
+ &ctx, &layout->push_constant_size, sizeof(layout->push_constant_size));
+ _mesa_sha1_final(&ctx, layout->sha1);
+ *pPipelineLayout = tu_pipeline_layout_to_handle(layout);
+
+ return VK_SUCCESS;
+}
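+
+/* Worked example (illustrative): with set 0 holding two dynamic uniform
+ * buffers and set 1 holding one dynamic storage buffer,
+ * dynamic_offset_start is 0 for set 0 and 2 for set 1, and
+ * layout->dynamic_offset_count ends up as 3. */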
+
+void
+tu_DestroyPipelineLayout(VkDevice _device,
+ VkPipelineLayout _pipelineLayout,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_pipeline_layout, pipeline_layout, _pipelineLayout);
+
+ if (!pipeline_layout)
+ return;
+ vk_free2(&device->alloc, pAllocator, pipeline_layout);
+}
+
+#define EMPTY 1
+
+VkResult
+tu_CreateDescriptorPool(VkDevice _device,
+ const VkDescriptorPoolCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkDescriptorPool *pDescriptorPool)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ struct tu_descriptor_pool *pool;
+
+ return VK_SUCCESS;
+}
+
+void
+tu_DestroyDescriptorPool(VkDevice _device,
+ VkDescriptorPool _pool,
+ const VkAllocationCallbacks *pAllocator)
+{
+}
+
+VkResult
+tu_ResetDescriptorPool(VkDevice _device,
+ VkDescriptorPool descriptorPool,
+ VkDescriptorPoolResetFlags flags)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_descriptor_pool, pool, descriptorPool);
+
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_AllocateDescriptorSets(VkDevice _device,
+ const VkDescriptorSetAllocateInfo *pAllocateInfo,
+ VkDescriptorSet *pDescriptorSets)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_descriptor_pool, pool, pAllocateInfo->descriptorPool);
+
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_FreeDescriptorSets(VkDevice _device,
+ VkDescriptorPool descriptorPool,
+ uint32_t count,
+ const VkDescriptorSet *pDescriptorSets)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_descriptor_pool, pool, descriptorPool);
+
+ return VK_SUCCESS;
+}
+
+void
+tu_update_descriptor_sets(struct tu_device *device,
+ struct tu_cmd_buffer *cmd_buffer,
+ VkDescriptorSet dstSetOverride,
+ uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet *pDescriptorWrites,
+ uint32_t descriptorCopyCount,
+ const VkCopyDescriptorSet *pDescriptorCopies)
+{
+}
+
+void
+tu_UpdateDescriptorSets(VkDevice _device,
+ uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet *pDescriptorWrites,
+ uint32_t descriptorCopyCount,
+ const VkCopyDescriptorSet *pDescriptorCopies)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+
+ tu_update_descriptor_sets(device,
+ NULL,
+ VK_NULL_HANDLE,
+ descriptorWriteCount,
+ pDescriptorWrites,
+ descriptorCopyCount,
+ pDescriptorCopies);
+}
+
+VkResult
+tu_CreateDescriptorUpdateTemplate(
+ VkDevice _device,
+ const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(
+ tu_descriptor_set_layout, set_layout, pCreateInfo->descriptorSetLayout);
+ const uint32_t entry_count = pCreateInfo->descriptorUpdateEntryCount;
+ const size_t size =
+ sizeof(struct tu_descriptor_update_template) +
+ sizeof(struct tu_descriptor_update_template_entry) * entry_count;
+ struct tu_descriptor_update_template *templ;
+ uint32_t i;
+
+ templ = vk_alloc2(
+ &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!templ)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ *pDescriptorUpdateTemplate = tu_descriptor_update_template_to_handle(templ);
+ return VK_SUCCESS;
+}
+
+void
+tu_DestroyDescriptorUpdateTemplate(
+ VkDevice _device,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(
+ tu_descriptor_update_template, templ, descriptorUpdateTemplate);
+
+ if (!templ)
+ return;
+
+ vk_free2(&device->alloc, pAllocator, templ);
+}
+
+void
+tu_update_descriptor_set_with_template(
+ struct tu_device *device,
+ struct tu_cmd_buffer *cmd_buffer,
+ struct tu_descriptor_set *set,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ const void *pData)
+{
+ TU_FROM_HANDLE(
+ tu_descriptor_update_template, templ, descriptorUpdateTemplate);
+}
+
+void
+tu_UpdateDescriptorSetWithTemplate(
+ VkDevice _device,
+ VkDescriptorSet descriptorSet,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ const void *pData)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_descriptor_set, set, descriptorSet);
+
+ tu_update_descriptor_set_with_template(
+ device, NULL, set, descriptorUpdateTemplate, pData);
+}
+
+VkResult
+tu_CreateSamplerYcbcrConversion(
+ VkDevice device,
+ const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkSamplerYcbcrConversion *pYcbcrConversion)
+{
+ *pYcbcrConversion = VK_NULL_HANDLE;
+ return VK_SUCCESS;
+}
+
+void
+tu_DestroySamplerYcbcrConversion(VkDevice device,
+ VkSamplerYcbcrConversion ycbcrConversion,
+ const VkAllocationCallbacks *pAllocator)
+{
+ /* Do nothing. */
+}
diff --git a/src/freedreno/vulkan/tu_descriptor_set.h b/src/freedreno/vulkan/tu_descriptor_set.h
new file mode 100644
index 00000000000..087eb3ddc20
--- /dev/null
+++ b/src/freedreno/vulkan/tu_descriptor_set.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright © 2016 Bas Nieuwenhuizen
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef TU_DESCRIPTOR_SET_H
+#define TU_DESCRIPTOR_SET_H
+
+#include <vulkan/vulkan.h>
+
+#define MAX_SETS 32
+
+struct tu_descriptor_set_binding_layout
+{
+ VkDescriptorType type;
+
+ /* Number of array elements in this binding */
+ uint32_t array_size;
+
+ uint32_t offset;
+ uint32_t buffer_offset;
+ uint16_t dynamic_offset_offset;
+
+ uint16_t dynamic_offset_count;
+ /* redundant with the type, each for a single array element */
+ uint32_t size;
+
+ /* Offset in the tu_descriptor_set_layout of the immutable samplers, or 0
+ * if there are no immutable samplers. */
+ uint32_t immutable_samplers_offset;
+};
+
+struct tu_descriptor_set_layout
+{
+ /* The create flags for this descriptor set layout */
+ VkDescriptorSetLayoutCreateFlags flags;
+
+ /* Number of bindings in this descriptor set */
+ uint32_t binding_count;
+
+ /* Total size of the descriptor set with room for all array entries */
+ uint32_t size;
+
+ /* Shader stages affected by this descriptor set */
+ uint16_t shader_stages;
+ uint16_t dynamic_shader_stages;
+
+ /* Number of buffers in this descriptor set */
+ uint32_t buffer_count;
+
+ /* Number of dynamic offsets used by this descriptor set */
+ uint16_t dynamic_offset_count;
+
+ bool has_immutable_samplers;
+ bool has_variable_descriptors;
+
+ /* Bindings in this descriptor set */
+ struct tu_descriptor_set_binding_layout binding[0];
+};
+
+struct tu_pipeline_layout
+{
+ struct
+ {
+ struct tu_descriptor_set_layout *layout;
+ uint32_t size;
+ uint32_t dynamic_offset_start;
+ } set[MAX_SETS];
+
+ uint32_t num_sets;
+ uint32_t push_constant_size;
+ uint32_t dynamic_offset_count;
+
+ unsigned char sha1[20];
+};
+
+static inline const uint32_t *
+tu_immutable_samplers(const struct tu_descriptor_set_layout *set,
+ const struct tu_descriptor_set_binding_layout *binding)
+{
+ return (const uint32_t *)((const char *)set +
+ binding->immutable_samplers_offset);
+}
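+
+/* Usage sketch (illustrative): the immutable sampler words for binding b of
+ * layout s start at tu_immutable_samplers(s, s->binding + b) and span
+ * binding[b].array_size * 4 dwords, matching the hashing done in
+ * tu_CreatePipelineLayout. */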
+#endif /* TU_DESCRIPTOR_SET_H */
diff --git a/src/freedreno/vulkan/tu_device.c b/src/freedreno/vulkan/tu_device.c
new file mode 100644
index 00000000000..4b86bd507fa
--- /dev/null
+++ b/src/freedreno/vulkan/tu_device.c
@@ -0,0 +1,1839 @@
+/*
+ * Copyright © 2016 Red Hat.
+ * Copyright © 2016 Bas Nieuwenhuizen
+ *
+ * based in part on anv driver which is:
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "tu_private.h"
+#include "util/debug.h"
+#include "util/disk_cache.h"
+#include "util/strtod.h"
+#include "vk_format.h"
+#include "vk_util.h"
+#include <fcntl.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <xf86drm.h>
+
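+/* Cache UUID layout produced below (sketch): bytes 0-3 hold the Mesa build
+ * timestamp, bytes 4-5 the GPU family and bytes 6 onward the string "tu";
+ * the remainder stays zeroed. */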
+static int
+tu_device_get_cache_uuid(uint16_t family, void *uuid)
+{
+ uint32_t mesa_timestamp;
+ uint16_t f = family;
+ memset(uuid, 0, VK_UUID_SIZE);
+ if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
+ &mesa_timestamp))
+ return -1;
+
+ memcpy(uuid, &mesa_timestamp, 4);
+ memcpy((char *)uuid + 4, &f, 2);
+ snprintf((char *)uuid + 6, VK_UUID_SIZE - 10, "tu");
+ return 0;
+}
+
+static void
+tu_get_driver_uuid(void *uuid)
+{
+ memset(uuid, 0, VK_UUID_SIZE);
+}
+
+static void
+tu_get_device_uuid(void *uuid)
+{
+ stub();
+}
+
+static VkResult
+tu_physical_device_init(struct tu_physical_device *device,
+ struct tu_instance *instance,
+ drmDevicePtr drm_device)
+{
+ const char *path = drm_device->nodes[DRM_NODE_RENDER];
+ VkResult result;
+ drmVersionPtr version;
+ int fd;
+ int master_fd = -1;
+
+ fd = open(path, O_RDWR | O_CLOEXEC);
+ if (fd < 0) {
+ if (instance->debug_flags & TU_DEBUG_STARTUP)
+ tu_logi("Could not open device '%s'", path);
+
+ return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
+ }
+
+ version = drmGetVersion(fd);
+ if (!version) {
+ close(fd);
+
+ if (instance->debug_flags & TU_DEBUG_STARTUP)
+ tu_logi("Could not get the kernel driver version for device '%s'",
+ path);
+
+ return vk_errorf(instance,
+ VK_ERROR_INCOMPATIBLE_DRIVER,
+ "failed to get version %s: %m",
+ path);
+ }
+
+ if (strcmp(version->name, "msm")) {
+ drmFreeVersion(version);
+ if (master_fd != -1)
+ close(master_fd);
+ close(fd);
+
+ if (instance->debug_flags & TU_DEBUG_STARTUP)
+ tu_logi("Device '%s' is not using the amdgpu kernel driver.", path);
+
+ return VK_ERROR_INCOMPATIBLE_DRIVER;
+ }
+ drmFreeVersion(version);
+
+ if (instance->debug_flags & TU_DEBUG_STARTUP)
+ tu_logi("Found compatible device '%s'.", path);
+
+ device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ device->instance = instance;
+ assert(strlen(path) < ARRAY_SIZE(device->path));
+ strncpy(device->path, path, ARRAY_SIZE(device->path));
+
+ if (instance->enabled_extensions.KHR_display) {
+ master_fd = open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
+ if (master_fd >= 0) {
+ /* TODO: free master_fd if accel is not working? */
+ abort();
+ }
+ }
+
+ device->master_fd = master_fd;
+ device->local_fd = fd;
+
+ if (tu_device_get_cache_uuid(0 /* TODO */, device->cache_uuid)) {
+ result = vk_errorf(
+ instance, VK_ERROR_INITIALIZATION_FAILED, "cannot generate UUID");
+ goto fail;
+ }
+
+ /* The gpu id is already embedded in the cache uuid so we just pass the
+ * device name when creating the cache.
+ */
+ char buf[VK_UUID_SIZE * 2 + 1];
+ disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
+ device->disk_cache = disk_cache_create(device->name, buf, 0);
+
+ fprintf(stderr,
+ "WARNING: tu is not a conformant vulkan implementation, "
+ "testing use only.\n");
+
+ tu_get_driver_uuid(&device->driver_uuid);
+ tu_get_device_uuid(&device->device_uuid);
+
+ tu_fill_device_extension_table(device, &device->supported_extensions);
+
+ return VK_SUCCESS;
+
+fail:
+ close(fd);
+ if (master_fd != -1)
+ close(master_fd);
+ return result;
+}
+
+static void
+tu_physical_device_finish(struct tu_physical_device *device)
+{
+ disk_cache_destroy(device->disk_cache);
+ close(device->local_fd);
+ if (device->master_fd != -1)
+ close(device->master_fd);
+}
+
+static void *
+default_alloc_func(void *pUserData,
+ size_t size,
+ size_t align,
+ VkSystemAllocationScope allocationScope)
+{
+ return malloc(size);
+}
+
+static void *
+default_realloc_func(void *pUserData,
+ void *pOriginal,
+ size_t size,
+ size_t align,
+ VkSystemAllocationScope allocationScope)
+{
+ return realloc(pOriginal, size);
+}
+
+static void
+default_free_func(void *pUserData, void *pMemory)
+{
+ free(pMemory);
+}
+
+static const VkAllocationCallbacks default_alloc = {
+ .pUserData = NULL,
+ .pfnAllocation = default_alloc_func,
+ .pfnReallocation = default_realloc_func,
+ .pfnFree = default_free_func,
+};
+
+static const struct debug_control tu_debug_options[] = { { "startup",
+ TU_DEBUG_STARTUP },
+ { NULL, 0 } };
+
+const char *
+tu_get_debug_option_name(int id)
+{
+ assert(id < ARRAY_SIZE(tu_debug_options) - 1);
+ return tu_debug_options[id].string;
+}
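+
+/* Note: tu_debug_options is NULL-terminated; the "- 1" in the assert above
+ * skips the sentinel entry. */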
+
+static int
+tu_get_instance_extension_index(const char *name)
+{
+ for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
+ if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
+ return i;
+ }
+ return -1;
+}
+
+VkResult
+tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkInstance *pInstance)
+{
+ struct tu_instance *instance;
+ VkResult result;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
+
+ uint32_t client_version;
+ if (pCreateInfo->pApplicationInfo &&
+ pCreateInfo->pApplicationInfo->apiVersion != 0) {
+ client_version = pCreateInfo->pApplicationInfo->apiVersion;
+ } else {
+ tu_EnumerateInstanceVersion(&client_version);
+ }
+
+ instance = vk_zalloc2(&default_alloc,
+ pAllocator,
+ sizeof(*instance),
+ 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+ if (!instance)
+ return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+
+ if (pAllocator)
+ instance->alloc = *pAllocator;
+ else
+ instance->alloc = default_alloc;
+
+ instance->api_version = client_version;
+ instance->physical_device_count = -1;
+
+ instance->debug_flags =
+ parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
+
+ if (instance->debug_flags & TU_DEBUG_STARTUP)
+ tu_logi("Created an instance");
+
+ for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
+ const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
+ int index = tu_get_instance_extension_index(ext_name);
+
+ if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
+ vk_free2(&default_alloc, pAllocator, instance);
+ return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
+ }
+
+ instance->enabled_extensions.extensions[index] = true;
+ }
+
+ result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
+ if (result != VK_SUCCESS) {
+ vk_free2(&default_alloc, pAllocator, instance);
+ return vk_error(instance, result);
+ }
+
+ _mesa_locale_init();
+
+ VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
+
+ *pInstance = tu_instance_to_handle(instance);
+
+ return VK_SUCCESS;
+}
+
+void
+tu_DestroyInstance(VkInstance _instance,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_instance, instance, _instance);
+
+ if (!instance)
+ return;
+
+ for (int i = 0; i < instance->physical_device_count; ++i) {
+ tu_physical_device_finish(instance->physical_devices + i);
+ }
+
+ VG(VALGRIND_DESTROY_MEMPOOL(instance));
+
+ _mesa_locale_fini();
+
+ vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
+
+ vk_free(&instance->alloc, instance);
+}
+
+static VkResult
+tu_enumerate_devices(struct tu_instance *instance)
+{
+ /* TODO: Check for more devices? */
+ drmDevicePtr devices[8];
+ VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
+ int max_devices;
+
+ instance->physical_device_count = 0;
+
+ max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
+
+ if (instance->debug_flags & TU_DEBUG_STARTUP)
+ tu_logi("Found %d drm nodes", max_devices);
+
+ if (max_devices < 1)
+ return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
+
+ for (unsigned i = 0; i < (unsigned)max_devices; i++) {
+ if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
+ devices[i]->bustype == DRM_BUS_PCI &&
+ devices[i]->deviceinfo.pci->vendor_id == 0) {
+
+ result = tu_physical_device_init(instance->physical_devices +
+ instance->physical_device_count,
+ instance,
+ devices[i]);
+ if (result == VK_SUCCESS)
+ ++instance->physical_device_count;
+ else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
+ break;
+ }
+ }
+ drmFreeDevices(devices, max_devices);
+
+ return result;
+}
+
+VkResult
+tu_EnumeratePhysicalDevices(VkInstance _instance,
+ uint32_t *pPhysicalDeviceCount,
+ VkPhysicalDevice *pPhysicalDevices)
+{
+ TU_FROM_HANDLE(tu_instance, instance, _instance);
+ VkResult result;
+
+ if (instance->physical_device_count < 0) {
+ result = tu_enumerate_devices(instance);
+ if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
+ return result;
+ }
+
+ if (!pPhysicalDevices) {
+ *pPhysicalDeviceCount = instance->physical_device_count;
+ } else {
+ *pPhysicalDeviceCount =
+ MIN2(*pPhysicalDeviceCount, instance->physical_device_count);
+ for (unsigned i = 0; i < *pPhysicalDeviceCount; ++i)
+ pPhysicalDevices[i] =
+ tu_physical_device_to_handle(instance->physical_devices + i);
+ }
+
+ return *pPhysicalDeviceCount < instance->physical_device_count
+ ? VK_INCOMPLETE
+ : VK_SUCCESS;
+}
+
+VkResult
+tu_EnumeratePhysicalDeviceGroups(
+ VkInstance _instance,
+ uint32_t *pPhysicalDeviceGroupCount,
+ VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
+{
+ TU_FROM_HANDLE(tu_instance, instance, _instance);
+ VkResult result;
+
+ if (instance->physical_device_count < 0) {
+ result = tu_enumerate_devices(instance);
+ if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
+ return result;
+ }
+
+ if (!pPhysicalDeviceGroupProperties) {
+ *pPhysicalDeviceGroupCount = instance->physical_device_count;
+ } else {
+ *pPhysicalDeviceGroupCount =
+ MIN2(*pPhysicalDeviceGroupCount, instance->physical_device_count);
+ for (unsigned i = 0; i < *pPhysicalDeviceGroupCount; ++i) {
+ pPhysicalDeviceGroupProperties[i].physicalDeviceCount = 1;
+ pPhysicalDeviceGroupProperties[i].physicalDevices[0] =
+ tu_physical_device_to_handle(instance->physical_devices + i);
+ pPhysicalDeviceGroupProperties[i].subsetAllocation = false;
+ }
+ }
+ return *pPhysicalDeviceGroupCount < instance->physical_device_count
+ ? VK_INCOMPLETE
+ : VK_SUCCESS;
+}
+
+void
+tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceFeatures *pFeatures)
+{
+ memset(pFeatures, 0, sizeof(*pFeatures));
+
+ *pFeatures = (VkPhysicalDeviceFeatures){
+ .robustBufferAccess = false,
+ .fullDrawIndexUint32 = false,
+ .imageCubeArray = false,
+ .independentBlend = false,
+ .geometryShader = false,
+ .tessellationShader = false,
+ .sampleRateShading = false,
+ .dualSrcBlend = false,
+ .logicOp = false,
+ .multiDrawIndirect = false,
+ .drawIndirectFirstInstance = false,
+ .depthClamp = false,
+ .depthBiasClamp = false,
+ .fillModeNonSolid = false,
+ .depthBounds = false,
+ .wideLines = false,
+ .largePoints = false,
+ .alphaToOne = false,
+ .multiViewport = false,
+ .samplerAnisotropy = false,
+ .textureCompressionETC2 = false,
+ .textureCompressionASTC_LDR = false,
+ .textureCompressionBC = false,
+ .occlusionQueryPrecise = false,
+ .pipelineStatisticsQuery = false,
+ .vertexPipelineStoresAndAtomics = false,
+ .fragmentStoresAndAtomics = false,
+ .shaderTessellationAndGeometryPointSize = false,
+ .shaderImageGatherExtended = false,
+ .shaderStorageImageExtendedFormats = false,
+ .shaderStorageImageMultisample = false,
+ .shaderUniformBufferArrayDynamicIndexing = false,
+ .shaderSampledImageArrayDynamicIndexing = false,
+ .shaderStorageBufferArrayDynamicIndexing = false,
+ .shaderStorageImageArrayDynamicIndexing = false,
+ .shaderStorageImageReadWithoutFormat = false,
+ .shaderStorageImageWriteWithoutFormat = false,
+ .shaderClipDistance = false,
+ .shaderCullDistance = false,
+ .shaderFloat64 = false,
+ .shaderInt64 = false,
+ .shaderInt16 = false,
+ .sparseBinding = false,
+ .variableMultisampleRate = false,
+ .inheritedQueries = false,
+ };
+}
+
+void
+tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceFeatures2KHR *pFeatures)
+{
+ vk_foreach_struct(ext, pFeatures->pNext)
+ {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
+ VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
+ features->variablePointersStorageBuffer = true;
+ features->variablePointers = false;
+ break;
+ }
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
+ VkPhysicalDeviceMultiviewFeaturesKHR *features =
+ (VkPhysicalDeviceMultiviewFeaturesKHR *)ext;
+ features->multiview = true;
+ features->multiviewGeometryShader = true;
+ features->multiviewTessellationShader = true;
+ break;
+ }
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
+ VkPhysicalDeviceShaderDrawParameterFeatures *features =
+ (VkPhysicalDeviceShaderDrawParameterFeatures *)ext;
+ features->shaderDrawParameters = true;
+ break;
+ }
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
+ VkPhysicalDeviceProtectedMemoryFeatures *features =
+ (VkPhysicalDeviceProtectedMemoryFeatures *)ext;
+ features->protectedMemory = false;
+ break;
+ }
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
+ VkPhysicalDevice16BitStorageFeatures *features =
+ (VkPhysicalDevice16BitStorageFeatures *)ext;
+ features->storageBuffer16BitAccess = false;
+ features->uniformAndStorageBuffer16BitAccess = false;
+ features->storagePushConstant16 = false;
+ features->storageInputOutput16 = false;
+ break;
+ }
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
+ VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
+ (VkPhysicalDeviceSamplerYcbcrConversionFeatures *)ext;
+ features->samplerYcbcrConversion = false;
+ break;
+ }
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
+ VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
+ (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext;
+ features->shaderInputAttachmentArrayDynamicIndexing = true;
+ features->shaderUniformTexelBufferArrayDynamicIndexing = true;
+ features->shaderStorageTexelBufferArrayDynamicIndexing = true;
+ features->shaderUniformBufferArrayNonUniformIndexing = false;
+ features->shaderSampledImageArrayNonUniformIndexing = false;
+ features->shaderStorageBufferArrayNonUniformIndexing = false;
+ features->shaderStorageImageArrayNonUniformIndexing = false;
+ features->shaderInputAttachmentArrayNonUniformIndexing = false;
+ features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
+ features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
+ features->descriptorBindingUniformBufferUpdateAfterBind = true;
+ features->descriptorBindingSampledImageUpdateAfterBind = true;
+ features->descriptorBindingStorageImageUpdateAfterBind = true;
+ features->descriptorBindingStorageBufferUpdateAfterBind = true;
+ features->descriptorBindingUniformTexelBufferUpdateAfterBind = true;
+ features->descriptorBindingStorageTexelBufferUpdateAfterBind = true;
+ features->descriptorBindingUpdateUnusedWhilePending = true;
+ features->descriptorBindingPartiallyBound = true;
+ features->descriptorBindingVariableDescriptorCount = true;
+ features->runtimeDescriptorArray = true;
+ break;
+ }
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
+ VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
+ (VkPhysicalDeviceConditionalRenderingFeaturesEXT *)ext;
+ features->conditionalRendering = true;
+ features->inheritedConditionalRendering = false;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
+}
+
+void
+tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceProperties *pProperties)
+{
+ TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
+ VkSampleCountFlags sample_counts = 0xf;
+
+ /* Make sure the entire descriptor set is addressable with a signed
+ * 32-bit int, so the sum of all limits scaled by descriptor size must be
+ * at most 2 GiB. A combined image/sampler object counts as both an image
+ * and a sampler. This limit is for the pipeline layout, not the set
+ * layout, but there is no set limit, so we just set a pipeline limit. No
+ * app is likely to hit this soon. */
+ size_t max_descriptor_set_size =
+ ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
+ (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
+ 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
+ 32 /* sampler, largest when combined with image */ +
+ 64 /* sampled image */ + 64 /* storage image */);
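+
+ /* Worked example (illustrative): the divisor above sums to 224 bytes per
+ * "one of each" descriptor bundle, so max_descriptor_set_size stays far
+ * below INT32_MAX even after the dynamic-buffer reservation. */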
+
+ VkPhysicalDeviceLimits limits = {
+ .maxImageDimension1D = (1 << 14),
+ .maxImageDimension2D = (1 << 14),
+ .maxImageDimension3D = (1 << 11),
+ .maxImageDimensionCube = (1 << 14),
+ .maxImageArrayLayers = (1 << 11),
+ .maxTexelBufferElements = 128 * 1024 * 1024,
+ .maxUniformBufferRange = UINT32_MAX,
+ .maxStorageBufferRange = UINT32_MAX,
+ .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
+ .maxMemoryAllocationCount = UINT32_MAX,
+ .maxSamplerAllocationCount = 64 * 1024,
+ .bufferImageGranularity = 64, /* A cache line */
+ .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
+ .maxBoundDescriptorSets = MAX_SETS,
+ .maxPerStageDescriptorSamplers = max_descriptor_set_size,
+ .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
+ .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
+ .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
+ .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
+ .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
+ .maxPerStageResources = max_descriptor_set_size,
+ .maxDescriptorSetSamplers = max_descriptor_set_size,
+ .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
+ .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
+ .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
+ .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
+ .maxDescriptorSetSampledImages = max_descriptor_set_size,
+ .maxDescriptorSetStorageImages = max_descriptor_set_size,
+ .maxDescriptorSetInputAttachments = max_descriptor_set_size,
+ .maxVertexInputAttributes = 32,
+ .maxVertexInputBindings = 32,
+ .maxVertexInputAttributeOffset = 2047,
+ .maxVertexInputBindingStride = 2048,
+ .maxVertexOutputComponents = 128,
+ .maxTessellationGenerationLevel = 64,
+ .maxTessellationPatchSize = 32,
+ .maxTessellationControlPerVertexInputComponents = 128,
+ .maxTessellationControlPerVertexOutputComponents = 128,
+ .maxTessellationControlPerPatchOutputComponents = 120,
+ .maxTessellationControlTotalOutputComponents = 4096,
+ .maxTessellationEvaluationInputComponents = 128,
+ .maxTessellationEvaluationOutputComponents = 128,
+ .maxGeometryShaderInvocations = 127,
+ .maxGeometryInputComponents = 64,
+ .maxGeometryOutputComponents = 128,
+ .maxGeometryOutputVertices = 256,
+ .maxGeometryTotalOutputComponents = 1024,
+ .maxFragmentInputComponents = 128,
+ .maxFragmentOutputAttachments = 8,
+ .maxFragmentDualSrcAttachments = 1,
+ .maxFragmentCombinedOutputResources = 8,
+ .maxComputeSharedMemorySize = 32768,
+ .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
+ .maxComputeWorkGroupInvocations = 2048,
+ .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
+ .subPixelPrecisionBits = 4 /* FIXME */,
+ .subTexelPrecisionBits = 4 /* FIXME */,
+ .mipmapPrecisionBits = 4 /* FIXME */,
+ .maxDrawIndexedIndexValue = UINT32_MAX,
+ .maxDrawIndirectCount = UINT32_MAX,
+ .maxSamplerLodBias = 16,
+ .maxSamplerAnisotropy = 16,
+ .maxViewports = MAX_VIEWPORTS,
+ .maxViewportDimensions = { (1 << 14), (1 << 14) },
+ .viewportBoundsRange = { INT16_MIN, INT16_MAX },
+ .viewportSubPixelBits = 8,
+ .minMemoryMapAlignment = 4096, /* A page */
+ .minTexelBufferOffsetAlignment = 1,
+ .minUniformBufferOffsetAlignment = 4,
+ .minStorageBufferOffsetAlignment = 4,
+ .minTexelOffset = -32,
+ .maxTexelOffset = 31,
+ .minTexelGatherOffset = -32,
+ .maxTexelGatherOffset = 31,
+ .minInterpolationOffset = -2,
+ .maxInterpolationOffset = 2,
+ .subPixelInterpolationOffsetBits = 8,
+ .maxFramebufferWidth = (1 << 14),
+ .maxFramebufferHeight = (1 << 14),
+ .maxFramebufferLayers = (1 << 10),
+ .framebufferColorSampleCounts = sample_counts,
+ .framebufferDepthSampleCounts = sample_counts,
+ .framebufferStencilSampleCounts = sample_counts,
+ .framebufferNoAttachmentsSampleCounts = sample_counts,
+ .maxColorAttachments = MAX_RTS,
+ .sampledImageColorSampleCounts = sample_counts,
+ .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
+ .sampledImageDepthSampleCounts = sample_counts,
+ .sampledImageStencilSampleCounts = sample_counts,
+ .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
+ .maxSampleMaskWords = 1,
+ .timestampComputeAndGraphics = true,
+ .timestampPeriod = 1,
+ .maxClipDistances = 8,
+ .maxCullDistances = 8,
+ .maxCombinedClipAndCullDistances = 8,
+ .discreteQueuePriorities = 1,
+ .pointSizeRange = { 0.125, 255.875 },
+ .lineWidthRange = { 0.0, 7.9921875 },
+ .pointSizeGranularity = (1.0 / 8.0),
+ .lineWidthGranularity = (1.0 / 128.0),
+ .strictLines = false, /* FINISHME */
+ .standardSampleLocations = true,
+ .optimalBufferCopyOffsetAlignment = 128,
+ .optimalBufferCopyRowPitchAlignment = 128,
+ .nonCoherentAtomSize = 64,
+ };
+
+ *pProperties = (VkPhysicalDeviceProperties){
+ .apiVersion = tu_physical_device_api_version(pdevice),
+ .driverVersion = vk_get_driver_version(),
+ .vendorID = 0, /* TODO */
+ .deviceID = 0,
+ .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
+ .limits = limits,
+ .sparseProperties = { 0 },
+ };
+
+ strcpy(pProperties->deviceName, pdevice->name);
+ memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
+}
+
+void
+tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceProperties2KHR *pProperties)
+{
+ TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
+ tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
+
+ vk_foreach_struct(ext, pProperties->pNext)
+ {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
+ VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
+ (VkPhysicalDevicePushDescriptorPropertiesKHR *)ext;
+ properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
+ break;
+ }
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
+ VkPhysicalDeviceIDPropertiesKHR *properties =
+ (VkPhysicalDeviceIDPropertiesKHR *)ext;
+ memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
+ memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
+ properties->deviceLUIDValid = false;
+ break;
+ }
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
+ VkPhysicalDeviceMultiviewPropertiesKHR *properties =
+ (VkPhysicalDeviceMultiviewPropertiesKHR *)ext;
+ properties->maxMultiviewViewCount = MAX_VIEWS;
+ properties->maxMultiviewInstanceIndex = INT_MAX;
+ break;
+ }
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
+ VkPhysicalDevicePointClippingPropertiesKHR *properties =
+ (VkPhysicalDevicePointClippingPropertiesKHR *)ext;
+ properties->pointClippingBehavior =
+ VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
+ break;
+ }
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
+ VkPhysicalDeviceMaintenance3Properties *properties =
+ (VkPhysicalDeviceMaintenance3Properties *)ext;
+ /* Make sure everything is addressable by a signed 32-bit int, and
+ * our largest descriptors are 96 bytes. */
+ properties->maxPerSetDescriptors = (1ull << 31) / 96;
+ /* Our buffer size fields allow only this much */
+ properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+}
+
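+/* Helper shared by the two entry points below; it takes an array of
+ * pointers so the ...2() variant can pass the embedded
+ * queueFamilyProperties structs in place without copying. */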
+static void
+tu_get_physical_device_queue_family_properties(
+ struct tu_physical_device *pdevice,
+ uint32_t *pCount,
+ VkQueueFamilyProperties **pQueueFamilyProperties)
+{
+ int num_queue_families = 1;
+ int idx;
+ if (pQueueFamilyProperties == NULL) {
+ *pCount = num_queue_families;
+ return;
+ }
+
+ if (!*pCount)
+ return;
+
+ idx = 0;
+ if (*pCount >= 1) {
+ *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties){
+ .queueFlags =
+ VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
+ .queueCount = 1,
+ .timestampValidBits = 64,
+ .minImageTransferGranularity = (VkExtent3D){ 1, 1, 1 },
+ };
+ idx++;
+ }
+
+ *pCount = idx;
+}
+
+void
+tu_GetPhysicalDeviceQueueFamilyProperties(
+ VkPhysicalDevice physicalDevice,
+ uint32_t *pCount,
+ VkQueueFamilyProperties *pQueueFamilyProperties)
+{
+ TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
+ if (!pQueueFamilyProperties) {
+ tu_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
+ return;
+ }
+ VkQueueFamilyProperties *properties[] = {
+ pQueueFamilyProperties + 0,
+ };
+ tu_get_physical_device_queue_family_properties(pdevice, pCount, properties);
+ assert(*pCount <= 1);
+}
+
+void
+tu_GetPhysicalDeviceQueueFamilyProperties2(
+ VkPhysicalDevice physicalDevice,
+ uint32_t *pCount,
+ VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
+{
+ TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
+ if (!pQueueFamilyProperties) {
+ tu_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
+ return;
+ }
+ VkQueueFamilyProperties *properties[] = {
+ &pQueueFamilyProperties[0].queueFamilyProperties,
+ };
+ tu_get_physical_device_queue_family_properties(pdevice, pCount, properties);
+ assert(*pCount <= 1);
+}
+
+void
+tu_GetPhysicalDeviceMemoryProperties(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceMemoryProperties *pMemoryProperties)
+{
+ stub();
+}
+
+void
+tu_GetPhysicalDeviceMemoryProperties2(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
+{
+ tu_GetPhysicalDeviceMemoryProperties(physicalDevice,
+ &pMemoryProperties->memoryProperties);
+}
+
+static int
+tu_queue_init(struct tu_device *device,
+ struct tu_queue *queue,
+ uint32_t queue_family_index,
+ int idx,
+ VkDeviceQueueCreateFlags flags)
+{
+ queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ queue->device = device;
+ queue->queue_family_index = queue_family_index;
+ queue->queue_idx = idx;
+ queue->flags = flags;
+
+ return VK_SUCCESS;
+}
+
+static void
+tu_queue_finish(struct tu_queue *queue)
+{
+}
+
+static int
+tu_get_device_extension_index(const char *name)
+{
+ for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
+ if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
+ return i;
+ }
+ return -1;
+}
+
+VkResult
+tu_CreateDevice(VkPhysicalDevice physicalDevice,
+ const VkDeviceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkDevice *pDevice)
+{
+ TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
+ VkResult result;
+ struct tu_device *device;
+
+ /* Check enabled features */
+ if (pCreateInfo->pEnabledFeatures) {
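+ /* VkPhysicalDeviceFeatures is a packed struct of VkBool32 fields, so
+ * both structs can be scanned as flat boolean arrays. */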
+ VkPhysicalDeviceFeatures supported_features;
+ tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
+ VkBool32 *supported_feature = (VkBool32 *)&supported_features;
+ VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
+ unsigned num_features =
+ sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
+ for (uint32_t i = 0; i < num_features; i++) {
+ if (enabled_feature[i] && !supported_feature[i])
+ return vk_error(physical_device->instance,
+ VK_ERROR_FEATURE_NOT_PRESENT);
+ }
+ }
+
+ device = vk_zalloc2(&physical_device->instance->alloc,
+ pAllocator,
+ sizeof(*device),
+ 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+ if (!device)
+ return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ device->instance = physical_device->instance;
+ device->physical_device = physical_device;
+
+ if (pAllocator)
+ device->alloc = *pAllocator;
+ else
+ device->alloc = physical_device->instance->alloc;
+
+ for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
+ const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
+ int index = tu_get_device_extension_index(ext_name);
+ if (index < 0 ||
+ !physical_device->supported_extensions.extensions[index]) {
+ vk_free(&device->alloc, device);
+ return vk_error(physical_device->instance,
+ VK_ERROR_EXTENSION_NOT_PRESENT);
+ }
+
+ device->enabled_extensions.extensions[index] = true;
+ }
+
+ for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
+ const VkDeviceQueueCreateInfo *queue_create =
+ &pCreateInfo->pQueueCreateInfos[i];
+ uint32_t qfi = queue_create->queueFamilyIndex;
+ device->queues[qfi] =
+ vk_alloc(&device->alloc,
+ queue_create->queueCount * sizeof(struct tu_queue),
+ 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+ if (!device->queues[qfi]) {
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
+ goto fail;
+ }
+
+ memset(device->queues[qfi],
+ 0,
+ queue_create->queueCount * sizeof(struct tu_queue));
+
+ device->queue_count[qfi] = queue_create->queueCount;
+
+ for (unsigned q = 0; q < queue_create->queueCount; q++) {
+ result = tu_queue_init(
+ device, &device->queues[qfi][q], qfi, q, queue_create->flags);
+ if (result != VK_SUCCESS)
+ goto fail;
+ }
+ }
+
+ VkPipelineCacheCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
+ ci.pNext = NULL;
+ ci.flags = 0;
+ ci.pInitialData = NULL;
+ ci.initialDataSize = 0;
+ VkPipelineCache pc;
+ result =
+ tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
+ if (result != VK_SUCCESS)
+ goto fail;
+
+ device->mem_cache = tu_pipeline_cache_from_handle(pc);
+
+ *pDevice = tu_device_to_handle(device);
+ return VK_SUCCESS;
+
+fail:
+ for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
+ for (unsigned q = 0; q < device->queue_count[i]; q++)
+ tu_queue_finish(&device->queues[i][q]);
+ if (device->queue_count[i])
+ vk_free(&device->alloc, device->queues[i]);
+ }
+
+ vk_free(&device->alloc, device);
+ return result;
+}
+
+void
+tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+
+ if (!device)
+ return;
+
+ for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
+ for (unsigned q = 0; q < device->queue_count[i]; q++)
+ tu_queue_finish(&device->queues[i][q]);
+ if (device->queue_count[i])
+ vk_free(&device->alloc, device->queues[i]);
+ }
+
+ VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
+ tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
+
+ vk_free(&device->alloc, device);
+}
+
+VkResult
+tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
+ VkLayerProperties *pProperties)
+{
+ if (pProperties == NULL) {
+ *pPropertyCount = 0;
+ return VK_SUCCESS;
+ }
+
+ /* None supported at this time */
+ return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
+}
+
+VkResult
+tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
+ uint32_t *pPropertyCount,
+ VkLayerProperties *pProperties)
+{
+ if (pProperties == NULL) {
+ *pPropertyCount = 0;
+ return VK_SUCCESS;
+ }
+
+ /* None supported at this time */
+ return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
+}
+
+void
+tu_GetDeviceQueue2(VkDevice _device,
+ const VkDeviceQueueInfo2 *pQueueInfo,
+ VkQueue *pQueue)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ struct tu_queue *queue;
+
+ queue =
+ &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
+ if (pQueueInfo->flags != queue->flags) {
+ /* From the Vulkan 1.1.70 spec:
+ *
+ * "The queue returned by vkGetDeviceQueue2 must have the same
+ * flags value from this structure as that used at device
+ * creation time in a VkDeviceQueueCreateInfo instance. If no
+ * matching flags were specified at device creation time then
+ * pQueue will return VK_NULL_HANDLE."
+ */
+ *pQueue = VK_NULL_HANDLE;
+ return;
+ }
+
+ *pQueue = tu_queue_to_handle(queue);
+}
+
+void
+tu_GetDeviceQueue(VkDevice _device,
+ uint32_t queueFamilyIndex,
+ uint32_t queueIndex,
+ VkQueue *pQueue)
+{
+ const VkDeviceQueueInfo2 info =
+ (VkDeviceQueueInfo2){ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
+ .queueFamilyIndex = queueFamilyIndex,
+ .queueIndex = queueIndex };
+
+ tu_GetDeviceQueue2(_device, &info, pQueue);
+}
+
+VkResult
+tu_QueueSubmit(VkQueue _queue,
+ uint32_t submitCount,
+ const VkSubmitInfo *pSubmits,
+ VkFence _fence)
+{
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_QueueWaitIdle(VkQueue _queue)
+{
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_DeviceWaitIdle(VkDevice _device)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+
+ for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
+ for (unsigned q = 0; q < device->queue_count[i]; q++) {
+ tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
+ }
+ }
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
+ uint32_t *pPropertyCount,
+ VkExtensionProperties *pProperties)
+{
+ VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
+
+ for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
+ if (tu_supported_instance_extensions.extensions[i]) {
+ vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
+ }
+ }
+
+ return vk_outarray_status(&out);
+}
+
+VkResult
+tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
+ const char *pLayerName,
+ uint32_t *pPropertyCount,
+ VkExtensionProperties *pProperties)
+{
+ TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
+ VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
+
+ for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
+ if (device->supported_extensions.extensions[i]) {
+ vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
+ }
+ }
+
+ return vk_outarray_status(&out);
+}
+
+PFN_vkVoidFunction
+tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
+{
+ TU_FROM_HANDLE(tu_instance, instance, _instance);
+
+ return tu_lookup_entrypoint_checked(pName,
+ instance ? instance->api_version : 0,
+ instance ? &instance->enabled_extensions
+ : NULL,
+ NULL);
+}
+
+/* The loader wants us to expose a second GetInstanceProcAddr function
+ * to work around certain LD_PRELOAD issues seen in apps.
+ */
+PUBLIC
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
+vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
+
+PUBLIC
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
+vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
+{
+ return tu_GetInstanceProcAddr(instance, pName);
+}
+
+PFN_vkVoidFunction
+tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+
+ return tu_lookup_entrypoint_checked(pName,
+ device->instance->api_version,
+ &device->instance->enabled_extensions,
+ &device->enabled_extensions);
+}
+
+static VkResult
+tu_alloc_memory(struct tu_device *device,
+ const VkMemoryAllocateInfo *pAllocateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkDeviceMemory *pMem)
+{
+ struct tu_device_memory *mem;
+
+ assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
+
+ if (pAllocateInfo->allocationSize == 0) {
+ /* Apparently, this is allowed */
+ *pMem = VK_NULL_HANDLE;
+ return VK_SUCCESS;
+ }
+
+ mem = vk_alloc2(&device->alloc,
+ pAllocator,
+ sizeof(*mem),
+ 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (mem == NULL)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ *pMem = tu_device_memory_to_handle(mem);
+
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_AllocateMemory(VkDevice _device,
+ const VkMemoryAllocateInfo *pAllocateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkDeviceMemory *pMem)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
+}
+
+void
+tu_FreeMemory(VkDevice _device,
+ VkDeviceMemory _mem,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_device_memory, mem, _mem);
+
+ if (mem == NULL)
+ return;
+
+ vk_free2(&device->alloc, pAllocator, mem);
+}
+
+VkResult
+tu_MapMemory(VkDevice _device,
+ VkDeviceMemory _memory,
+ VkDeviceSize offset,
+ VkDeviceSize size,
+ VkMemoryMapFlags flags,
+ void **ppData)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_device_memory, mem, _memory);
+
+ if (mem == NULL) {
+ *ppData = NULL;
+ return VK_SUCCESS;
+ }
+
+ if (mem->user_ptr)
+ *ppData = mem->user_ptr;
+ else
+ *ppData = NULL;
+
+ if (*ppData) {
+ *ppData += offset;
+ return VK_SUCCESS;
+ }
+
+ return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
+}
+
+void
+tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
+{
+ TU_FROM_HANDLE(tu_device_memory, mem, _memory);
+
+ if (mem == NULL)
+ return;
+}
+
+VkResult
+tu_FlushMappedMemoryRanges(VkDevice _device,
+ uint32_t memoryRangeCount,
+ const VkMappedMemoryRange *pMemoryRanges)
+{
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_InvalidateMappedMemoryRanges(VkDevice _device,
+ uint32_t memoryRangeCount,
+ const VkMappedMemoryRange *pMemoryRanges)
+{
+ return VK_SUCCESS;
+}
+
+void
+tu_GetBufferMemoryRequirements(VkDevice _device,
+ VkBuffer _buffer,
+ VkMemoryRequirements *pMemoryRequirements)
+{
+ TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
+
+ pMemoryRequirements->alignment = 16;
+ pMemoryRequirements->size =
+ align64(buffer->size, pMemoryRequirements->alignment);
+}
+
+void
+tu_GetBufferMemoryRequirements2(
+ VkDevice device,
+ const VkBufferMemoryRequirementsInfo2KHR *pInfo,
+ VkMemoryRequirements2KHR *pMemoryRequirements)
+{
+ tu_GetBufferMemoryRequirements(
+ device, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
+}
+
+void
+tu_GetImageMemoryRequirements(VkDevice _device,
+ VkImage _image,
+ VkMemoryRequirements *pMemoryRequirements)
+{
+ TU_FROM_HANDLE(tu_image, image, _image);
+
+ /* TODO: memory type */
+
+ pMemoryRequirements->size = image->size;
+ pMemoryRequirements->alignment = image->alignment;
+}
+
+void
+tu_GetImageMemoryRequirements2(VkDevice device,
+ const VkImageMemoryRequirementsInfo2KHR *pInfo,
+ VkMemoryRequirements2KHR *pMemoryRequirements)
+{
+ tu_GetImageMemoryRequirements(
+ device, pInfo->image, &pMemoryRequirements->memoryRequirements);
+}
+
+void
+tu_GetImageSparseMemoryRequirements(
+ VkDevice device,
+ VkImage image,
+ uint32_t *pSparseMemoryRequirementCount,
+ VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
+{
+ stub();
+}
+
+void
+tu_GetImageSparseMemoryRequirements2(
+ VkDevice device,
+ const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
+ uint32_t *pSparseMemoryRequirementCount,
+ VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
+{
+ stub();
+}
+
+void
+tu_GetDeviceMemoryCommitment(VkDevice device,
+ VkDeviceMemory memory,
+ VkDeviceSize *pCommittedMemoryInBytes)
+{
+ *pCommittedMemoryInBytes = 0;
+}
+
+VkResult
+tu_BindBufferMemory2(VkDevice device,
+ uint32_t bindInfoCount,
+ const VkBindBufferMemoryInfoKHR *pBindInfos)
+{
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_BindBufferMemory(VkDevice device,
+ VkBuffer buffer,
+ VkDeviceMemory memory,
+ VkDeviceSize memoryOffset)
+{
+ const VkBindBufferMemoryInfoKHR info = {
+ .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
+ .buffer = buffer,
+ .memory = memory,
+ .memoryOffset = memoryOffset
+ };
+
+ return tu_BindBufferMemory2(device, 1, &info);
+}
+
+VkResult
+tu_BindImageMemory2(VkDevice device,
+ uint32_t bindInfoCount,
+ const VkBindImageMemoryInfoKHR *pBindInfos)
+{
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_BindImageMemory(VkDevice device,
+ VkImage image,
+ VkDeviceMemory memory,
+ VkDeviceSize memoryOffset)
+{
+ const VkBindImageMemoryInfoKHR info = {
+      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
+ .image = image,
+ .memory = memory,
+ .memoryOffset = memoryOffset
+ };
+
+ return tu_BindImageMemory2(device, 1, &info);
+}
+
+VkResult
+tu_QueueBindSparse(VkQueue _queue,
+ uint32_t bindInfoCount,
+ const VkBindSparseInfo *pBindInfo,
+ VkFence _fence)
+{
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_CreateFence(VkDevice _device,
+ const VkFenceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkFence *pFence)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+
+ struct tu_fence *fence = vk_alloc2(&device->alloc,
+ pAllocator,
+ sizeof(*fence),
+ 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+ if (!fence)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ *pFence = tu_fence_to_handle(fence);
+
+ return VK_SUCCESS;
+}
+
+void
+tu_DestroyFence(VkDevice _device,
+ VkFence _fence,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_fence, fence, _fence);
+
+ if (!fence)
+ return;
+
+ vk_free2(&device->alloc, pAllocator, fence);
+}
+
+VkResult
+tu_WaitForFences(VkDevice _device,
+ uint32_t fenceCount,
+ const VkFence *pFences,
+ VkBool32 waitAll,
+ uint64_t timeout)
+{
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
+{
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_GetFenceStatus(VkDevice _device, VkFence _fence)
+{
+ return VK_SUCCESS;
+}
+
+/* Queue semaphore functions */
+
+VkResult
+tu_CreateSemaphore(VkDevice _device,
+ const VkSemaphoreCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkSemaphore *pSemaphore)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+
+ struct tu_semaphore *sem = vk_alloc2(&device->alloc,
+ pAllocator,
+ sizeof(*sem),
+ 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!sem)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ *pSemaphore = tu_semaphore_to_handle(sem);
+ return VK_SUCCESS;
+}
+
+void
+tu_DestroySemaphore(VkDevice _device,
+ VkSemaphore _semaphore,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
+
+   if (!sem)
+ return;
+
+ vk_free2(&device->alloc, pAllocator, sem);
+}
+
+VkResult
+tu_CreateEvent(VkDevice _device,
+ const VkEventCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkEvent *pEvent)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ struct tu_event *event = vk_alloc2(&device->alloc,
+ pAllocator,
+ sizeof(*event),
+ 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+ if (!event)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ *pEvent = tu_event_to_handle(event);
+
+ return VK_SUCCESS;
+}
+
+void
+tu_DestroyEvent(VkDevice _device,
+ VkEvent _event,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_event, event, _event);
+
+ if (!event)
+ return;
+ vk_free2(&device->alloc, pAllocator, event);
+}
+
+VkResult
+tu_GetEventStatus(VkDevice _device, VkEvent _event)
+{
+ TU_FROM_HANDLE(tu_event, event, _event);
+
+ if (*event->map == 1)
+ return VK_EVENT_SET;
+ return VK_EVENT_RESET;
+}
+
+VkResult
+tu_SetEvent(VkDevice _device, VkEvent _event)
+{
+ TU_FROM_HANDLE(tu_event, event, _event);
+ *event->map = 1;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_ResetEvent(VkDevice _device, VkEvent _event)
+{
+ TU_FROM_HANDLE(tu_event, event, _event);
+ *event->map = 0;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_CreateBuffer(VkDevice _device,
+ const VkBufferCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkBuffer *pBuffer)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ struct tu_buffer *buffer;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
+
+ buffer = vk_alloc2(&device->alloc,
+ pAllocator,
+ sizeof(*buffer),
+ 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (buffer == NULL)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ buffer->size = pCreateInfo->size;
+ buffer->usage = pCreateInfo->usage;
+ buffer->flags = pCreateInfo->flags;
+
+ *pBuffer = tu_buffer_to_handle(buffer);
+
+ return VK_SUCCESS;
+}
+
+void
+tu_DestroyBuffer(VkDevice _device,
+ VkBuffer _buffer,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
+
+ if (!buffer)
+ return;
+
+ vk_free2(&device->alloc, pAllocator, buffer);
+}
+
+static uint32_t
+tu_surface_max_layer_count(struct tu_image_view *iview)
+{
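+   /* For 3D image views, the addressable "layers" are the depth slices. */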
+ return iview->type == VK_IMAGE_VIEW_TYPE_3D
+ ? iview->extent.depth
+ : (iview->base_layer + iview->layer_count);
+}
+
+VkResult
+tu_CreateFramebuffer(VkDevice _device,
+ const VkFramebufferCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkFramebuffer *pFramebuffer)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ struct tu_framebuffer *framebuffer;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
+
+ size_t size =
+ sizeof(*framebuffer) +
+ sizeof(struct tu_attachment_info) * pCreateInfo->attachmentCount;
+ framebuffer = vk_alloc2(
+ &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (framebuffer == NULL)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ framebuffer->attachment_count = pCreateInfo->attachmentCount;
+ framebuffer->width = pCreateInfo->width;
+ framebuffer->height = pCreateInfo->height;
+ framebuffer->layers = pCreateInfo->layers;
+ for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
+ VkImageView _iview = pCreateInfo->pAttachments[i];
+ struct tu_image_view *iview = tu_image_view_from_handle(_iview);
+ framebuffer->attachments[i].attachment = iview;
+
+ framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
+ framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
+ framebuffer->layers =
+ MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
+ }
+
+ *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
+ return VK_SUCCESS;
+}
+
+void
+tu_DestroyFramebuffer(VkDevice _device,
+ VkFramebuffer _fb,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
+
+ if (!fb)
+ return;
+ vk_free2(&device->alloc, pAllocator, fb);
+}
+
+static void
+tu_init_sampler(struct tu_device *device,
+ struct tu_sampler *sampler,
+ const VkSamplerCreateInfo *pCreateInfo)
+{
+}
+
+VkResult
+tu_CreateSampler(VkDevice _device,
+ const VkSamplerCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkSampler *pSampler)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ struct tu_sampler *sampler;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
+
+ sampler = vk_alloc2(&device->alloc,
+ pAllocator,
+ sizeof(*sampler),
+ 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!sampler)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ tu_init_sampler(device, sampler, pCreateInfo);
+ *pSampler = tu_sampler_to_handle(sampler);
+
+ return VK_SUCCESS;
+}
+
+void
+tu_DestroySampler(VkDevice _device,
+ VkSampler _sampler,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
+
+ if (!sampler)
+ return;
+ vk_free2(&device->alloc, pAllocator, sampler);
+}
+
+/* vk_icd.h does not declare this function, so we declare it here to
+ * suppress -Wmissing-prototypes.
+ */
+PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
+vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
+
+PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
+vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
+{
+ /* For the full details on loader interface versioning, see
+ * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
+ * What follows is a condensed summary, to help you navigate the large and
+ * confusing official doc.
+ *
+ * - Loader interface v0 is incompatible with later versions. We don't
+ * support it.
+ *
+ * - In loader interface v1:
+ * - The first ICD entrypoint called by the loader is
+ * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
+ * entrypoint.
+ * - The ICD must statically expose no other Vulkan symbol unless it is
+ * linked with -Bsymbolic.
+ * - Each dispatchable Vulkan handle created by the ICD must be
+ * a pointer to a struct whose first member is VK_LOADER_DATA. The
+ * ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
+ * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
+ * vkDestroySurfaceKHR(). The ICD must be capable of working with
+ * such loader-managed surfaces.
+ *
+ * - Loader interface v2 differs from v1 in:
+ * - The first ICD entrypoint called by the loader is
+ * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
+ * statically expose this entrypoint.
+ *
+ * - Loader interface v3 differs from v2 in:
+ * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
+ *      vkDestroySurfaceKHR(), and the other APIs that use VkSurfaceKHR,
+ * because the loader no longer does so.
+ */
+ *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
+ return VK_SUCCESS;
+}
+
+void
+tu_GetPhysicalDeviceExternalSemaphoreProperties(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
+ VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
+{
+ pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
+ pExternalSemaphoreProperties->compatibleHandleTypes = 0;
+ pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
+}
+
+void
+tu_GetPhysicalDeviceExternalFenceProperties(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
+ VkExternalFencePropertiesKHR *pExternalFenceProperties)
+{
+ pExternalFenceProperties->exportFromImportedHandleTypes = 0;
+ pExternalFenceProperties->compatibleHandleTypes = 0;
+ pExternalFenceProperties->externalFenceFeatures = 0;
+}
+
+VkResult
+tu_CreateDebugReportCallbackEXT(
+ VkInstance _instance,
+ const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkDebugReportCallbackEXT *pCallback)
+{
+ TU_FROM_HANDLE(tu_instance, instance, _instance);
+ return vk_create_debug_report_callback(&instance->debug_report_callbacks,
+ pCreateInfo,
+ pAllocator,
+ &instance->alloc,
+ pCallback);
+}
+
+void
+tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
+ VkDebugReportCallbackEXT _callback,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_instance, instance, _instance);
+ vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
+ _callback,
+ pAllocator,
+ &instance->alloc);
+}
+
+void
+tu_DebugReportMessageEXT(VkInstance _instance,
+ VkDebugReportFlagsEXT flags,
+ VkDebugReportObjectTypeEXT objectType,
+ uint64_t object,
+ size_t location,
+ int32_t messageCode,
+ const char *pLayerPrefix,
+ const char *pMessage)
+{
+ TU_FROM_HANDLE(tu_instance, instance, _instance);
+ vk_debug_report(&instance->debug_report_callbacks,
+ flags,
+ objectType,
+ object,
+ location,
+ messageCode,
+ pLayerPrefix,
+ pMessage);
+}
+
+void
+tu_GetDeviceGroupPeerMemoryFeatures(
+ VkDevice device,
+ uint32_t heapIndex,
+ uint32_t localDeviceIndex,
+ uint32_t remoteDeviceIndex,
+ VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
+{
+ assert(localDeviceIndex == remoteDeviceIndex);
+
+ *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
+ VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
+ VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
+ VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
+}
diff --git a/src/freedreno/vulkan/tu_entrypoints_gen.py b/src/freedreno/vulkan/tu_entrypoints_gen.py
new file mode 100644
index 00000000000..865ad1de843
--- /dev/null
+++ b/src/freedreno/vulkan/tu_entrypoints_gen.py
@@ -0,0 +1,506 @@
+# coding=utf-8
+#
+# Copyright © 2015, 2017 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import argparse
+import copy
+import functools
+import math
+import os
+import xml.etree.cElementTree as et
+
+from collections import OrderedDict, namedtuple
+from mako.template import Template
+
+from tu_extensions import VkVersion, MAX_API_VERSION, EXTENSIONS
+
+# We generate a static hash table for entry point lookup, i.e. for
+# vkGetInstanceProcAddr() and vkGetDeviceProcAddr(). We use a linear
+# congruential generator for our hash function and a power-of-two size
+# table. The prime numbers are determined experimentally.
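+#
+# For illustration only, the C lookup generated below (tu_lookup_entrypoint)
+# behaves roughly like this sketch:
+#
+#   hash = 0
+#   for c in name:
+#       hash = (hash * PRIME_FACTOR + ord(c)) & U32_MASK
+#   h = hash
+#   while True:
+#       i = string_map[h & hash_mask]
+#       if i == none:                  # empty slot: the name is unknown
+#           return -1
+#       if entries[i].hash == hash and entries[i].name == name:
+#           return entries[i].num
+#       h += PRIME_STEP                # probe the next slot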
+
+# We currently don't use layers in tu, but we keep the ability (inherited
+# from anv) so we can use it for device groups.
+LAYERS = [
+ 'tu'
+]
+
+TEMPLATE_H = Template("""\
+/* This file generated from ${filename}, don't edit directly. */
+
+struct tu_dispatch_table {
+ union {
+ void *entrypoints[${len(entrypoints)}];
+ struct {
+ % for e in entrypoints:
+ % if e.guard is not None:
+#ifdef ${e.guard}
+ PFN_${e.name} ${e.name};
+#else
+ void *${e.name};
+#endif
+ % else:
+ PFN_${e.name} ${e.name};
+ % endif
+ % endfor
+ };
+ };
+};
+
+% for e in entrypoints:
+ % if e.alias:
+ <% continue %>
+ % endif
+ % if e.guard is not None:
+#ifdef ${e.guard}
+ % endif
+ % for layer in LAYERS:
+ ${e.return_type} ${e.prefixed_name(layer)}(${e.decl_params()});
+ % endfor
+ % if e.guard is not None:
+#endif // ${e.guard}
+ % endif
+% endfor
+""", output_encoding='utf-8')
+
+TEMPLATE_C = Template(u"""\
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* This file generated from ${filename}, don't edit directly. */
+
+#include "tu_private.h"
+
+struct string_map_entry {
+ uint32_t name;
+ uint32_t hash;
+ uint32_t num;
+};
+
+/* We use a big string constant to avoid lots of relocations from the entry
+ * point table to lots of little strings. The entries in the entry point table
+ * store the index into this big string.
+ */
+
+static const char strings[] =
+% for s in strmap.sorted_strings:
+ "${s.string}\\0"
+% endfor
+;
+
+static const struct string_map_entry string_map_entries[] = {
+% for s in strmap.sorted_strings:
+ { ${s.offset}, ${'{:0=#8x}'.format(s.hash)}, ${s.num} }, /* ${s.string} */
+% endfor
+};
+
+/* Hash table stats:
+ * size ${len(strmap.sorted_strings)} entries
+ * collisions entries:
+% for i in range(10):
+ * ${i}${'+' if i == 9 else ' '} ${strmap.collisions[i]}
+% endfor
+ */
+
+#define none 0xffff
+static const uint16_t string_map[${strmap.hash_size}] = {
+% for e in strmap.mapping:
+ ${ '{:0=#6x}'.format(e) if e >= 0 else 'none' },
+% endfor
+};
+
+/* Weak aliases for all potential implementations. These will resolve to
+ * NULL if they're not defined, which lets the resolve_entrypoint() function
+ * pick the correct entry point.
+ */
+
+% for layer in LAYERS:
+ % for e in entrypoints:
+ % if e.alias:
+ <% continue %>
+ % endif
+ % if e.guard is not None:
+#ifdef ${e.guard}
+ % endif
+ ${e.return_type} ${e.prefixed_name(layer)}(${e.decl_params()}) __attribute__ ((weak));
+ % if e.guard is not None:
+#endif // ${e.guard}
+ % endif
+ % endfor
+
+ const struct tu_dispatch_table ${layer}_layer = {
+ % for e in entrypoints:
+ % if e.guard is not None:
+#ifdef ${e.guard}
+ % endif
+ .${e.name} = ${e.prefixed_name(layer)},
+ % if e.guard is not None:
+#endif // ${e.guard}
+ % endif
+ % endfor
+ };
+% endfor
+
+static void * __attribute__ ((noinline))
+tu_resolve_entrypoint(uint32_t index)
+{
+ return tu_layer.entrypoints[index];
+}
+
+/** Return true if the core version or extension in which the given entrypoint
+ * is defined is enabled.
+ *
+ * If instance is NULL, we only allow the four commands that the Vulkan spec
+ * explicitly allows to be queried without an instance.
+ *
+ * If device is NULL, all device extensions are considered enabled.
+ */
+static bool
+tu_entrypoint_is_enabled(int index, uint32_t core_version,
+ const struct tu_instance_extension_table *instance,
+ const struct tu_device_extension_table *device)
+{
+ switch (index) {
+% for e in entrypoints:
+ case ${e.num}:
+ % if not e.device_command:
+ if (device) return false;
+ % endif
+ % if e.name == 'vkCreateInstance' or e.name == 'vkEnumerateInstanceExtensionProperties' or e.name == 'vkEnumerateInstanceLayerProperties' or e.name == 'vkEnumerateInstanceVersion':
+ return !device;
+ % elif e.core_version:
+ return instance && ${e.core_version.c_vk_version()} <= core_version;
+ % elif e.extensions:
+ % for ext in e.extensions:
+ % if ext.type == 'instance':
+ if (instance && instance->${ext.name[3:]}) return true;
+ % else:
+ if (instance && (!device || device->${ext.name[3:]})) return true;
+ % endif
+ %endfor
+ return false;
+ % else:
+ return instance;
+ % endif
+% endfor
+ default:
+ return false;
+ }
+}
+
+static int
+tu_lookup_entrypoint(const char *name)
+{
+ static const uint32_t prime_factor = ${strmap.prime_factor};
+ static const uint32_t prime_step = ${strmap.prime_step};
+ const struct string_map_entry *e;
+ uint32_t hash, h;
+ uint16_t i;
+ const char *p;
+
+ hash = 0;
+ for (p = name; *p; p++)
+ hash = hash * prime_factor + *p;
+
+ h = hash;
+ while (1) {
+ i = string_map[h & ${strmap.hash_mask}];
+ if (i == none)
+ return -1;
+ e = &string_map_entries[i];
+ if (e->hash == hash && strcmp(name, strings + e->name) == 0)
+ return e->num;
+ h += prime_step;
+ }
+
+ return -1;
+}
+
+void *
+tu_lookup_entrypoint_unchecked(const char *name)
+{
+ int index = tu_lookup_entrypoint(name);
+ if (index < 0)
+ return NULL;
+ return tu_resolve_entrypoint(index);
+}
+
+void *
+tu_lookup_entrypoint_checked(const char *name,
+ uint32_t core_version,
+ const struct tu_instance_extension_table *instance,
+ const struct tu_device_extension_table *device)
+{
+ int index = tu_lookup_entrypoint(name);
+ if (index < 0 || !tu_entrypoint_is_enabled(index, core_version, instance, device))
+ return NULL;
+ return tu_resolve_entrypoint(index);
+}""", output_encoding='utf-8')
+
+U32_MASK = 2**32 - 1
+
+PRIME_FACTOR = 5024183
+PRIME_STEP = 19
+
+def round_to_pow2(x):
+ return 2**int(math.ceil(math.log(x, 2)))
+
+class StringIntMapEntry(object):
+ def __init__(self, string, num):
+ self.string = string
+ self.num = num
+
+ # Calculate the same hash value that we will calculate in C.
+ h = 0
+ for c in string:
+ h = ((h * PRIME_FACTOR) + ord(c)) & U32_MASK
+ self.hash = h
+
+ self.offset = None
+
+class StringIntMap(object):
+ def __init__(self):
+ self.baked = False
+ self.strings = dict()
+
+ def add_string(self, string, num):
+ assert not self.baked
+ assert string not in self.strings
+ assert num >= 0 and num < 2**31
+ self.strings[string] = StringIntMapEntry(string, num)
+
+ def bake(self):
+ self.sorted_strings = \
+ sorted(self.strings.values(), key=lambda x: x.string)
+ offset = 0
+ for entry in self.sorted_strings:
+ entry.offset = offset
+ offset += len(entry.string) + 1
+
+ # Save off some values that we'll need in C
+ self.hash_size = round_to_pow2(len(self.strings) * 1.25)
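+        # e.g. 300 strings give round_to_pow2(375) = 512 slots (~0.59 load).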
+ self.hash_mask = self.hash_size - 1
+ self.prime_factor = PRIME_FACTOR
+ self.prime_step = PRIME_STEP
+
+ self.mapping = [-1] * self.hash_size
+ self.collisions = [0] * 10
+ for idx, s in enumerate(self.sorted_strings):
+ level = 0
+ h = s.hash
+ while self.mapping[h & self.hash_mask] >= 0:
+ h = h + PRIME_STEP
+ level = level + 1
+ self.collisions[min(level, 9)] += 1
+ self.mapping[h & self.hash_mask] = idx
+
+EntrypointParam = namedtuple('EntrypointParam', 'type name decl')
+
+class EntrypointBase(object):
+ def __init__(self, name):
+ self.name = name
+ self.alias = None
+ self.guard = None
+ self.enabled = False
+ self.num = None
+ # Extensions which require this entrypoint
+ self.core_version = None
+ self.extensions = []
+
+class Entrypoint(EntrypointBase):
+ def __init__(self, name, return_type, params, guard = None):
+ super(Entrypoint, self).__init__(name)
+ self.return_type = return_type
+ self.params = params
+ self.guard = guard
+ self.device_command = len(params) > 0 and (params[0].type == 'VkDevice' or params[0].type == 'VkQueue' or params[0].type == 'VkCommandBuffer')
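+        # e.g. vkCmdDraw's first parameter is a VkCommandBuffer, so it is a
+        # device command; vkCreateInstance is not.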
+
+ def prefixed_name(self, prefix):
+ assert self.name.startswith('vk')
+ return prefix + '_' + self.name[2:]
+
+ def decl_params(self):
+ return ', '.join(p.decl for p in self.params)
+
+ def call_params(self):
+ return ', '.join(p.name for p in self.params)
+
+class EntrypointAlias(EntrypointBase):
+ def __init__(self, name, entrypoint):
+ super(EntrypointAlias, self).__init__(name)
+ self.alias = entrypoint
+ self.device_command = entrypoint.device_command
+
+ def prefixed_name(self, prefix):
+ return self.alias.prefixed_name(prefix)
+
+def get_entrypoints(doc, entrypoints_to_defines, start_index):
+ """Extract the entry points from the registry."""
+ entrypoints = OrderedDict()
+
+ for command in doc.findall('./commands/command'):
+ if 'alias' in command.attrib:
+ alias = command.attrib['name']
+ target = command.attrib['alias']
+ entrypoints[alias] = EntrypointAlias(alias, entrypoints[target])
+ else:
+ name = command.find('./proto/name').text
+ ret_type = command.find('./proto/type').text
+ params = [EntrypointParam(
+ type = p.find('./type').text,
+ name = p.find('./name').text,
+ decl = ''.join(p.itertext())
+ ) for p in command.findall('./param')]
+ guard = entrypoints_to_defines.get(name)
+ # They really need to be unique
+ assert name not in entrypoints
+ entrypoints[name] = Entrypoint(name, ret_type, params, guard)
+
+ for feature in doc.findall('./feature'):
+ assert feature.attrib['api'] == 'vulkan'
+ version = VkVersion(feature.attrib['number'])
+ if version > MAX_API_VERSION:
+ continue
+
+ for command in feature.findall('./require/command'):
+ e = entrypoints[command.attrib['name']]
+ e.enabled = True
+ assert e.core_version is None
+ e.core_version = version
+
+ supported_exts = dict((ext.name, ext) for ext in EXTENSIONS)
+ for extension in doc.findall('.extensions/extension'):
+ ext_name = extension.attrib['name']
+ if ext_name not in supported_exts:
+ continue
+
+ ext = supported_exts[ext_name]
+ ext.type = extension.attrib['type']
+
+ for command in extension.findall('./require/command'):
+ e = entrypoints[command.attrib['name']]
+ e.enabled = True
+ assert e.core_version is None
+ e.extensions.append(ext)
+
+    # If an alias's target entry point is not enabled, promote the alias to
+    # a real entry point by cloning the target under the alias's name.
+ for e in entrypoints.values():
+ if e.alias and not e.alias.enabled:
+ e_clone = copy.deepcopy(e.alias)
+ e_clone.enabled = True
+ e_clone.name = e.name
+ entrypoints[e.name] = e_clone
+
+ return [e for e in entrypoints.values() if e.enabled]
+
+
+def get_entrypoints_defines(doc):
+ """Maps entry points to extension defines."""
+ entrypoints_to_defines = {}
+
+ for extension in doc.findall('./extensions/extension[@protect]'):
+ define = extension.attrib['protect']
+
+ for entrypoint in extension.findall('./require/command'):
+ fullname = entrypoint.attrib['name']
+ entrypoints_to_defines[fullname] = define
+
+ for extension in doc.findall('./extensions/extension[@platform]'):
+ platform = extension.attrib['platform']
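+        # Platform guards are named VK_USE_PLATFORM_<NAME>_KHR, except
+        # XLIB_XRANDR, whose guard ends in _EXT.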
+ ext = '_KHR'
+ if platform.upper() == 'XLIB_XRANDR':
+ ext = '_EXT'
+ define = 'VK_USE_PLATFORM_' + platform.upper() + ext
+
+ for entrypoint in extension.findall('./require/command'):
+ fullname = entrypoint.attrib['name']
+ entrypoints_to_defines[fullname] = define
+
+ return entrypoints_to_defines
+
+
+def gen_code(entrypoints):
+ """Generate the C code."""
+ strmap = StringIntMap()
+ for e in entrypoints:
+ strmap.add_string(e.name, e.num)
+ strmap.bake()
+
+ return TEMPLATE_C.render(entrypoints=entrypoints,
+ LAYERS=LAYERS,
+ strmap=strmap,
+ filename=os.path.basename(__file__))
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--outdir', help='Where to write the files.',
+ required=True)
+ parser.add_argument('--xml',
+ help='Vulkan API XML file.',
+ required=True,
+ action='append',
+ dest='xml_files')
+ args = parser.parse_args()
+
+ entrypoints = []
+
+ for filename in args.xml_files:
+ doc = et.parse(filename)
+ entrypoints += get_entrypoints(doc, get_entrypoints_defines(doc),
+ start_index=len(entrypoints))
+
+ for num, e in enumerate(entrypoints):
+ e.num = num
+
+ # For outputting entrypoints.h we generate a tu_EntryPoint() prototype
+ # per entry point.
+ with open(os.path.join(args.outdir, 'tu_entrypoints.h'), 'wb') as f:
+ f.write(TEMPLATE_H.render(entrypoints=entrypoints,
+ LAYERS=LAYERS,
+ filename=os.path.basename(__file__)))
+ with open(os.path.join(args.outdir, 'tu_entrypoints.c'), 'wb') as f:
+ f.write(gen_code(entrypoints))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/src/freedreno/vulkan/tu_extensions.py b/src/freedreno/vulkan/tu_extensions.py
new file mode 100644
index 00000000000..37b4f0ca0f0
--- /dev/null
+++ b/src/freedreno/vulkan/tu_extensions.py
@@ -0,0 +1,275 @@
+COPYRIGHT = """\
+/*
+ * Copyright 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+"""
+
+import argparse
+import copy
+import re
+import xml.etree.cElementTree as et
+
+from mako.template import Template
+
+MAX_API_VERSION = '1.1.82'
+
+class Extension:
+ def __init__(self, name, ext_version, enable):
+ self.name = name
+ self.ext_version = int(ext_version)
+        if enable is True:
+            self.enable = 'true'
+        elif enable is False:
+            self.enable = 'false'
+        else:
+            self.enable = enable
+
+# On Android, we disable all surface and swapchain extensions. Android's Vulkan
+# loader implements VK_KHR_surface and VK_KHR_swapchain, and applications
+# cannot access the driver's implementation. Moreover, if the driver exposes
+# those extension strings, then the tests dEQP-VK.api.info.instance.extensions
+# and dEQP-VK.api.info.device fail due to the duplicated strings.
+EXTENSIONS = [
+ Extension('VK_KHR_bind_memory2', 1, True),
+ Extension('VK_KHR_create_renderpass2', 1, True),
+ Extension('VK_KHR_dedicated_allocation', 1, True),
+ Extension('VK_KHR_get_display_properties2', 1, 'VK_USE_PLATFORM_DISPLAY_KHR'),
+ Extension('VK_KHR_get_memory_requirements2', 1, True),
+ Extension('VK_KHR_get_physical_device_properties2', 1, True),
+ Extension('VK_KHR_get_surface_capabilities2', 1, 'TU_HAS_SURFACE'),
+ Extension('VK_KHR_maintenance1', 1, True),
+ Extension('VK_KHR_maintenance2', 1, True),
+ Extension('VK_KHR_maintenance3', 1, True),
+ Extension('VK_KHR_surface', 25, 'TU_HAS_SURFACE'),
+ Extension('VK_KHR_swapchain', 68, 'TU_HAS_SURFACE'),
+ Extension('VK_KHR_wayland_surface', 6, 'VK_USE_PLATFORM_WAYLAND_KHR'),
+ Extension('VK_KHR_xcb_surface', 6, 'VK_USE_PLATFORM_XCB_KHR'),
+ Extension('VK_KHR_xlib_surface', 6, 'VK_USE_PLATFORM_XLIB_KHR'),
+ Extension('VK_KHR_display', 23, 'VK_USE_PLATFORM_DISPLAY_KHR'),
+ Extension('VK_EXT_direct_mode_display', 1, 'VK_USE_PLATFORM_DISPLAY_KHR'),
+ Extension('VK_EXT_acquire_xlib_display', 1, 'VK_USE_PLATFORM_XLIB_XRANDR_EXT'),
+ Extension('VK_EXT_display_surface_counter', 1, 'VK_USE_PLATFORM_DISPLAY_KHR'),
+ Extension('VK_EXT_display_control', 1, 'VK_USE_PLATFORM_DISPLAY_KHR'),
+ Extension('VK_EXT_debug_report', 9, True),
+]
+
+class VkVersion:
+ def __init__(self, string):
+ split = string.split('.')
+ self.major = int(split[0])
+ self.minor = int(split[1])
+ if len(split) > 2:
+ assert len(split) == 3
+ self.patch = int(split[2])
+ else:
+ self.patch = None
+
+        # Sanity check. These ranges are required by the definition of the
+        # VK_MAKE_VERSION macro.
+ assert self.major < 1024 and self.minor < 1024
+ assert self.patch is None or self.patch < 4096
+ assert(str(self) == string)
+
+ def __str__(self):
+ ver_list = [str(self.major), str(self.minor)]
+ if self.patch is not None:
+ ver_list.append(str(self.patch))
+ return '.'.join(ver_list)
+
+ def c_vk_version(self):
+ patch = self.patch if self.patch is not None else 0
+ ver_list = [str(self.major), str(self.minor), str(patch)]
+ return 'VK_MAKE_VERSION(' + ', '.join(ver_list) + ')'
+
+ def __int_ver(self):
+        # This is just an expansion of VK_MAKE_VERSION.
+ patch = self.patch if self.patch is not None else 0
+ return (self.major << 22) | (self.minor << 12) | patch
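+        # e.g. '1.1.82' packs to (1 << 22) | (1 << 12) | 82 == 0x401052.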
+
+ def __gt__(self, other):
+ # If only one of them has a patch version, "ignore" it by making
+ # other's patch version match self.
+ if (self.patch is None) != (other.patch is None):
+ other = copy.copy(other)
+ other.patch = self.patch
+
+ return self.__int_ver() > other.__int_ver()
+
+MAX_API_VERSION = VkVersion(MAX_API_VERSION)
+
+def _init_exts_from_xml(xml):
+ """ Walk the Vulkan XML and fill out extra extension information. """
+
+ xml = et.parse(xml)
+
+ ext_name_map = {}
+ for ext in EXTENSIONS:
+ ext_name_map[ext.name] = ext
+
+ for ext_elem in xml.findall('.extensions/extension'):
+ ext_name = ext_elem.attrib['name']
+ if ext_name not in ext_name_map:
+ continue
+
+ ext = ext_name_map[ext_name]
+ ext.type = ext_elem.attrib['type']
+
+_TEMPLATE_H = Template(COPYRIGHT + """
+#ifndef TU_EXTENSIONS_H
+#define TU_EXTENSIONS_H
+
+enum {
+ TU_INSTANCE_EXTENSION_COUNT = ${len(instance_extensions)},
+ TU_DEVICE_EXTENSION_COUNT = ${len(device_extensions)},
+};
+
+struct tu_instance_extension_table {
+ union {
+ bool extensions[TU_INSTANCE_EXTENSION_COUNT];
+ struct {
+%for ext in instance_extensions:
+ bool ${ext.name[3:]};
+%endfor
+ };
+ };
+};
+
+struct tu_device_extension_table {
+ union {
+ bool extensions[TU_DEVICE_EXTENSION_COUNT];
+ struct {
+%for ext in device_extensions:
+ bool ${ext.name[3:]};
+%endfor
+ };
+ };
+};
+
+extern const VkExtensionProperties tu_instance_extensions[TU_INSTANCE_EXTENSION_COUNT];
+extern const VkExtensionProperties tu_device_extensions[TU_DEVICE_EXTENSION_COUNT];
+extern const struct tu_instance_extension_table tu_supported_instance_extensions;
+
+
+struct tu_physical_device;
+
+void tu_fill_device_extension_table(const struct tu_physical_device *device,
+ struct tu_device_extension_table* table);
+#endif
+""")
+
+_TEMPLATE_C = Template(COPYRIGHT + """
+#include "tu_private.h"
+
+#include "vk_util.h"
+
+/* Convert the VK_USE_PLATFORM_* defines to booleans */
+%for platform in ['ANDROID_KHR', 'WAYLAND_KHR', 'XCB_KHR', 'XLIB_KHR', 'DISPLAY_KHR', 'XLIB_XRANDR_EXT']:
+#ifdef VK_USE_PLATFORM_${platform}
+# undef VK_USE_PLATFORM_${platform}
+# define VK_USE_PLATFORM_${platform} true
+#else
+# define VK_USE_PLATFORM_${platform} false
+#endif
+%endfor
+
+/* And ANDROID too */
+#ifdef ANDROID
+# undef ANDROID
+# define ANDROID true
+#else
+# define ANDROID false
+#endif
+
+#define TU_HAS_SURFACE (VK_USE_PLATFORM_WAYLAND_KHR || \\
+ VK_USE_PLATFORM_XCB_KHR || \\
+ VK_USE_PLATFORM_XLIB_KHR || \\
+ VK_USE_PLATFORM_DISPLAY_KHR)
+
+
+const VkExtensionProperties tu_instance_extensions[TU_INSTANCE_EXTENSION_COUNT] = {
+%for ext in instance_extensions:
+ {"${ext.name}", ${ext.ext_version}},
+%endfor
+};
+
+const VkExtensionProperties tu_device_extensions[TU_DEVICE_EXTENSION_COUNT] = {
+%for ext in device_extensions:
+ {"${ext.name}", ${ext.ext_version}},
+%endfor
+};
+
+const struct tu_instance_extension_table tu_supported_instance_extensions = {
+%for ext in instance_extensions:
+ .${ext.name[3:]} = ${ext.enable},
+%endfor
+};
+
+void tu_fill_device_extension_table(const struct tu_physical_device *device,
+ struct tu_device_extension_table* table)
+{
+%for ext in device_extensions:
+ table->${ext.name[3:]} = ${ext.enable};
+%endfor
+}
+
+VkResult tu_EnumerateInstanceVersion(
+ uint32_t* pApiVersion)
+{
+ *pApiVersion = ${MAX_API_VERSION.c_vk_version()};
+ return VK_SUCCESS;
+}
+
+uint32_t
+tu_physical_device_api_version(struct tu_physical_device *dev)
+{
+ return VK_MAKE_VERSION(1, 1, 82);
+}
+""")
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--out-c', help='Output C file.', required=True)
+ parser.add_argument('--out-h', help='Output H file.', required=True)
+ parser.add_argument('--xml',
+ help='Vulkan API XML file.',
+ required=True,
+ action='append',
+ dest='xml_files')
+ args = parser.parse_args()
+
+ for filename in args.xml_files:
+ _init_exts_from_xml(filename)
+
+ for ext in EXTENSIONS:
+ assert ext.type == 'instance' or ext.type == 'device'
+
+ template_env = {
+ 'MAX_API_VERSION': MAX_API_VERSION,
+ 'instance_extensions': [e for e in EXTENSIONS if e.type == 'instance'],
+ 'device_extensions': [e for e in EXTENSIONS if e.type == 'device'],
+ }
+
+ with open(args.out_c, 'w') as f:
+ f.write(_TEMPLATE_C.render(**template_env))
+ with open(args.out_h, 'w') as f:
+ f.write(_TEMPLATE_H.render(**template_env))
diff --git a/src/freedreno/vulkan/tu_formats.c b/src/freedreno/vulkan/tu_formats.c
new file mode 100644
index 00000000000..09f8c93a187
--- /dev/null
+++ b/src/freedreno/vulkan/tu_formats.c
@@ -0,0 +1,410 @@
+/*
+ * Copyright © 2016 Red Hat.
+ * Copyright © 2016 Bas Nieuwenhuizen
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "tu_private.h"
+
+#include "vk_format.h"
+
+#include "vk_util.h"
+
+#include "util/format_r11g11b10f.h"
+#include "util/format_srgb.h"
+#include "util/u_half.h"
+
+static void
+tu_physical_device_get_format_properties(
+ struct tu_physical_device *physical_device,
+ VkFormat format,
+ VkFormatProperties *out_properties)
+{
+ VkFormatFeatureFlags linear = 0, tiled = 0, buffer = 0;
+ const struct vk_format_description *desc = vk_format_description(format);
+ if (!desc) {
+ out_properties->linearTilingFeatures = linear;
+ out_properties->optimalTilingFeatures = tiled;
+ out_properties->bufferFeatures = buffer;
+ return;
+ }
+
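+   /* Skeleton: no format features are advertised yet, so all masks stay 0. */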
+ out_properties->linearTilingFeatures = linear;
+ out_properties->optimalTilingFeatures = tiled;
+ out_properties->bufferFeatures = buffer;
+}
+
+void
+tu_GetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkFormatProperties *pFormatProperties)
+{
+ TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
+
+ tu_physical_device_get_format_properties(
+ physical_device, format, pFormatProperties);
+}
+
+void
+tu_GetPhysicalDeviceFormatProperties2(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkFormatProperties2KHR *pFormatProperties)
+{
+ TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
+
+ tu_physical_device_get_format_properties(
+ physical_device, format, &pFormatProperties->formatProperties);
+}
+
+static VkResult
+tu_get_image_format_properties(struct tu_physical_device *physical_device,
+ const VkPhysicalDeviceImageFormatInfo2KHR *info,
+ VkImageFormatProperties *pImageFormatProperties)
+
+{
+ VkFormatProperties format_props;
+ VkFormatFeatureFlags format_feature_flags;
+ VkExtent3D maxExtent;
+ uint32_t maxMipLevels;
+ uint32_t maxArraySize;
+ VkSampleCountFlags sampleCounts = VK_SAMPLE_COUNT_1_BIT;
+
+ tu_physical_device_get_format_properties(
+ physical_device, info->format, &format_props);
+ if (info->tiling == VK_IMAGE_TILING_LINEAR) {
+ format_feature_flags = format_props.linearTilingFeatures;
+ } else if (info->tiling == VK_IMAGE_TILING_OPTIMAL) {
+ format_feature_flags = format_props.optimalTilingFeatures;
+ } else {
+ unreachable("bad VkImageTiling");
+ }
+
+ if (format_feature_flags == 0)
+ goto unsupported;
+
+ if (info->type != VK_IMAGE_TYPE_2D &&
+ vk_format_is_depth_or_stencil(info->format))
+ goto unsupported;
+
+ switch (info->type) {
+ default:
+ unreachable("bad vkimage type\n");
+ case VK_IMAGE_TYPE_1D:
+ maxExtent.width = 16384;
+ maxExtent.height = 1;
+ maxExtent.depth = 1;
+ maxMipLevels = 15; /* log2(maxWidth) + 1 */
+ maxArraySize = 2048;
+ break;
+ case VK_IMAGE_TYPE_2D:
+ maxExtent.width = 16384;
+ maxExtent.height = 16384;
+ maxExtent.depth = 1;
+ maxMipLevels = 15; /* log2(maxWidth) + 1 */
+ maxArraySize = 2048;
+ break;
+ case VK_IMAGE_TYPE_3D:
+ maxExtent.width = 2048;
+ maxExtent.height = 2048;
+ maxExtent.depth = 2048;
+ maxMipLevels = 12; /* log2(maxWidth) + 1 */
+ maxArraySize = 1;
+ break;
+ }
+
+ if (info->tiling == VK_IMAGE_TILING_OPTIMAL &&
+ info->type == VK_IMAGE_TYPE_2D &&
+ (format_feature_flags &
+ (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT |
+ VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) &&
+ !(info->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) &&
+ !(info->usage & VK_IMAGE_USAGE_STORAGE_BIT)) {
+ sampleCounts |=
+ VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT | VK_SAMPLE_COUNT_8_BIT;
+ }
+
+ if (info->usage & VK_IMAGE_USAGE_SAMPLED_BIT) {
+ if (!(format_feature_flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
+ goto unsupported;
+ }
+ }
+
+ if (info->usage & VK_IMAGE_USAGE_STORAGE_BIT) {
+ if (!(format_feature_flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
+ goto unsupported;
+ }
+ }
+
+ if (info->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
+ if (!(format_feature_flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
+ goto unsupported;
+ }
+ }
+
+ if (info->usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ if (!(format_feature_flags &
+ VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
+ goto unsupported;
+ }
+ }
+
+ *pImageFormatProperties = (VkImageFormatProperties){
+ .maxExtent = maxExtent,
+ .maxMipLevels = maxMipLevels,
+ .maxArrayLayers = maxArraySize,
+ .sampleCounts = sampleCounts,
+
+ /* FINISHME: Accurately calculate
+ * VkImageFormatProperties::maxResourceSize.
+ */
+ .maxResourceSize = UINT32_MAX,
+ };
+
+ return VK_SUCCESS;
+unsupported:
+ *pImageFormatProperties = (VkImageFormatProperties){
+ .maxExtent = { 0, 0, 0 },
+ .maxMipLevels = 0,
+ .maxArrayLayers = 0,
+ .sampleCounts = 0,
+ .maxResourceSize = 0,
+ };
+
+ return VK_ERROR_FORMAT_NOT_SUPPORTED;
+}
+
+VkResult
+tu_GetPhysicalDeviceImageFormatProperties(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkImageType type,
+ VkImageTiling tiling,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags createFlags,
+ VkImageFormatProperties *pImageFormatProperties)
+{
+ TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
+
+ const VkPhysicalDeviceImageFormatInfo2KHR info = {
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR,
+ .pNext = NULL,
+ .format = format,
+ .type = type,
+ .tiling = tiling,
+ .usage = usage,
+ .flags = createFlags,
+ };
+
+ return tu_get_image_format_properties(
+ physical_device, &info, pImageFormatProperties);
+}
+
+static void
+get_external_image_format_properties(
+ const VkPhysicalDeviceImageFormatInfo2KHR *pImageFormatInfo,
+ VkExternalMemoryHandleTypeFlagBitsKHR handleType,
+ VkExternalMemoryPropertiesKHR *external_properties)
+{
+ VkExternalMemoryFeatureFlagBitsKHR flags = 0;
+ VkExternalMemoryHandleTypeFlagsKHR export_flags = 0;
+ VkExternalMemoryHandleTypeFlagsKHR compat_flags = 0;
+ switch (handleType) {
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
+ switch (pImageFormatInfo->type) {
+ case VK_IMAGE_TYPE_2D:
+ flags = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR |
+ VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR |
+ VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR;
+ compat_flags = export_flags =
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR |
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
+ break;
+ default:
+ break;
+ }
+ break;
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT:
+ flags = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR;
+ compat_flags = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
+ break;
+ default:
+ break;
+ }
+
+ *external_properties = (VkExternalMemoryPropertiesKHR){
+ .externalMemoryFeatures = flags,
+ .exportFromImportedHandleTypes = export_flags,
+ .compatibleHandleTypes = compat_flags,
+ };
+}
+
+VkResult
+tu_GetPhysicalDeviceImageFormatProperties2(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceImageFormatInfo2KHR *base_info,
+ VkImageFormatProperties2KHR *base_props)
+{
+ TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
+ const VkPhysicalDeviceExternalImageFormatInfoKHR *external_info = NULL;
+ VkExternalImageFormatPropertiesKHR *external_props = NULL;
+ VkResult result;
+
+ result = tu_get_image_format_properties(
+ physical_device, base_info, &base_props->imageFormatProperties);
+ if (result != VK_SUCCESS)
+ return result;
+
+ /* Extract input structs */
+ vk_foreach_struct_const(s, base_info->pNext)
+ {
+ switch (s->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR:
+ external_info = (const void *)s;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Extract output structs */
+ vk_foreach_struct(s, base_props->pNext)
+ {
+ switch (s->sType) {
+ case VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR:
+ external_props = (void *)s;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* From the Vulkan 1.0.42 spec:
+ *
+ * If handleType is 0, vkGetPhysicalDeviceImageFormatProperties2KHR will
+ * behave as if VkPhysicalDeviceExternalImageFormatInfoKHR was not
+ * present and VkExternalImageFormatPropertiesKHR will be ignored.
+ */
+ if (external_info && external_info->handleType != 0) {
+ switch (external_info->handleType) {
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT:
+ get_external_image_format_properties(
+ base_info,
+ external_info->handleType,
+ &external_props->externalMemoryProperties);
+ break;
+ default:
+ /* From the Vulkan 1.0.42 spec:
+ *
+ * If handleType is not compatible with the [parameters]
+ * specified
+ * in VkPhysicalDeviceImageFormatInfo2KHR, then
+ * vkGetPhysicalDeviceImageFormatProperties2KHR returns
+ * VK_ERROR_FORMAT_NOT_SUPPORTED.
+ */
+ result =
+ vk_errorf(physical_device->instance,
+ VK_ERROR_FORMAT_NOT_SUPPORTED,
+ "unsupported VkExternalMemoryTypeFlagBitsKHR 0x%x",
+ external_info->handleType);
+ goto fail;
+ }
+ }
+
+ return VK_SUCCESS;
+
+fail:
+ if (result == VK_ERROR_FORMAT_NOT_SUPPORTED) {
+ /* From the Vulkan 1.0.42 spec:
+ *
+ * If the combination of parameters to
+ * vkGetPhysicalDeviceImageFormatProperties2KHR is not supported by
+ * the implementation for use in vkCreateImage, then all members of
+ * imageFormatProperties will be filled with zero.
+ */
+ base_props->imageFormatProperties = (VkImageFormatProperties){ 0 };
+ }
+
+ return result;
+}
+
+void
+tu_GetPhysicalDeviceSparseImageFormatProperties(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkImageType type,
+ uint32_t samples,
+ VkImageUsageFlags usage,
+ VkImageTiling tiling,
+ uint32_t *pNumProperties,
+ VkSparseImageFormatProperties *pProperties)
+{
+ /* Sparse images are not yet supported. */
+ *pNumProperties = 0;
+}
+
+void
+tu_GetPhysicalDeviceSparseImageFormatProperties2(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceSparseImageFormatInfo2KHR *pFormatInfo,
+ uint32_t *pPropertyCount,
+ VkSparseImageFormatProperties2KHR *pProperties)
+{
+ /* Sparse images are not yet supported. */
+ *pPropertyCount = 0;
+}
+
+void
+tu_GetPhysicalDeviceExternalBufferProperties(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceExternalBufferInfoKHR *pExternalBufferInfo,
+ VkExternalBufferPropertiesKHR *pExternalBufferProperties)
+{
+ VkExternalMemoryFeatureFlagBitsKHR flags = 0;
+ VkExternalMemoryHandleTypeFlagsKHR export_flags = 0;
+ VkExternalMemoryHandleTypeFlagsKHR compat_flags = 0;
+ switch (pExternalBufferInfo->handleType) {
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
+ flags = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR |
+ VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR;
+ compat_flags = export_flags =
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR |
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
+ break;
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT:
+ flags = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR;
+ compat_flags = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
+ break;
+ default:
+ break;
+ }
+ pExternalBufferProperties->externalMemoryProperties =
+ (VkExternalMemoryPropertiesKHR){
+ .externalMemoryFeatures = flags,
+ .exportFromImportedHandleTypes = export_flags,
+ .compatibleHandleTypes = compat_flags,
+ };
+}
diff --git a/src/freedreno/vulkan/tu_icd.py b/src/freedreno/vulkan/tu_icd.py
new file mode 100644
index 00000000000..1947a969aff
--- /dev/null
+++ b/src/freedreno/vulkan/tu_icd.py
@@ -0,0 +1,47 @@
+# Copyright 2017 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sub license, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice (including the
+# next paragraph) shall be included in all copies or substantial portions
+# of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+# IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import argparse
+import json
+import os.path
+
+from tu_extensions import *
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--out', help='Output json file.', required=True)
+ parser.add_argument('--lib-path', help='Path to libvulkan_freedreno.so')
+ args = parser.parse_args()
+
+ path = 'libvulkan_freedreno.so'
+ if args.lib_path:
+ path = os.path.join(args.lib_path, path)
+
+ json_data = {
+ 'file_format_version': '1.0.0',
+ 'ICD': {
+ 'library_path': path,
+ 'api_version': str(MAX_API_VERSION),
+ },
+ }
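+
+    # For illustration, the manifest written below looks like:
+    # {
+    #     "ICD": {
+    #         "api_version": "1.1.82",
+    #         "library_path": "libvulkan_freedreno.so"
+    #     },
+    #     "file_format_version": "1.0.0"
+    # }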
+
+ with open(args.out, 'w') as f:
+        json.dump(json_data, f, indent=4, sort_keys=True, separators=(',', ': '))
diff --git a/src/freedreno/vulkan/tu_image.c b/src/freedreno/vulkan/tu_image.c
new file mode 100644
index 00000000000..491bf30f2ad
--- /dev/null
+++ b/src/freedreno/vulkan/tu_image.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright © 2016 Red Hat.
+ * Copyright © 2016 Bas Nieuwenhuizen
+ *
+ * based in part on anv driver which is:
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "tu_private.h"
+#include "util/debug.h"
+#include "util/u_atomic.h"
+#include "vk_format.h"
+#include "vk_util.h"
+
+VkResult
+tu_image_create(VkDevice _device,
+ const struct tu_image_create_info *create_info,
+ const VkAllocationCallbacks *alloc,
+ VkImage *pImage)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ const VkImageCreateInfo *pCreateInfo = create_info->vk_info;
+ struct tu_image *image = NULL;
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);
+
+ tu_assert(pCreateInfo->mipLevels > 0);
+ tu_assert(pCreateInfo->arrayLayers > 0);
+ tu_assert(pCreateInfo->samples > 0);
+ tu_assert(pCreateInfo->extent.width > 0);
+ tu_assert(pCreateInfo->extent.height > 0);
+ tu_assert(pCreateInfo->extent.depth > 0);
+
+ image = vk_zalloc2(&device->alloc,
+ alloc,
+ sizeof(*image),
+ 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!image)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ image->type = pCreateInfo->imageType;
+
+ image->vk_format = pCreateInfo->format;
+ image->tiling = pCreateInfo->tiling;
+ image->usage = pCreateInfo->usage;
+ image->flags = pCreateInfo->flags;
+
+ image->exclusive = pCreateInfo->sharingMode == VK_SHARING_MODE_EXCLUSIVE;
+ if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT) {
+ for (uint32_t i = 0; i < pCreateInfo->queueFamilyIndexCount; ++i)
+ if (pCreateInfo->pQueueFamilyIndices[i] ==
+ VK_QUEUE_FAMILY_EXTERNAL_KHR)
+ image->queue_family_mask |= (1u << TU_MAX_QUEUE_FAMILIES) - 1u;
+ else
+ image->queue_family_mask |= 1u
+ << pCreateInfo->pQueueFamilyIndices[i];
+ }
+
+ image->shareable =
+ vk_find_struct_const(pCreateInfo->pNext,
+ EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR) != NULL;
+
+ *pImage = tu_image_to_handle(image);
+
+ return VK_SUCCESS;
+}
+
+void
+tu_image_view_init(struct tu_image_view *iview,
+ struct tu_device *device,
+ const VkImageViewCreateInfo *pCreateInfo)
+{
+}
+
+unsigned
+tu_image_queue_family_mask(const struct tu_image *image,
+ uint32_t family,
+ uint32_t queue_family)
+{
+ if (!image->exclusive)
+ return image->queue_family_mask;
+ if (family == VK_QUEUE_FAMILY_EXTERNAL_KHR)
+ return (1u << TU_MAX_QUEUE_FAMILIES) - 1u;
+ if (family == VK_QUEUE_FAMILY_IGNORED)
+ return 1u << queue_family;
+ return 1u << family;
+}
+
+VkResult
+tu_CreateImage(VkDevice device,
+ const VkImageCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkImage *pImage)
+{
+#ifdef ANDROID
+ const VkNativeBufferANDROID *gralloc_info =
+ vk_find_struct_const(pCreateInfo->pNext, NATIVE_BUFFER_ANDROID);
+
+ if (gralloc_info)
+ return tu_image_from_gralloc(
+ device, pCreateInfo, gralloc_info, pAllocator, pImage);
+#endif
+
+ return tu_image_create(device,
+ &(struct tu_image_create_info) {
+ .vk_info = pCreateInfo,
+ .scanout = false,
+ },
+ pAllocator,
+ pImage);
+}
+
+void
+tu_DestroyImage(VkDevice _device,
+ VkImage _image,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_image, image, _image);
+
+ if (!image)
+ return;
+
+ if (image->owned_memory != VK_NULL_HANDLE)
+ tu_FreeMemory(_device, image->owned_memory, pAllocator);
+
+ vk_free2(&device->alloc, pAllocator, image);
+}
+
+void
+tu_GetImageSubresourceLayout(VkDevice _device,
+ VkImage _image,
+ const VkImageSubresource *pSubresource,
+ VkSubresourceLayout *pLayout)
+{
+}
+
+VkResult
+tu_CreateImageView(VkDevice _device,
+ const VkImageViewCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkImageView *pView)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ struct tu_image_view *view;
+
+ view = vk_alloc2(&device->alloc,
+ pAllocator,
+ sizeof(*view),
+ 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (view == NULL)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ tu_image_view_init(view, device, pCreateInfo);
+
+ *pView = tu_image_view_to_handle(view);
+
+ return VK_SUCCESS;
+}
+
+void
+tu_DestroyImageView(VkDevice _device,
+ VkImageView _iview,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_image_view, iview, _iview);
+
+ if (!iview)
+ return;
+ vk_free2(&device->alloc, pAllocator, iview);
+}
+
+void
+tu_buffer_view_init(struct tu_buffer_view *view,
+ struct tu_device *device,
+ const VkBufferViewCreateInfo *pCreateInfo)
+{
+ TU_FROM_HANDLE(tu_buffer, buffer, pCreateInfo->buffer);
+
+ view->range = pCreateInfo->range == VK_WHOLE_SIZE
+ ? buffer->size - pCreateInfo->offset
+ : pCreateInfo->range;
+ view->vk_format = pCreateInfo->format;
+}
+
+VkResult
+tu_CreateBufferView(VkDevice _device,
+ const VkBufferViewCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkBufferView *pView)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ struct tu_buffer_view *view;
+
+ view = vk_alloc2(&device->alloc,
+ pAllocator,
+ sizeof(*view),
+ 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!view)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ tu_buffer_view_init(view, device, pCreateInfo);
+
+ *pView = tu_buffer_view_to_handle(view);
+
+ return VK_SUCCESS;
+}
+
+void
+tu_DestroyBufferView(VkDevice _device,
+ VkBufferView bufferView,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_buffer_view, view, bufferView);
+
+ if (!view)
+ return;
+
+ vk_free2(&device->alloc, pAllocator, view);
+}
diff --git a/src/freedreno/vulkan/tu_meta_blit.c b/src/freedreno/vulkan/tu_meta_blit.c
new file mode 100644
index 00000000000..ea4e48e1cac
--- /dev/null
+++ b/src/freedreno/vulkan/tu_meta_blit.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "tu_private.h"
+#include "nir/nir_builder.h"
+
+void
+tu_CmdBlitImage(VkCommandBuffer commandBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
+ uint32_t regionCount,
+ const VkImageBlit *pRegions,
+ VkFilter filter)
+{
+}
diff --git a/src/freedreno/vulkan/tu_meta_buffer.c b/src/freedreno/vulkan/tu_meta_buffer.c
new file mode 100644
index 00000000000..fd5bb6e5af1
--- /dev/null
+++ b/src/freedreno/vulkan/tu_meta_buffer.c
@@ -0,0 +1,28 @@
+#include "tu_private.h"
+
+void
+tu_CmdFillBuffer(VkCommandBuffer commandBuffer,
+ VkBuffer dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize fillSize,
+ uint32_t data)
+{
+}
+
+void
+tu_CmdCopyBuffer(VkCommandBuffer commandBuffer,
+ VkBuffer srcBuffer,
+ VkBuffer destBuffer,
+ uint32_t regionCount,
+ const VkBufferCopy *pRegions)
+{
+}
+
+void
+tu_CmdUpdateBuffer(VkCommandBuffer commandBuffer,
+ VkBuffer dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize dataSize,
+ const void *pData)
+{
+}
diff --git a/src/freedreno/vulkan/tu_meta_clear.c b/src/freedreno/vulkan/tu_meta_clear.c
new file mode 100644
index 00000000000..c363668d99a
--- /dev/null
+++ b/src/freedreno/vulkan/tu_meta_clear.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "tu_private.h"
+
+void
+tu_CmdClearColorImage(VkCommandBuffer commandBuffer,
+ VkImage image_h,
+ VkImageLayout imageLayout,
+ const VkClearColorValue *pColor,
+ uint32_t rangeCount,
+ const VkImageSubresourceRange *pRanges)
+{
+}
+
+void
+tu_CmdClearDepthStencilImage(VkCommandBuffer commandBuffer,
+ VkImage image_h,
+ VkImageLayout imageLayout,
+ const VkClearDepthStencilValue *pDepthStencil,
+ uint32_t rangeCount,
+ const VkImageSubresourceRange *pRanges)
+{
+}
+
+void
+tu_CmdClearAttachments(VkCommandBuffer commandBuffer,
+ uint32_t attachmentCount,
+ const VkClearAttachment *pAttachments,
+ uint32_t rectCount,
+ const VkClearRect *pRects)
+{
+}
diff --git a/src/freedreno/vulkan/tu_meta_copy.c b/src/freedreno/vulkan/tu_meta_copy.c
new file mode 100644
index 00000000000..00d7b247cc8
--- /dev/null
+++ b/src/freedreno/vulkan/tu_meta_copy.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "tu_private.h"
+
+static void
+meta_copy_buffer_to_image(struct tu_cmd_buffer *cmd_buffer,
+ struct tu_buffer *buffer,
+ struct tu_image *image,
+ VkImageLayout layout,
+ uint32_t regionCount,
+ const VkBufferImageCopy *pRegions)
+{
+}
+
+void
+tu_CmdCopyBufferToImage(VkCommandBuffer commandBuffer,
+ VkBuffer srcBuffer,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
+ uint32_t regionCount,
+ const VkBufferImageCopy *pRegions)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+ TU_FROM_HANDLE(tu_image, dest_image, destImage);
+ TU_FROM_HANDLE(tu_buffer, src_buffer, srcBuffer);
+
+ meta_copy_buffer_to_image(cmd_buffer,
+ src_buffer,
+ dest_image,
+ destImageLayout,
+ regionCount,
+ pRegions);
+}
+
+static void
+meta_copy_image_to_buffer(struct tu_cmd_buffer *cmd_buffer,
+ struct tu_buffer *buffer,
+ struct tu_image *image,
+ VkImageLayout layout,
+ uint32_t regionCount,
+ const VkBufferImageCopy *pRegions)
+{
+}
+
+void
+tu_CmdCopyImageToBuffer(VkCommandBuffer commandBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkBuffer destBuffer,
+ uint32_t regionCount,
+ const VkBufferImageCopy *pRegions)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+ TU_FROM_HANDLE(tu_image, src_image, srcImage);
+ TU_FROM_HANDLE(tu_buffer, dst_buffer, destBuffer);
+
+ meta_copy_image_to_buffer(
+ cmd_buffer, dst_buffer, src_image, srcImageLayout, regionCount, pRegions);
+}
+
+static void
+meta_copy_image(struct tu_cmd_buffer *cmd_buffer,
+ struct tu_image *src_image,
+ VkImageLayout src_image_layout,
+ struct tu_image *dest_image,
+ VkImageLayout dest_image_layout,
+ uint32_t regionCount,
+ const VkImageCopy *pRegions)
+{
+}
+
+void
+tu_CmdCopyImage(VkCommandBuffer commandBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
+ uint32_t regionCount,
+ const VkImageCopy *pRegions)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+ TU_FROM_HANDLE(tu_image, src_image, srcImage);
+ TU_FROM_HANDLE(tu_image, dest_image, destImage);
+
+ meta_copy_image(cmd_buffer,
+ src_image,
+ srcImageLayout,
+ dest_image,
+ destImageLayout,
+ regionCount,
+ pRegions);
+}
diff --git a/src/freedreno/vulkan/tu_meta_resolve.c b/src/freedreno/vulkan/tu_meta_resolve.c
new file mode 100644
index 00000000000..e68d00c9700
--- /dev/null
+++ b/src/freedreno/vulkan/tu_meta_resolve.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+
+#include "tu_private.h"
+#include "nir/nir_builder.h"
+#include "vk_format.h"
+
+void
+tu_CmdResolveImage(VkCommandBuffer cmd_buffer_h,
+ VkImage src_image_h,
+ VkImageLayout src_image_layout,
+ VkImage dest_image_h,
+ VkImageLayout dest_image_layout,
+ uint32_t region_count,
+ const VkImageResolve *regions)
+{
+}
diff --git a/src/freedreno/vulkan/tu_pass.c b/src/freedreno/vulkan/tu_pass.c
new file mode 100644
index 00000000000..e918f2a4470
--- /dev/null
+++ b/src/freedreno/vulkan/tu_pass.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright © 2016 Red Hat.
+ * Copyright © 2016 Bas Nieuwenhuizen
+ *
+ * based in part on anv driver which is:
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include "tu_private.h"
+
+#include "vk_util.h"
+
+VkResult
+tu_CreateRenderPass(VkDevice _device,
+ const VkRenderPassCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkRenderPass *pRenderPass)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ struct tu_render_pass *pass;
+ size_t size;
+ size_t attachments_offset;
+ VkRenderPassMultiviewCreateInfoKHR *multiview_info = NULL;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);
+
+ size = sizeof(*pass);
+ size += pCreateInfo->subpassCount * sizeof(pass->subpasses[0]);
+ attachments_offset = size;
+ size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]);
+
+ pass = vk_alloc2(
+ &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (pass == NULL)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ memset(pass, 0, size);
+ pass->attachment_count = pCreateInfo->attachmentCount;
+ pass->subpass_count = pCreateInfo->subpassCount;
+ pass->attachments = (void *)pass + attachments_offset;
+
+ vk_foreach_struct(ext, pCreateInfo->pNext)
+ {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO_KHR:
+ multiview_info = (VkRenderPassMultiviewCreateInfoKHR *)ext;
+ break;
+ default:
+ break;
+ }
+ }
+
+ for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
+ struct tu_render_pass_attachment *att = &pass->attachments[i];
+
+ att->format = pCreateInfo->pAttachments[i].format;
+ att->samples = pCreateInfo->pAttachments[i].samples;
+ att->load_op = pCreateInfo->pAttachments[i].loadOp;
+ att->stencil_load_op = pCreateInfo->pAttachments[i].stencilLoadOp;
+ att->initial_layout = pCreateInfo->pAttachments[i].initialLayout;
+ att->final_layout = pCreateInfo->pAttachments[i].finalLayout;
+ // att->store_op = pCreateInfo->pAttachments[i].storeOp;
+ // att->stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp;
+ }
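+
+ /* Count all subpass attachments up front so each subpass's attachment
+ * arrays can be carved out of one allocation below (advancing the cursor
+ * 'p'), rather than allocating per subpass.
+ */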
+ uint32_t subpass_attachment_count = 0;
+ struct tu_subpass_attachment *p;
+ for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
+ const VkSubpassDescription *desc = &pCreateInfo->pSubpasses[i];
+
+ subpass_attachment_count +=
+ desc->inputAttachmentCount + desc->colorAttachmentCount +
+ (desc->pResolveAttachments ? desc->colorAttachmentCount : 0) +
+ (desc->pDepthStencilAttachment != NULL);
+ }
+
+ if (subpass_attachment_count) {
+ pass->subpass_attachments = vk_alloc2(
+ &device->alloc,
+ pAllocator,
+ subpass_attachment_count * sizeof(struct tu_subpass_attachment),
+ 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (pass->subpass_attachments == NULL) {
+ vk_free2(&device->alloc, pAllocator, pass);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+ } else {
+ pass->subpass_attachments = NULL;
+ }
+
+ p = pass->subpass_attachments;
+ for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
+ const VkSubpassDescription *desc = &pCreateInfo->pSubpasses[i];
+ uint32_t color_sample_count = 1, depth_sample_count = 1;
+ struct tu_subpass *subpass = &pass->subpasses[i];
+
+ subpass->input_count = desc->inputAttachmentCount;
+ subpass->color_count = desc->colorAttachmentCount;
+ if (multiview_info)
+ subpass->view_mask = multiview_info->pViewMasks[i];
+
+ if (desc->inputAttachmentCount > 0) {
+ subpass->input_attachments = p;
+ p += desc->inputAttachmentCount;
+
+ for (uint32_t j = 0; j < desc->inputAttachmentCount; j++) {
+ subpass->input_attachments[j] = (struct tu_subpass_attachment){
+ .attachment = desc->pInputAttachments[j].attachment,
+ .layout = desc->pInputAttachments[j].layout,
+ };
+ if (desc->pInputAttachments[j].attachment != VK_ATTACHMENT_UNUSED)
+ pass->attachments[desc->pInputAttachments[j].attachment]
+ .view_mask |= subpass->view_mask;
+ }
+ }
+
+ if (desc->colorAttachmentCount > 0) {
+ subpass->color_attachments = p;
+ p += desc->colorAttachmentCount;
+
+ for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) {
+ subpass->color_attachments[j] = (struct tu_subpass_attachment){
+ .attachment = desc->pColorAttachments[j].attachment,
+ .layout = desc->pColorAttachments[j].layout,
+ };
+ if (desc->pColorAttachments[j].attachment != VK_ATTACHMENT_UNUSED) {
+ pass->attachments[desc->pColorAttachments[j].attachment]
+ .view_mask |= subpass->view_mask;
+ color_sample_count =
+ pCreateInfo
+ ->pAttachments[desc->pColorAttachments[j].attachment]
+ .samples;
+ }
+ }
+ }
+
+ subpass->has_resolve = false;
+ if (desc->pResolveAttachments) {
+ subpass->resolve_attachments = p;
+ p += desc->colorAttachmentCount;
+
+ for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) {
+ uint32_t a = desc->pResolveAttachments[j].attachment;
+ subpass->resolve_attachments[j] = (struct tu_subpass_attachment){
+ .attachment = a,
+ .layout = desc->pResolveAttachments[j].layout,
+ };
+ if (a != VK_ATTACHMENT_UNUSED) {
+ subpass->has_resolve = true;
+ pass->attachments[a].view_mask |= subpass->view_mask;
+ }
+ }
+ }
+
+ if (desc->pDepthStencilAttachment) {
+ subpass->depth_stencil_attachment = (struct tu_subpass_attachment){
+ .attachment = desc->pDepthStencilAttachment->attachment,
+ .layout = desc->pDepthStencilAttachment->layout,
+ };
+ if (desc->pDepthStencilAttachment->attachment !=
+ VK_ATTACHMENT_UNUSED) {
+ pass->attachments[desc->pDepthStencilAttachment->attachment]
+ .view_mask |= subpass->view_mask;
+ depth_sample_count =
+ pCreateInfo
+ ->pAttachments[desc->pDepthStencilAttachment->attachment]
+ .samples;
+ }
+ } else {
+ subpass->depth_stencil_attachment.attachment = VK_ATTACHMENT_UNUSED;
+ }
+
+ subpass->max_sample_count = MAX2(color_sample_count, depth_sample_count);
+ }
+
+ for (unsigned i = 0; i < pCreateInfo->dependencyCount; ++i) {
+ uint32_t dst = pCreateInfo->pDependencies[i].dstSubpass;
+ if (dst == VK_SUBPASS_EXTERNAL) {
+ pass->end_barrier.src_stage_mask =
+ pCreateInfo->pDependencies[i].srcStageMask;
+ pass->end_barrier.src_access_mask =
+ pCreateInfo->pDependencies[i].srcAccessMask;
+ pass->end_barrier.dst_access_mask =
+ pCreateInfo->pDependencies[i].dstAccessMask;
+ } else {
+ pass->subpasses[dst].start_barrier.src_stage_mask =
+ pCreateInfo->pDependencies[i].srcStageMask;
+ pass->subpasses[dst].start_barrier.src_access_mask =
+ pCreateInfo->pDependencies[i].srcAccessMask;
+ pass->subpasses[dst].start_barrier.dst_access_mask =
+ pCreateInfo->pDependencies[i].dstAccessMask;
+ }
+ }
+
+ *pRenderPass = tu_render_pass_to_handle(pass);
+
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_CreateRenderPass2KHR(VkDevice _device,
+ const VkRenderPassCreateInfo2KHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkRenderPass *pRenderPass)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ struct tu_render_pass *pass;
+ size_t size;
+ size_t attachments_offset;
+
+ assert(pCreateInfo->sType ==
+ VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2_KHR);
+
+ size = sizeof(*pass);
+ size += pCreateInfo->subpassCount * sizeof(pass->subpasses[0]);
+ attachments_offset = size;
+ size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]);
+
+ pass = vk_alloc2(
+ &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (pass == NULL)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ memset(pass, 0, size);
+ pass->attachment_count = pCreateInfo->attachmentCount;
+ pass->subpass_count = pCreateInfo->subpassCount;
+ pass->attachments = (void *)pass + attachments_offset;
+
+ for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
+ struct tu_render_pass_attachment *att = &pass->attachments[i];
+
+ att->format = pCreateInfo->pAttachments[i].format;
+ att->samples = pCreateInfo->pAttachments[i].samples;
+ att->load_op = pCreateInfo->pAttachments[i].loadOp;
+ att->stencil_load_op = pCreateInfo->pAttachments[i].stencilLoadOp;
+ att->initial_layout = pCreateInfo->pAttachments[i].initialLayout;
+ att->final_layout = pCreateInfo->pAttachments[i].finalLayout;
+ // att->store_op = pCreateInfo->pAttachments[i].storeOp;
+ // att->stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp;
+ }
+ uint32_t subpass_attachment_count = 0;
+ struct tu_subpass_attachment *p;
+ for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
+ const VkSubpassDescription2KHR *desc = &pCreateInfo->pSubpasses[i];
+
+ subpass_attachment_count +=
+ desc->inputAttachmentCount + desc->colorAttachmentCount +
+ (desc->pResolveAttachments ? desc->colorAttachmentCount : 0) +
+ (desc->pDepthStencilAttachment != NULL);
+ }
+
+ if (subpass_attachment_count) {
+ pass->subpass_attachments = vk_alloc2(
+ &device->alloc,
+ pAllocator,
+ subpass_attachment_count * sizeof(struct tu_subpass_attachment),
+ 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (pass->subpass_attachments == NULL) {
+ vk_free2(&device->alloc, pAllocator, pass);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+ } else {
+ pass->subpass_attachments = NULL;
+ }
+
+ p = pass->subpass_attachments;
+ for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
+ const VkSubpassDescription2KHR *desc = &pCreateInfo->pSubpasses[i];
+ uint32_t color_sample_count = 1, depth_sample_count = 1;
+ struct tu_subpass *subpass = &pass->subpasses[i];
+
+ subpass->input_count = desc->inputAttachmentCount;
+ subpass->color_count = desc->colorAttachmentCount;
+ subpass->view_mask = desc->viewMask;
+
+ if (desc->inputAttachmentCount > 0) {
+ subpass->input_attachments = p;
+ p += desc->inputAttachmentCount;
+
+ for (uint32_t j = 0; j < desc->inputAttachmentCount; j++) {
+ subpass->input_attachments[j] = (struct tu_subpass_attachment){
+ .attachment = desc->pInputAttachments[j].attachment,
+ .layout = desc->pInputAttachments[j].layout,
+ };
+ if (desc->pInputAttachments[j].attachment != VK_ATTACHMENT_UNUSED)
+ pass->attachments[desc->pInputAttachments[j].attachment]
+ .view_mask |= subpass->view_mask;
+ }
+ }
+
+ if (desc->colorAttachmentCount > 0) {
+ subpass->color_attachments = p;
+ p += desc->colorAttachmentCount;
+
+ for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) {
+ subpass->color_attachments[j] = (struct tu_subpass_attachment){
+ .attachment = desc->pColorAttachments[j].attachment,
+ .layout = desc->pColorAttachments[j].layout,
+ };
+ if (desc->pColorAttachments[j].attachment != VK_ATTACHMENT_UNUSED) {
+ pass->attachments[desc->pColorAttachments[j].attachment]
+ .view_mask |= subpass->view_mask;
+ color_sample_count =
+ pCreateInfo
+ ->pAttachments[desc->pColorAttachments[j].attachment]
+ .samples;
+ }
+ }
+ }
+
+ subpass->has_resolve = false;
+ if (desc->pResolveAttachments) {
+ subpass->resolve_attachments = p;
+ p += desc->colorAttachmentCount;
+
+ for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) {
+ uint32_t a = desc->pResolveAttachments[j].attachment;
+ subpass->resolve_attachments[j] = (struct tu_subpass_attachment){
+ .attachment = a,
+ .layout = desc->pResolveAttachments[j].layout,
+ };
+ if (a != VK_ATTACHMENT_UNUSED) {
+ subpass->has_resolve = true;
+ pass->attachments[a].view_mask |= subpass->view_mask;
+ }
+ }
+ }
+
+ if (desc->pDepthStencilAttachment) {
+ subpass->depth_stencil_attachment = (struct tu_subpass_attachment){
+ .attachment = desc->pDepthStencilAttachment->attachment,
+ .layout = desc->pDepthStencilAttachment->layout,
+ };
+ if (desc->pDepthStencilAttachment->attachment !=
+ VK_ATTACHMENT_UNUSED) {
+ pass->attachments[desc->pDepthStencilAttachment->attachment]
+ .view_mask |= subpass->view_mask;
+ depth_sample_count =
+ pCreateInfo
+ ->pAttachments[desc->pDepthStencilAttachment->attachment]
+ .samples;
+ }
+ } else {
+ subpass->depth_stencil_attachment.attachment = VK_ATTACHMENT_UNUSED;
+ }
+
+ subpass->max_sample_count = MAX2(color_sample_count, depth_sample_count);
+ }
+
+ for (unsigned i = 0; i < pCreateInfo->dependencyCount; ++i) {
+ uint32_t dst = pCreateInfo->pDependencies[i].dstSubpass;
+ if (dst == VK_SUBPASS_EXTERNAL) {
+ pass->end_barrier.src_stage_mask =
+ pCreateInfo->pDependencies[i].srcStageMask;
+ pass->end_barrier.src_access_mask =
+ pCreateInfo->pDependencies[i].srcAccessMask;
+ pass->end_barrier.dst_access_mask =
+ pCreateInfo->pDependencies[i].dstAccessMask;
+ } else {
+ pass->subpasses[dst].start_barrier.src_stage_mask =
+ pCreateInfo->pDependencies[i].srcStageMask;
+ pass->subpasses[dst].start_barrier.src_access_mask =
+ pCreateInfo->pDependencies[i].srcAccessMask;
+ pass->subpasses[dst].start_barrier.dst_access_mask =
+ pCreateInfo->pDependencies[i].dstAccessMask;
+ }
+ }
+
+ *pRenderPass = tu_render_pass_to_handle(pass);
+
+ return VK_SUCCESS;
+}
+
+void
+tu_DestroyRenderPass(VkDevice _device,
+ VkRenderPass _pass,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_render_pass, pass, _pass);
+
+ if (!pass)
+ return;
+ vk_free2(&device->alloc, pAllocator, pass->subpass_attachments);
+ vk_free2(&device->alloc, pAllocator, pass);
+}
+
+void
+tu_GetRenderAreaGranularity(VkDevice device,
+ VkRenderPass renderPass,
+ VkExtent2D *pGranularity)
+{
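+ /* A granularity of {1,1} is always conformant: it imposes no alignment
+ * requirement on render areas. A tiling-aware value can presumably be
+ * reported once binning is implemented.
+ */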
+ pGranularity->width = 1;
+ pGranularity->height = 1;
+}
diff --git a/src/freedreno/vulkan/tu_pipeline.c b/src/freedreno/vulkan/tu_pipeline.c
new file mode 100644
index 00000000000..d6713688ce4
--- /dev/null
+++ b/src/freedreno/vulkan/tu_pipeline.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright © 2016 Red Hat.
+ * Copyright © 2016 Bas Nieuwenhuizen
+ *
+ * based in part on anv driver which is:
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "tu_private.h"
+#include "nir/nir.h"
+#include "nir/nir_builder.h"
+#include "spirv/nir_spirv.h"
+#include "util/mesa-sha1.h"
+#include "util/u_atomic.h"
+#include "vk_util.h"
+
+#include "main/menums.h"
+#include "util/debug.h"
+#include "vk_format.h"
+
+VkResult
+tu_graphics_pipeline_create(
+ VkDevice _device,
+ VkPipelineCache _cache,
+ const VkGraphicsPipelineCreateInfo *pCreateInfo,
+ const struct tu_graphics_pipeline_create_info *extra,
+ const VkAllocationCallbacks *pAllocator,
+ VkPipeline *pPipeline)
+{
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_CreateGraphicsPipelines(VkDevice _device,
+ VkPipelineCache pipelineCache,
+ uint32_t count,
+ const VkGraphicsPipelineCreateInfo *pCreateInfos,
+ const VkAllocationCallbacks *pAllocator,
+ VkPipeline *pPipelines)
+{
+ VkResult result = VK_SUCCESS;
+
+ for (unsigned i = 0; i < count; i++) {
+ VkResult r;
+ r = tu_graphics_pipeline_create(_device,
+ pipelineCache,
+ &pCreateInfos[i],
+ NULL,
+ pAllocator,
+ &pPipelines[i]);
+ if (r != VK_SUCCESS) {
+ result = r;
+ pPipelines[i] = VK_NULL_HANDLE;
+ }
+ }
+
+ return result;
+}
+
+static VkResult
+tu_compute_pipeline_create(VkDevice _device,
+ VkPipelineCache _cache,
+ const VkComputePipelineCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkPipeline *pPipeline)
+{
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_CreateComputePipelines(VkDevice _device,
+ VkPipelineCache pipelineCache,
+ uint32_t count,
+ const VkComputePipelineCreateInfo *pCreateInfos,
+ const VkAllocationCallbacks *pAllocator,
+ VkPipeline *pPipelines)
+{
+ VkResult result = VK_SUCCESS;
+
+ for (unsigned i = 0; i < count; i++) {
+ VkResult r;
+ r = tu_compute_pipeline_create(
+ _device, pipelineCache, &pCreateInfos[i], pAllocator, &pPipelines[i]);
+ if (r != VK_SUCCESS) {
+ result = r;
+ pPipelines[i] = VK_NULL_HANDLE;
+ }
+ }
+
+ return result;
+}
diff --git a/src/freedreno/vulkan/tu_pipeline_cache.c b/src/freedreno/vulkan/tu_pipeline_cache.c
new file mode 100644
index 00000000000..32470b75aa0
--- /dev/null
+++ b/src/freedreno/vulkan/tu_pipeline_cache.c
@@ -0,0 +1,424 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "tu_private.h"
+#include "util/debug.h"
+#include "util/disk_cache.h"
+#include "util/mesa-sha1.h"
+#include "util/u_atomic.h"
+
+struct cache_entry_variant_info
+{
+};
+
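+/*
+ * A cache entry is keyed by a SHA-1 of the pipeline state. The per-stage
+ * shader binaries are stored back-to-back in the flexible 'code' array;
+ * entry_size() recomputes the total footprint from code_sizes[].
+ */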
+struct cache_entry
+{
+ union
+ {
+ unsigned char sha1[20];
+ uint32_t sha1_dw[5];
+ };
+ uint32_t code_sizes[MESA_SHADER_STAGES];
+ struct tu_shader_variant *variants[MESA_SHADER_STAGES];
+ char code[0];
+};
+
+void
+tu_pipeline_cache_init(struct tu_pipeline_cache *cache,
+ struct tu_device *device)
+{
+ cache->device = device;
+ pthread_mutex_init(&cache->mutex, NULL);
+
+ cache->modified = false;
+ cache->kernel_count = 0;
+ cache->total_size = 0;
+ cache->table_size = 1024;
+ const size_t byte_size = cache->table_size * sizeof(cache->hash_table[0]);
+ cache->hash_table = malloc(byte_size);
+
+ /* We don't consider allocation failure fatal; we just start with a 0-sized
+ * cache. Disable caching when we want to keep shader debug info, since
+ * we don't get the debug info on cached shaders. */
+ if (cache->hash_table == NULL)
+ cache->table_size = 0;
+ else
+ memset(cache->hash_table, 0, byte_size);
+}
+
+void
+tu_pipeline_cache_finish(struct tu_pipeline_cache *cache)
+{
+ for (unsigned i = 0; i < cache->table_size; ++i) {
+ if (cache->hash_table[i])
+ vk_free(&cache->alloc, cache->hash_table[i]);
+ }
+ pthread_mutex_destroy(&cache->mutex);
+ free(cache->hash_table);
+}
+
+static uint32_t
+entry_size(struct cache_entry *entry)
+{
+ size_t ret = sizeof(*entry);
+ for (int i = 0; i < MESA_SHADER_STAGES; ++i)
+ if (entry->code_sizes[i])
+ ret += sizeof(struct cache_entry_variant_info) + entry->code_sizes[i];
+ return ret;
+}
+
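+/*
+ * Hash everything that can affect the compiled shaders: the pipeline key,
+ * the layout's SHA-1, each stage's module SHA-1, entrypoint name and
+ * specialization constants, and the TU_HASH_SHADER_* flags.
+ */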
+void
+tu_hash_shaders(unsigned char *hash,
+ const VkPipelineShaderStageCreateInfo **stages,
+ const struct tu_pipeline_layout *layout,
+ const struct tu_pipeline_key *key,
+ uint32_t flags)
+{
+ struct mesa_sha1 ctx;
+
+ _mesa_sha1_init(&ctx);
+ if (key)
+ _mesa_sha1_update(&ctx, key, sizeof(*key));
+ if (layout)
+ _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
+
+ for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
+ if (stages[i]) {
+ TU_FROM_HANDLE(tu_shader_module, module, stages[i]->module);
+ const VkSpecializationInfo *spec_info = stages[i]->pSpecializationInfo;
+
+ _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
+ _mesa_sha1_update(&ctx, stages[i]->pName, strlen(stages[i]->pName));
+ if (spec_info) {
+ _mesa_sha1_update(&ctx,
+ spec_info->pMapEntries,
+ spec_info->mapEntryCount *
+ sizeof spec_info->pMapEntries[0]);
+ _mesa_sha1_update(&ctx, spec_info->pData, spec_info->dataSize);
+ }
+ }
+ }
+ _mesa_sha1_update(&ctx, &flags, 4);
+ _mesa_sha1_final(&ctx, hash);
+}
+
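+/*
+ * The cache is an open-addressing hash table with linear probing: start at
+ * the slot named by the low dword of the SHA-1 and scan forward. Because
+ * the table is kept at most half full (see tu_pipeline_cache_add_entry), a
+ * miss always terminates at an empty slot.
+ */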
+static struct cache_entry *
+tu_pipeline_cache_search_unlocked(struct tu_pipeline_cache *cache,
+ const unsigned char *sha1)
+{
+ if (cache->table_size == 0)
+ return NULL;
+
+ const uint32_t mask = cache->table_size - 1;
+ const uint32_t start = (*(uint32_t *)sha1);
+
+ for (uint32_t i = 0; i < cache->table_size; i++) {
+ const uint32_t index = (start + i) & mask;
+ struct cache_entry *entry = cache->hash_table[index];
+
+ if (!entry)
+ return NULL;
+
+ if (memcmp(entry->sha1, sha1, sizeof(entry->sha1)) == 0) {
+ return entry;
+ }
+ }
+
+ unreachable("hash table should never be full");
+}
+
+static struct cache_entry *
+tu_pipeline_cache_search(struct tu_pipeline_cache *cache,
+ const unsigned char *sha1)
+{
+ struct cache_entry *entry;
+
+ pthread_mutex_lock(&cache->mutex);
+
+ entry = tu_pipeline_cache_search_unlocked(cache, sha1);
+
+ pthread_mutex_unlock(&cache->mutex);
+
+ return entry;
+}
+
+static void
+tu_pipeline_cache_set_entry(struct tu_pipeline_cache *cache,
+ struct cache_entry *entry)
+{
+ const uint32_t mask = cache->table_size - 1;
+ const uint32_t start = entry->sha1_dw[0];
+
+ /* We'll always be able to insert when we get here. */
+ assert(cache->kernel_count < cache->table_size / 2);
+
+ for (uint32_t i = 0; i < cache->table_size; i++) {
+ const uint32_t index = (start + i) & mask;
+ if (!cache->hash_table[index]) {
+ cache->hash_table[index] = entry;
+ break;
+ }
+ }
+
+ cache->total_size += entry_size(entry);
+ cache->kernel_count++;
+}
+
+static VkResult
+tu_pipeline_cache_grow(struct tu_pipeline_cache *cache)
+{
+ const uint32_t table_size = cache->table_size * 2;
+ const uint32_t old_table_size = cache->table_size;
+ const size_t byte_size = table_size * sizeof(cache->hash_table[0]);
+ struct cache_entry **table;
+ struct cache_entry **old_table = cache->hash_table;
+
+ table = malloc(byte_size);
+ if (table == NULL)
+ return vk_error(cache->device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ cache->hash_table = table;
+ cache->table_size = table_size;
+ cache->kernel_count = 0;
+ cache->total_size = 0;
+
+ memset(cache->hash_table, 0, byte_size);
+ for (uint32_t i = 0; i < old_table_size; i++) {
+ struct cache_entry *entry = old_table[i];
+ if (!entry)
+ continue;
+
+ tu_pipeline_cache_set_entry(cache, entry);
+ }
+
+ free(old_table);
+
+ return VK_SUCCESS;
+}
+
+static void
+tu_pipeline_cache_add_entry(struct tu_pipeline_cache *cache,
+ struct cache_entry *entry)
+{
+ if (cache->kernel_count == cache->table_size / 2)
+ tu_pipeline_cache_grow(cache);
+
+ /* Failing to grow the hash table isn't fatal, but it may mean we don't
+ * have enough space to add this new kernel. Only add it if there's room.
+ */
+ if (cache->kernel_count < cache->table_size / 2)
+ tu_pipeline_cache_set_entry(cache, entry);
+}
+
+struct cache_header
+{
+ uint32_t header_size;
+ uint32_t header_version;
+ uint32_t vendor_id;
+ uint32_t device_id;
+ uint8_t uuid[VK_UUID_SIZE];
+};
+
+void
+tu_pipeline_cache_load(struct tu_pipeline_cache *cache,
+ const void *data,
+ size_t size)
+{
+ struct tu_device *device = cache->device;
+ struct cache_header header;
+
+ if (size < sizeof(header))
+ return;
+ memcpy(&header, data, sizeof(header));
+ if (header.header_size < sizeof(header))
+ return;
+ if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
+ return;
+ if (header.vendor_id != 0 /* TODO */)
+ return;
+ if (header.device_id != 0 /* TODO */)
+ return;
+ if (memcmp(header.uuid, device->physical_device->cache_uuid, VK_UUID_SIZE) !=
+ 0)
+ return;
+
+ char *end = (void *)data + size;
+ char *p = (void *)data + header.header_size;
+
+ while (end - p >= sizeof(struct cache_entry)) {
+ struct cache_entry *entry = (struct cache_entry *)p;
+ struct cache_entry *dest_entry;
+ size_t entry_sz = entry_size(entry);
+ if (end - p < entry_sz)
+ break;
+
+ dest_entry =
+ vk_alloc(&cache->alloc, entry_sz, 8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
+ if (dest_entry) {
+ memcpy(dest_entry, entry, entry_sz);
+ for (int i = 0; i < MESA_SHADER_STAGES; ++i)
+ dest_entry->variants[i] = NULL;
+ tu_pipeline_cache_add_entry(cache, dest_entry);
+ }
+ p += entry_sz;
+ }
+}
+
+VkResult
+tu_CreatePipelineCache(VkDevice _device,
+ const VkPipelineCacheCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkPipelineCache *pPipelineCache)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ struct tu_pipeline_cache *cache;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
+ assert(pCreateInfo->flags == 0);
+
+ cache = vk_alloc2(&device->alloc,
+ pAllocator,
+ sizeof(*cache),
+ 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (cache == NULL)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ if (pAllocator)
+ cache->alloc = *pAllocator;
+ else
+ cache->alloc = device->alloc;
+
+ tu_pipeline_cache_init(cache, device);
+
+ if (pCreateInfo->initialDataSize > 0) {
+ tu_pipeline_cache_load(
+ cache, pCreateInfo->pInitialData, pCreateInfo->initialDataSize);
+ }
+
+ *pPipelineCache = tu_pipeline_cache_to_handle(cache);
+
+ return VK_SUCCESS;
+}
+
+void
+tu_DestroyPipelineCache(VkDevice _device,
+ VkPipelineCache _cache,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_pipeline_cache, cache, _cache);
+
+ if (!cache)
+ return;
+ tu_pipeline_cache_finish(cache);
+
+ vk_free2(&device->alloc, pAllocator, cache);
+}
+
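+/*
+ * Standard Vulkan two-call idiom: when pData is NULL only the required
+ * size is returned; otherwise as many whole entries as fit are serialized
+ * after the header, and VK_INCOMPLETE is returned if any had to be
+ * dropped.
+ */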
+VkResult
+tu_GetPipelineCacheData(VkDevice _device,
+ VkPipelineCache _cache,
+ size_t *pDataSize,
+ void *pData)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_pipeline_cache, cache, _cache);
+ struct cache_header *header;
+ VkResult result = VK_SUCCESS;
+
+ pthread_mutex_lock(&cache->mutex);
+
+ const size_t size = sizeof(*header) + cache->total_size;
+ if (pData == NULL) {
+ pthread_mutex_unlock(&cache->mutex);
+ *pDataSize = size;
+ return VK_SUCCESS;
+ }
+ if (*pDataSize < sizeof(*header)) {
+ pthread_mutex_unlock(&cache->mutex);
+ *pDataSize = 0;
+ return VK_INCOMPLETE;
+ }
+ void *p = pData, *end = pData + *pDataSize;
+ header = p;
+ header->header_size = sizeof(*header);
+ header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
+ header->vendor_id = 0 /* TODO */;
+ header->device_id = 0 /* TODO */;
+ memcpy(header->uuid, device->physical_device->cache_uuid, VK_UUID_SIZE);
+ p += header->header_size;
+
+ struct cache_entry *entry;
+ for (uint32_t i = 0; i < cache->table_size; i++) {
+ if (!cache->hash_table[i])
+ continue;
+ entry = cache->hash_table[i];
+ const uint32_t entry_sz = entry_size(entry);
+ if (end < p + entry_sz) {
+ result = VK_INCOMPLETE;
+ break;
+ }
+
+ memcpy(p, entry, entry_sz);
+ for (int j = 0; j < MESA_SHADER_STAGES; ++j)
+ ((struct cache_entry *)p)->variants[j] = NULL;
+ p += entry_sz;
+ }
+ *pDataSize = p - pData;
+
+ pthread_mutex_unlock(&cache->mutex);
+ return result;
+}
+
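+/*
+ * Merging moves entries rather than copying them: a transferred entry's
+ * slot in the source table is cleared so the entry is not freed twice when
+ * the source cache is destroyed.
+ */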
+static void
+tu_pipeline_cache_merge(struct tu_pipeline_cache *dst,
+ struct tu_pipeline_cache *src)
+{
+ for (uint32_t i = 0; i < src->table_size; i++) {
+ struct cache_entry *entry = src->hash_table[i];
+ if (!entry || tu_pipeline_cache_search(dst, entry->sha1))
+ continue;
+
+ tu_pipeline_cache_add_entry(dst, entry);
+
+ src->hash_table[i] = NULL;
+ }
+}
+
+VkResult
+tu_MergePipelineCaches(VkDevice _device,
+ VkPipelineCache destCache,
+ uint32_t srcCacheCount,
+ const VkPipelineCache *pSrcCaches)
+{
+ TU_FROM_HANDLE(tu_pipeline_cache, dst, destCache);
+
+ for (uint32_t i = 0; i < srcCacheCount; i++) {
+ TU_FROM_HANDLE(tu_pipeline_cache, src, pSrcCaches[i]);
+
+ tu_pipeline_cache_merge(dst, src);
+ }
+
+ return VK_SUCCESS;
+}
diff --git a/src/freedreno/vulkan/tu_private.h b/src/freedreno/vulkan/tu_private.h
new file mode 100644
index 00000000000..2f035951956
--- /dev/null
+++ b/src/freedreno/vulkan/tu_private.h
@@ -0,0 +1,1221 @@
+/*
+ * Copyright © 2016 Red Hat.
+ * Copyright © 2016 Bas Nieuwenhuizen
+ *
+ * based in part on anv driver which is:
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef TU_PRIVATE_H
+#define TU_PRIVATE_H
+
+#include <assert.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef HAVE_VALGRIND
+#include <memcheck.h>
+#include <valgrind.h>
+#define VG(x) x
+#else
+#define VG(x)
+#endif
+
+#include "c11/threads.h"
+#include "compiler/shader_enums.h"
+#include "main/macros.h"
+#include "util/list.h"
+#include "util/macros.h"
+#include "vk_alloc.h"
+#include "vk_debug_report.h"
+
+#include "tu_descriptor_set.h"
+#include "tu_extensions.h"
+
+/* Pre-declarations needed for WSI entrypoints */
+struct wl_surface;
+struct wl_display;
+typedef struct xcb_connection_t xcb_connection_t;
+typedef uint32_t xcb_visualid_t;
+typedef uint32_t xcb_window_t;
+
+#include <vulkan/vk_android_native_buffer.h>
+#include <vulkan/vk_icd.h>
+#include <vulkan/vulkan.h>
+#include <vulkan/vulkan_intel.h>
+
+#include "tu_entrypoints.h"
+
+#define MAX_VBS 32
+#define MAX_VERTEX_ATTRIBS 32
+#define MAX_RTS 8
+#define MAX_VIEWPORTS 16
+#define MAX_SCISSORS 16
+#define MAX_DISCARD_RECTANGLES 4
+#define MAX_PUSH_CONSTANTS_SIZE 128
+#define MAX_PUSH_DESCRIPTORS 32
+#define MAX_DYNAMIC_UNIFORM_BUFFERS 16
+#define MAX_DYNAMIC_STORAGE_BUFFERS 8
+#define MAX_DYNAMIC_BUFFERS \
+ (MAX_DYNAMIC_UNIFORM_BUFFERS + MAX_DYNAMIC_STORAGE_BUFFERS)
+#define MAX_SAMPLES_LOG2 4
+#define NUM_META_FS_KEYS 13
+#define TU_MAX_DRM_DEVICES 8
+#define MAX_VIEWS 8
+
+#define NUM_DEPTH_CLEAR_PIPELINES 3
+
+/*
+ * The size threshold, in bytes, at which we switch from the CP (command
+ * processor) to a compute shader for certain buffer operations.
+ */
+#define TU_BUFFER_OPS_CS_THRESHOLD 4096
+
+enum tu_mem_heap
+{
+ TU_MEM_HEAP_VRAM,
+ TU_MEM_HEAP_VRAM_CPU_ACCESS,
+ TU_MEM_HEAP_GTT,
+ TU_MEM_HEAP_COUNT
+};
+
+enum tu_mem_type
+{
+ TU_MEM_TYPE_VRAM,
+ TU_MEM_TYPE_GTT_WRITE_COMBINE,
+ TU_MEM_TYPE_VRAM_CPU_ACCESS,
+ TU_MEM_TYPE_GTT_CACHED,
+ TU_MEM_TYPE_COUNT
+};
+
+#define tu_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
+
+static inline uint32_t
+align_u32(uint32_t v, uint32_t a)
+{
+ assert(a != 0 && a == (a & -a));
+ return (v + a - 1) & ~(a - 1);
+}
+
+static inline uint32_t
+align_u32_npot(uint32_t v, uint32_t a)
+{
+ return (v + a - 1) / a * a;
+}
+
+static inline uint64_t
+align_u64(uint64_t v, uint64_t a)
+{
+ assert(a != 0 && a == (a & -a));
+ return (v + a - 1) & ~(a - 1);
+}
+
+static inline int32_t
+align_i32(int32_t v, int32_t a)
+{
+ assert(a != 0 && a == (a & -a));
+ return (v + a - 1) & ~(a - 1);
+}
+
+/** Alignment must be a power of 2. */
+static inline bool
+tu_is_aligned(uintmax_t n, uintmax_t a)
+{
+ assert(a == (a & -a));
+ return (n & (a - 1)) == 0;
+}
+
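+/* Unlike align_*() above, which round a value up to a multiple of the
+ * alignment, round_up_*() return the number of a-sized blocks needed to
+ * cover v, i.e. a ceiling division.
+ */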
+static inline uint32_t
+round_up_u32(uint32_t v, uint32_t a)
+{
+ return (v + a - 1) / a;
+}
+
+static inline uint64_t
+round_up_u64(uint64_t v, uint64_t a)
+{
+ return (v + a - 1) / a;
+}
+
+static inline uint32_t
+tu_minify(uint32_t n, uint32_t levels)
+{
+ if (unlikely(n == 0))
+ return 0;
+ else
+ return MAX2(n >> levels, 1);
+}
+
+static inline float
+tu_clamp_f(float f, float min, float max)
+{
+ assert(min < max);
+
+ if (f > max)
+ return max;
+ else if (f < min)
+ return min;
+ else
+ return f;
+}
+
+static inline bool
+tu_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
+{
+ if (*inout_mask & clear_mask) {
+ *inout_mask &= ~clear_mask;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+#define for_each_bit(b, dword) \
+ for (uint32_t __dword = (dword); (b) = __builtin_ffs(__dword) - 1, __dword; \
+ __dword &= ~(1 << (b)))
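+/* Usage sketch (names are illustrative): visit the index of each set bit
+ * in a mask, lowest first:
+ *
+ * uint32_t b;
+ * for_each_bit(b, state->dirty_mask)
+ * handle_dirty_bit(b);
+ */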
+
+#define typed_memcpy(dest, src, count) \
+ ({ \
+ STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
+ memcpy((dest), (src), (count) * sizeof(*(src))); \
+ })
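+/* typed_memcpy statically asserts that the source and destination element
+ * sizes match, catching copies between mismatched pointer types at compile
+ * time.
+ */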
+
+/* Whenever we generate an error, pass it through this function. Useful for
+ * debugging, where we can break on it. Only call at error site, not when
+ * propagating errors. Might be useful to plug in a stack trace here.
+ */
+
+struct tu_instance;
+
+VkResult
+__vk_errorf(struct tu_instance *instance,
+ VkResult error,
+ const char *file,
+ int line,
+ const char *format,
+ ...);
+
+#define vk_error(instance, error) \
+ __vk_errorf(instance, error, __FILE__, __LINE__, NULL)
+#define vk_errorf(instance, error, format, ...) \
+ __vk_errorf(instance, error, __FILE__, __LINE__, format, ##__VA_ARGS__)
+
+void
+__tu_finishme(const char *file, int line, const char *format, ...)
+ tu_printflike(3, 4);
+void
+tu_loge(const char *format, ...) tu_printflike(1, 2);
+void
+tu_loge_v(const char *format, va_list va);
+void
+tu_logi(const char *format, ...) tu_printflike(1, 2);
+void
+tu_logi_v(const char *format, va_list va);
+
+/**
+ * Print a FINISHME message, including its source location.
+ */
+#define tu_finishme(format, ...) \
+ do { \
+ static bool reported = false; \
+ if (!reported) { \
+ __tu_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__); \
+ reported = true; \
+ } \
+ } while (0)
+
+/* A non-fatal assert. Useful for debugging. */
+#ifdef DEBUG
+#define tu_assert(x) \
+ ({ \
+ if (unlikely(!(x))) \
+ fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
+ })
+#else
+#define tu_assert(x)
+#endif
+
+#define stub_return(v) \
+ do { \
+ tu_finishme("stub %s", __func__); \
+ return (v); \
+ } while (0)
+
+#define stub() \
+ do { \
+ tu_finishme("stub %s", __func__); \
+ return; \
+ } while (0)
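+/* Usage sketch: an unimplemented entrypoint can do stub_return(VK_SUCCESS);
+ * (or stub(); when it returns void), which logs a FINISHME once and then
+ * returns.
+ */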
+
+void *
+tu_lookup_entrypoint_unchecked(const char *name);
+void *
+tu_lookup_entrypoint_checked(
+ const char *name,
+ uint32_t core_version,
+ const struct tu_instance_extension_table *instance,
+ const struct tu_device_extension_table *device);
+
+struct tu_physical_device
+{
+ VK_LOADER_DATA _loader_data;
+
+ struct tu_instance *instance;
+
+ char path[20];
+ char name[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE];
+ uint8_t driver_uuid[VK_UUID_SIZE];
+ uint8_t device_uuid[VK_UUID_SIZE];
+ uint8_t cache_uuid[VK_UUID_SIZE];
+
+ int local_fd;
+ int master_fd;
+
+ /* This is the driver's on-disk cache, used as a fallback as opposed to
+ * the pipeline caches defined by apps.
+ */
+ struct disk_cache *disk_cache;
+
+ struct tu_device_extension_table supported_extensions;
+};
+
+enum tu_debug_flags
+{
+ TU_DEBUG_STARTUP = 1 << 0,
+};
+
+struct tu_instance
+{
+ VK_LOADER_DATA _loader_data;
+
+ VkAllocationCallbacks alloc;
+
+ uint32_t api_version;
+ int physical_device_count;
+ struct tu_physical_device physical_devices[TU_MAX_DRM_DEVICES];
+
+ enum tu_debug_flags debug_flags;
+
+ struct vk_debug_report_instance debug_report_callbacks;
+
+ struct tu_instance_extension_table enabled_extensions;
+};
+
+bool
+tu_instance_extension_supported(const char *name);
+uint32_t
+tu_physical_device_api_version(struct tu_physical_device *dev);
+bool
+tu_physical_device_extension_supported(struct tu_physical_device *dev,
+ const char *name);
+
+struct cache_entry;
+
+struct tu_pipeline_cache
+{
+ struct tu_device *device;
+ pthread_mutex_t mutex;
+
+ uint32_t total_size;
+ uint32_t table_size;
+ uint32_t kernel_count;
+ struct cache_entry **hash_table;
+ bool modified;
+
+ VkAllocationCallbacks alloc;
+};
+
+struct tu_pipeline_key
+{
+};
+
+void
+tu_pipeline_cache_init(struct tu_pipeline_cache *cache,
+ struct tu_device *device);
+void
+tu_pipeline_cache_finish(struct tu_pipeline_cache *cache);
+void
+tu_pipeline_cache_load(struct tu_pipeline_cache *cache,
+ const void *data,
+ size_t size);
+
+struct tu_shader_variant;
+
+bool
+tu_create_shader_variants_from_pipeline_cache(
+ struct tu_device *device,
+ struct tu_pipeline_cache *cache,
+ const unsigned char *sha1,
+ struct tu_shader_variant **variants);
+
+void
+tu_pipeline_cache_insert_shaders(struct tu_device *device,
+ struct tu_pipeline_cache *cache,
+ const unsigned char *sha1,
+ struct tu_shader_variant **variants,
+ const void *const *codes,
+ const unsigned *code_sizes);
+
+struct tu_meta_state
+{
+ VkAllocationCallbacks alloc;
+
+ struct tu_pipeline_cache cache;
+};
+
+/* queue types */
+#define TU_QUEUE_GENERAL 0
+
+#define TU_MAX_QUEUE_FAMILIES 1
+
+struct tu_queue
+{
+ VK_LOADER_DATA _loader_data;
+ struct tu_device *device;
+ uint32_t queue_family_index;
+ int queue_idx;
+ VkDeviceQueueCreateFlags flags;
+};
+
+struct tu_bo_list
+{
+ unsigned capacity;
+ pthread_mutex_t mutex;
+};
+
+struct tu_device
+{
+ VK_LOADER_DATA _loader_data;
+
+ VkAllocationCallbacks alloc;
+
+ struct tu_instance *instance;
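+ /* Carried over from the radv code this skeleton is based on; the
+ * radeon_winsys/radeon_winsys_bo references are not Adreno concepts and
+ * are presumably placeholders until a freedreno backend replaces them.
+ */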
+ struct radeon_winsys *ws;
+
+ struct tu_meta_state meta_state;
+
+ struct tu_queue *queues[TU_MAX_QUEUE_FAMILIES];
+ int queue_count[TU_MAX_QUEUE_FAMILIES];
+
+ struct tu_physical_device *physical_device;
+
+ /* Backup in-memory cache to be used if the app doesn't provide one */
+ struct tu_pipeline_cache *mem_cache;
+
+ struct list_head shader_slabs;
+ mtx_t shader_slab_mutex;
+
+ struct tu_device_extension_table enabled_extensions;
+
+ /* Whether the driver uses a global BO list. */
+ bool use_global_bo_list;
+
+ struct tu_bo_list bo_list;
+};
+
+struct tu_device_memory
+{
+ /* for dedicated allocations */
+ struct tu_image *image;
+ struct tu_buffer *buffer;
+ uint32_t type_index;
+ VkDeviceSize map_size;
+ void *map;
+ void *user_ptr;
+};
+
+struct tu_descriptor_range
+{
+ uint64_t va;
+ uint32_t size;
+};
+
+struct tu_descriptor_set
+{
+ const struct tu_descriptor_set_layout *layout;
+ uint32_t size;
+
+ struct radeon_winsys_bo *bo;
+ uint64_t va;
+ uint32_t *mapped_ptr;
+ struct tu_descriptor_range *dynamic_descriptors;
+};
+
+struct tu_push_descriptor_set
+{
+ struct tu_descriptor_set set;
+ uint32_t capacity;
+};
+
+struct tu_descriptor_pool_entry
+{
+ uint32_t offset;
+ uint32_t size;
+ struct tu_descriptor_set *set;
+};
+
+struct tu_descriptor_pool
+{
+ struct radeon_winsys_bo *bo;
+ uint8_t *mapped_ptr;
+ uint64_t current_offset;
+ uint64_t size;
+
+ uint8_t *host_memory_base;
+ uint8_t *host_memory_ptr;
+ uint8_t *host_memory_end;
+
+ uint32_t entry_count;
+ uint32_t max_entry_count;
+ struct tu_descriptor_pool_entry entries[0];
+};
+
+struct tu_descriptor_update_template_entry
+{
+ VkDescriptorType descriptor_type;
+
+ /* The number of descriptors to update */
+ uint32_t descriptor_count;
+
+ /* Offset into mapped_ptr or dynamic_descriptors, in units of the
+ * respective array's elements.
+ */
+ uint32_t dst_offset;
+
+ /* In dwords. Not valid/used for dynamic descriptors */
+ uint32_t dst_stride;
+
+ uint32_t buffer_offset;
+
+ /* Only valid for combined image samplers and samplers */
+ uint16_t has_sampler;
+
+ /* In bytes */
+ size_t src_offset;
+ size_t src_stride;
+
+ /* For push descriptors */
+ const uint32_t *immutable_samplers;
+};
+
+struct tu_descriptor_update_template
+{
+ uint32_t entry_count;
+ VkPipelineBindPoint bind_point;
+ struct tu_descriptor_update_template_entry entry[0];
+};
+
+struct tu_buffer
+{
+ VkDeviceSize size;
+
+ VkBufferUsageFlags usage;
+ VkBufferCreateFlags flags;
+};
+
+enum tu_dynamic_state_bits
+{
+ TU_DYNAMIC_VIEWPORT = 1 << 0,
+ TU_DYNAMIC_SCISSOR = 1 << 1,
+ TU_DYNAMIC_LINE_WIDTH = 1 << 2,
+ TU_DYNAMIC_DEPTH_BIAS = 1 << 3,
+ TU_DYNAMIC_BLEND_CONSTANTS = 1 << 4,
+ TU_DYNAMIC_DEPTH_BOUNDS = 1 << 5,
+ TU_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6,
+ TU_DYNAMIC_STENCIL_WRITE_MASK = 1 << 7,
+ TU_DYNAMIC_STENCIL_REFERENCE = 1 << 8,
+ TU_DYNAMIC_DISCARD_RECTANGLE = 1 << 9,
+ TU_DYNAMIC_ALL = (1 << 10) - 1,
+};
+
+struct tu_vertex_binding
+{
+ struct tu_buffer *buffer;
+ VkDeviceSize offset;
+};
+
+struct tu_viewport_state
+{
+ uint32_t count;
+ VkViewport viewports[MAX_VIEWPORTS];
+};
+
+struct tu_scissor_state
+{
+ uint32_t count;
+ VkRect2D scissors[MAX_SCISSORS];
+};
+
+struct tu_discard_rectangle_state
+{
+ uint32_t count;
+ VkRect2D rectangles[MAX_DISCARD_RECTANGLES];
+};
+
+struct tu_dynamic_state
+{
+ /**
+ * Bitmask of (1 << VK_DYNAMIC_STATE_*).
+ * Defines the set of saved dynamic state.
+ */
+ uint32_t mask;
+
+ struct tu_viewport_state viewport;
+
+ struct tu_scissor_state scissor;
+
+ float line_width;
+
+ struct
+ {
+ float bias;
+ float clamp;
+ float slope;
+ } depth_bias;
+
+ float blend_constants[4];
+
+ struct
+ {
+ float min;
+ float max;
+ } depth_bounds;
+
+ struct
+ {
+ uint32_t front;
+ uint32_t back;
+ } stencil_compare_mask;
+
+ struct
+ {
+ uint32_t front;
+ uint32_t back;
+ } stencil_write_mask;
+
+ struct
+ {
+ uint32_t front;
+ uint32_t back;
+ } stencil_reference;
+
+ struct tu_discard_rectangle_state discard_rectangle;
+};
+
+extern const struct tu_dynamic_state default_dynamic_state;
+
+const char *
+tu_get_debug_option_name(int id);
+
+const char *
+tu_get_perftest_option_name(int id);
+
+/**
+ * Attachment state when recording a renderpass instance.
+ *
+ * The clear value is valid only if there exists a pending clear.
+ */
+struct tu_attachment_state
+{
+ VkImageAspectFlags pending_clear_aspects;
+ uint32_t cleared_views;
+ VkClearValue clear_value;
+ VkImageLayout current_layout;
+};
+
+struct tu_descriptor_state
+{
+ struct tu_descriptor_set *sets[MAX_SETS];
+ uint32_t dirty;
+ uint32_t valid;
+ struct tu_push_descriptor_set push_set;
+ bool push_dirty;
+ uint32_t dynamic_buffers[4 * MAX_DYNAMIC_BUFFERS];
+};
+
+struct tu_cmd_state
+{
+ /* Vertex descriptors */
+ uint64_t vb_va;
+ unsigned vb_size;
+
+ struct tu_dynamic_state dynamic;
+
+ /* Index buffer */
+ struct tu_buffer *index_buffer;
+ uint64_t index_offset;
+ uint32_t index_type;
+ uint32_t max_index_count;
+ uint64_t index_va;
+};
+
+struct tu_cmd_pool
+{
+ VkAllocationCallbacks alloc;
+ struct list_head cmd_buffers;
+ struct list_head free_cmd_buffers;
+ uint32_t queue_family_index;
+};
+
+struct tu_cmd_buffer_upload
+{
+ uint8_t *map;
+ unsigned offset;
+ uint64_t size;
+ struct radeon_winsys_bo *upload_bo;
+ struct list_head list;
+};
+
+enum tu_cmd_buffer_status
+{
+ TU_CMD_BUFFER_STATUS_INVALID,
+ TU_CMD_BUFFER_STATUS_INITIAL,
+ TU_CMD_BUFFER_STATUS_RECORDING,
+ TU_CMD_BUFFER_STATUS_EXECUTABLE,
+ TU_CMD_BUFFER_STATUS_PENDING,
+};
+
+struct tu_cmd_buffer
+{
+ VK_LOADER_DATA _loader_data;
+
+ struct tu_device *device;
+
+ struct tu_cmd_pool *pool;
+ struct list_head pool_link;
+
+ VkCommandBufferUsageFlags usage_flags;
+ VkCommandBufferLevel level;
+ enum tu_cmd_buffer_status status;
+ struct radeon_cmdbuf *cs;
+ struct tu_cmd_state state;
+ struct tu_vertex_binding vertex_bindings[MAX_VBS];
+ uint32_t queue_family_index;
+
+ uint8_t push_constants[MAX_PUSH_CONSTANTS_SIZE];
+ VkShaderStageFlags push_constant_stages;
+ struct tu_descriptor_set meta_push_descriptors;
+
+ struct tu_descriptor_state descriptors[VK_PIPELINE_BIND_POINT_RANGE_SIZE];
+
+ struct tu_cmd_buffer_upload upload;
+
+ uint32_t scratch_size_needed;
+ uint32_t compute_scratch_size_needed;
+ uint32_t esgs_ring_size_needed;
+ uint32_t gsvs_ring_size_needed;
+ bool tess_rings_needed;
+ bool sample_positions_needed;
+
+ VkResult record_result;
+
+ uint32_t gfx9_fence_offset;
+ struct radeon_winsys_bo *gfx9_fence_bo;
+ uint32_t gfx9_fence_idx;
+ uint64_t gfx9_eop_bug_va;
+
+ /**
+ * Whether a query pool has been reset and we have to flush caches.
+ */
+ bool pending_reset_query;
+};
+
+bool
+tu_get_memory_fd(struct tu_device *device,
+ struct tu_device_memory *memory,
+ int *pFD);
+
+/*
+ * Takes x,y,z as exact numbers of invocations, instead of blocks.
+ *
+ * Limitations: Can't call normal dispatch functions without binding or
+ * rebinding the compute pipeline.
+ */
+void
+tu_unaligned_dispatch(struct tu_cmd_buffer *cmd_buffer,
+ uint32_t x,
+ uint32_t y,
+ uint32_t z);
+
+struct tu_event
+{
+ struct radeon_winsys_bo *bo;
+ uint64_t *map;
+};
+
+struct tu_shader_module;
+
+#define TU_HASH_SHADER_IS_GEOM_COPY_SHADER (1 << 0)
+#define TU_HASH_SHADER_SISCHED (1 << 1)
+#define TU_HASH_SHADER_UNSAFE_MATH (1 << 2)
+void
+tu_hash_shaders(unsigned char *hash,
+ const VkPipelineShaderStageCreateInfo **stages,
+ const struct tu_pipeline_layout *layout,
+ const struct tu_pipeline_key *key,
+ uint32_t flags);
+
+static inline gl_shader_stage
+vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
+{
+ assert(__builtin_popcount(vk_stage) == 1);
+ return ffs(vk_stage) - 1;
+}
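+
+/* For example, vk_to_mesa_shader_stage(VK_SHADER_STAGE_FRAGMENT_BIT)
+ * yields MESA_SHADER_FRAGMENT: the single Vulkan stage bits and the
+ * gl_shader_stage enum share the same ordering, so ffs() - 1 maps one to
+ * the other.
+ */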
+
+static inline VkShaderStageFlagBits
+mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
+{
+ return (1 << mesa_stage);
+}
+
+#define TU_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
+
+#define tu_foreach_stage(stage, stage_bits) \
+ for (gl_shader_stage stage, \
+ __tmp = (gl_shader_stage)((stage_bits)&TU_STAGE_MASK); \
+ stage = __builtin_ffs(__tmp) - 1, __tmp; \
+ __tmp &= ~(1 << (stage)))
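+
+/* Example usage (hash_stage is a hypothetical callee); the macro masks
+ * stage_bits with TU_STAGE_MASK itself, so raw VkShaderStageFlags can be
+ * passed in directly:
+ *
+ *    tu_foreach_stage(stage, pipeline->active_stages)
+ *       hash_stage(stage);
+ */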
+
+struct tu_shader_module
+{
+ struct nir_shader *nir;
+ unsigned char sha1[20];
+ uint32_t size;
+ char data[0];
+};
+
+struct tu_pipeline
+{
+ struct tu_device *device;
+ struct tu_dynamic_state dynamic_state;
+
+ struct tu_pipeline_layout *layout;
+
+ bool need_indirect_descriptor_sets;
+ VkShaderStageFlags active_stages;
+};
+
+struct tu_userdata_info *
+tu_lookup_user_sgpr(struct tu_pipeline *pipeline,
+ gl_shader_stage stage,
+ int idx);
+
+struct tu_shader_variant *
+tu_get_shader(struct tu_pipeline *pipeline, gl_shader_stage stage);
+
+struct tu_graphics_pipeline_create_info
+{
+ bool use_rectlist;
+ bool db_depth_clear;
+ bool db_stencil_clear;
+ bool db_depth_disable_expclear;
+ bool db_stencil_disable_expclear;
+ bool db_flush_depth_inplace;
+ bool db_flush_stencil_inplace;
+ bool db_resummarize;
+ uint32_t custom_blend_mode;
+};
+
+VkResult
+tu_graphics_pipeline_create(
+ VkDevice device,
+ VkPipelineCache cache,
+ const VkGraphicsPipelineCreateInfo *pCreateInfo,
+ const struct tu_graphics_pipeline_create_info *extra,
+ const VkAllocationCallbacks *alloc,
+ VkPipeline *pPipeline);
+
+struct vk_format_description;
+uint32_t
+tu_translate_buffer_dataformat(const struct vk_format_description *desc,
+ int first_non_void);
+uint32_t
+tu_translate_buffer_numformat(const struct vk_format_description *desc,
+ int first_non_void);
+uint32_t
+tu_translate_colorformat(VkFormat format);
+uint32_t
+tu_translate_color_numformat(VkFormat format,
+ const struct vk_format_description *desc,
+ int first_non_void);
+uint32_t
+tu_colorformat_endian_swap(uint32_t colorformat);
+unsigned
+tu_translate_colorswap(VkFormat format, bool do_endian_swap);
+uint32_t
+tu_translate_dbformat(VkFormat format);
+uint32_t
+tu_translate_tex_dataformat(VkFormat format,
+ const struct vk_format_description *desc,
+ int first_non_void);
+uint32_t
+tu_translate_tex_numformat(VkFormat format,
+ const struct vk_format_description *desc,
+ int first_non_void);
+bool
+tu_format_pack_clear_color(VkFormat format,
+ uint32_t clear_vals[2],
+ VkClearColorValue *value);
+bool
+tu_is_colorbuffer_format_supported(VkFormat format, bool *blendable);
+bool
+tu_dcc_formats_compatible(VkFormat format1, VkFormat format2);
+
+struct tu_image
+{
+ VkImageType type;
+ /* The original VkFormat provided by the client. This may not match any
+ * of the actual surface formats.
+ */
+ VkFormat vk_format;
+ VkImageAspectFlags aspects;
+ VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
+ VkImageTiling tiling; /**< VkImageCreateInfo::tiling */
+ VkImageCreateFlags flags; /**< VkImageCreateInfo::flags */
+
+ VkDeviceSize size;
+ uint32_t alignment;
+
+ unsigned queue_family_mask;
+ bool exclusive;
+ bool shareable;
+
+ /* For VK_ANDROID_native_buffer, the WSI image owns the memory. */
+ VkDeviceMemory owned_memory;
+};
+
+unsigned
+tu_image_queue_family_mask(const struct tu_image *image,
+ uint32_t family,
+ uint32_t queue_family);
+
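+/* The two subresource helpers below are unimplemented stubs in this
+ * skeleton and abort() if reached.
+ */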
+static inline uint32_t
+tu_get_layerCount(const struct tu_image *image,
+ const VkImageSubresourceRange *range)
+{
+ abort();
+}
+
+static inline uint32_t
+tu_get_levelCount(const struct tu_image *image,
+ const VkImageSubresourceRange *range)
+{
+ abort();
+}
+
+struct tu_image_view
+{
+ struct tu_image *image; /**< VkImageViewCreateInfo::image */
+
+ VkImageViewType type;
+ VkImageAspectFlags aspect_mask;
+ VkFormat vk_format;
+ uint32_t base_layer;
+ uint32_t layer_count;
+ uint32_t base_mip;
+ uint32_t level_count;
+ VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */
+
+ uint32_t descriptor[16];
+
+ /* Descriptor for use as a storage image as opposed to a sampled image.
+ * This has a few differences for cube maps (e.g. type).
+ */
+ uint32_t storage_descriptor[16];
+};
+
+struct tu_sampler
+{
+};
+
+struct tu_image_create_info
+{
+ const VkImageCreateInfo *vk_info;
+ bool scanout;
+ bool no_metadata_planes;
+};
+
+VkResult
+tu_image_create(VkDevice _device,
+ const struct tu_image_create_info *info,
+ const VkAllocationCallbacks *alloc,
+ VkImage *pImage);
+
+VkResult
+tu_image_from_gralloc(VkDevice device_h,
+ const VkImageCreateInfo *base_info,
+ const VkNativeBufferANDROID *gralloc_info,
+ const VkAllocationCallbacks *alloc,
+ VkImage *out_image_h);
+
+void
+tu_image_view_init(struct tu_image_view *view,
+ struct tu_device *device,
+ const VkImageViewCreateInfo *pCreateInfo);
+
+struct tu_buffer_view
+{
+ struct radeon_winsys_bo *bo;
+ VkFormat vk_format;
+ uint64_t range; /**< VkBufferViewCreateInfo::range */
+ uint32_t state[4];
+};
+void
+tu_buffer_view_init(struct tu_buffer_view *view,
+ struct tu_device *device,
+ const VkBufferViewCreateInfo *pCreateInfo);
+
+static inline struct VkExtent3D
+tu_sanitize_image_extent(const VkImageType imageType,
+ const struct VkExtent3D imageExtent)
+{
+ switch (imageType) {
+ case VK_IMAGE_TYPE_1D:
+ return (VkExtent3D){ imageExtent.width, 1, 1 };
+ case VK_IMAGE_TYPE_2D:
+ return (VkExtent3D){ imageExtent.width, imageExtent.height, 1 };
+ case VK_IMAGE_TYPE_3D:
+ return imageExtent;
+ default:
+ unreachable("invalid image type");
+ }
+}
+
+static inline struct VkOffset3D
+tu_sanitize_image_offset(const VkImageType imageType,
+ const struct VkOffset3D imageOffset)
+{
+ switch (imageType) {
+ case VK_IMAGE_TYPE_1D:
+ return (VkOffset3D){ imageOffset.x, 0, 0 };
+ case VK_IMAGE_TYPE_2D:
+ return (VkOffset3D){ imageOffset.x, imageOffset.y, 0 };
+ case VK_IMAGE_TYPE_3D:
+ return imageOffset;
+ default:
+ unreachable("invalid image type");
+ }
+}
+
+struct tu_attachment_info
+{
+ struct tu_image_view *attachment;
+};
+
+struct tu_framebuffer
+{
+ uint32_t width;
+ uint32_t height;
+ uint32_t layers;
+
+ uint32_t attachment_count;
+ struct tu_attachment_info attachments[0];
+};
+
+struct tu_subpass_barrier
+{
+ VkPipelineStageFlags src_stage_mask;
+ VkAccessFlags src_access_mask;
+ VkAccessFlags dst_access_mask;
+};
+
+void
+tu_subpass_barrier(struct tu_cmd_buffer *cmd_buffer,
+ const struct tu_subpass_barrier *barrier);
+
+struct tu_subpass_attachment
+{
+ uint32_t attachment;
+ VkImageLayout layout;
+};
+
+struct tu_subpass
+{
+ uint32_t input_count;
+ uint32_t color_count;
+ struct tu_subpass_attachment *input_attachments;
+ struct tu_subpass_attachment *color_attachments;
+ struct tu_subpass_attachment *resolve_attachments;
+ struct tu_subpass_attachment depth_stencil_attachment;
+
+ /** Subpass has at least one resolve attachment */
+ bool has_resolve;
+
+ struct tu_subpass_barrier start_barrier;
+
+ uint32_t view_mask;
+ VkSampleCountFlagBits max_sample_count;
+};
+
+struct tu_render_pass_attachment
+{
+ VkFormat format;
+ uint32_t samples;
+ VkAttachmentLoadOp load_op;
+ VkAttachmentLoadOp stencil_load_op;
+ VkImageLayout initial_layout;
+ VkImageLayout final_layout;
+ uint32_t view_mask;
+};
+
+struct tu_render_pass
+{
+ uint32_t attachment_count;
+ uint32_t subpass_count;
+ struct tu_subpass_attachment *subpass_attachments;
+ struct tu_render_pass_attachment *attachments;
+ struct tu_subpass_barrier end_barrier;
+ struct tu_subpass subpasses[0];
+};
+
+VkResult
+tu_device_init_meta(struct tu_device *device);
+void
+tu_device_finish_meta(struct tu_device *device);
+
+struct tu_query_pool
+{
+ struct radeon_winsys_bo *bo;
+ uint32_t stride;
+ uint32_t availability_offset;
+ uint64_t size;
+ char *ptr;
+ VkQueryType type;
+ uint32_t pipeline_stats_mask;
+};
+
+struct tu_semaphore
+{
+ /* use a winsys sem for non-exportable */
+ struct radeon_winsys_sem *sem;
+ uint32_t syncobj;
+ uint32_t temp_syncobj;
+};
+
+void
+tu_set_descriptor_set(struct tu_cmd_buffer *cmd_buffer,
+ VkPipelineBindPoint bind_point,
+ struct tu_descriptor_set *set,
+ unsigned idx);
+
+void
+tu_update_descriptor_sets(struct tu_device *device,
+ struct tu_cmd_buffer *cmd_buffer,
+ VkDescriptorSet overrideSet,
+ uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet *pDescriptorWrites,
+ uint32_t descriptorCopyCount,
+ const VkCopyDescriptorSet *pDescriptorCopies);
+
+void
+tu_update_descriptor_set_with_template(
+ struct tu_device *device,
+ struct tu_cmd_buffer *cmd_buffer,
+ struct tu_descriptor_set *set,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ const void *pData);
+
+void
+tu_meta_push_descriptor_set(struct tu_cmd_buffer *cmd_buffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout _layout,
+ uint32_t set,
+ uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet *pDescriptorWrites);
+
+struct tu_fence
+{
+ struct radeon_winsys_fence *fence;
+ bool submitted;
+ bool signalled;
+
+ uint32_t syncobj;
+ uint32_t temp_syncobj;
+};
+
+/* tu_nir_to_llvm.c */
+struct tu_shader_variant_info;
+struct tu_nir_compiler_options;
+
+struct radeon_winsys_sem;
+
+#define TU_DEFINE_HANDLE_CASTS(__tu_type, __VkType) \
+ \
+ static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \
+ { \
+ return (struct __tu_type *)_handle; \
+ } \
+ \
+ static inline __VkType __tu_type##_to_handle(struct __tu_type *_obj) \
+ { \
+ return (__VkType)_obj; \
+ }
+
+#define TU_DEFINE_NONDISP_HANDLE_CASTS(__tu_type, __VkType) \
+ \
+ static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \
+ { \
+ return (struct __tu_type *)(uintptr_t)_handle; \
+ } \
+ \
+ static inline __VkType __tu_type##_to_handle(struct __tu_type *_obj) \
+ { \
+ return (__VkType)(uintptr_t)_obj; \
+ }
+
+#define TU_FROM_HANDLE(__tu_type, __name, __handle) \
+ struct __tu_type *__name = __tu_type##_from_handle(__handle)
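+
+/* Example, as used by the entrypoints in this driver (e.g.
+ * tu_CreateQueryPool below):
+ *
+ *    TU_FROM_HANDLE(tu_device, device, _device);
+ *
+ * declares `struct tu_device *device` resolved from the VkDevice handle
+ * `_device`.
+ */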
+
+TU_DEFINE_HANDLE_CASTS(tu_cmd_buffer, VkCommandBuffer)
+TU_DEFINE_HANDLE_CASTS(tu_device, VkDevice)
+TU_DEFINE_HANDLE_CASTS(tu_instance, VkInstance)
+TU_DEFINE_HANDLE_CASTS(tu_physical_device, VkPhysicalDevice)
+TU_DEFINE_HANDLE_CASTS(tu_queue, VkQueue)
+
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_cmd_pool, VkCommandPool)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_buffer, VkBuffer)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_buffer_view, VkBufferView)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_pool, VkDescriptorPool)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_set, VkDescriptorSet)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_set_layout,
+ VkDescriptorSetLayout)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_update_template,
+ VkDescriptorUpdateTemplateKHR)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_device_memory, VkDeviceMemory)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_fence, VkFence)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_event, VkEvent)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_framebuffer, VkFramebuffer)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_image, VkImage)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_image_view, VkImageView)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_pipeline_cache, VkPipelineCache)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_pipeline, VkPipeline)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_pipeline_layout, VkPipelineLayout)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_query_pool, VkQueryPool)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_render_pass, VkRenderPass)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_sampler, VkSampler)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_shader_module, VkShaderModule)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_semaphore, VkSemaphore)
+
+#endif /* TU_PRIVATE_H */
diff --git a/src/freedreno/vulkan/tu_query.c b/src/freedreno/vulkan/tu_query.c
new file mode 100644
index 00000000000..30f47c8ca9d
--- /dev/null
+++ b/src/freedreno/vulkan/tu_query.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ * Based on anv:
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "tu_private.h"
+#include "nir/nir_builder.h"
+
+VkResult
+tu_CreateQueryPool(VkDevice _device,
+ const VkQueryPoolCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkQueryPool *pQueryPool)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ struct tu_query_pool *pool = vk_alloc2(&device->alloc,
+ pAllocator,
+ sizeof(*pool),
+ 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+ if (!pool)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ *pQueryPool = tu_query_pool_to_handle(pool);
+ return VK_SUCCESS;
+}
+
+void
+tu_DestroyQueryPool(VkDevice _device,
+ VkQueryPool _pool,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_query_pool, pool, _pool);
+
+ if (!pool)
+ return;
+
+ vk_free2(&device->alloc, pAllocator, pool);
+}
+
+VkResult
+tu_GetQueryPoolResults(VkDevice _device,
+ VkQueryPool queryPool,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ size_t dataSize,
+ void *pData,
+ VkDeviceSize stride,
+ VkQueryResultFlags flags)
+{
+ return VK_SUCCESS;
+}
+
+void
+tu_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ VkBuffer dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize stride,
+ VkQueryResultFlags flags)
+{
+}
+
+void
+tu_CmdResetQueryPool(VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t firstQuery,
+ uint32_t queryCount)
+{
+}
+
+void
+tu_CmdBeginQuery(VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t query,
+ VkQueryControlFlags flags)
+{
+}
+
+void
+tu_CmdEndQuery(VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t query)
+{
+}
+
+void
+tu_CmdWriteTimestamp(VkCommandBuffer commandBuffer,
+ VkPipelineStageFlagBits pipelineStage,
+ VkQueryPool queryPool,
+ uint32_t query)
+{
+}
diff --git a/src/freedreno/vulkan/tu_util.c b/src/freedreno/vulkan/tu_util.c
new file mode 100644
index 00000000000..311c3a1b141
--- /dev/null
+++ b/src/freedreno/vulkan/tu_util.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "tu_private.h"
+#include "vk_enum_to_str.h"
+
+#include "util/u_math.h"
+
+/** Log an error message. */
+void tu_printflike(1, 2) tu_loge(const char *format, ...)
+{
+ va_list va;
+
+ va_start(va, format);
+ tu_loge_v(format, va);
+ va_end(va);
+}
+
+/** \see tu_loge() */
+void
+tu_loge_v(const char *format, va_list va)
+{
+ fprintf(stderr, "vk: error: ");
+ vfprintf(stderr, format, va);
+ fprintf(stderr, "\n");
+}
+
+/** Log an info message. */
+void tu_printflike(1, 2) tu_logi(const char *format, ...)
+{
+ va_list va;
+
+ va_start(va, format);
+ tu_logi_v(format, va);
+ va_end(va);
+}
+
+/** \see tu_logi() */
+void
+tu_logi_v(const char *format, va_list va)
+{
+ fprintf(stderr, "tu: info: ");
+ vfprintf(stderr, format, va);
+ fprintf(stderr, "\n");
+}
+
+void tu_printflike(3, 4)
+ __tu_finishme(const char *file, int line, const char *format, ...)
+{
+ va_list ap;
+ char buffer[256];
+
+ va_start(ap, format);
+ vsnprintf(buffer, sizeof(buffer), format, ap);
+ va_end(ap);
+
+ fprintf(stderr, "%s:%d: FINISHME: %s\n", file, line, buffer);
+}
+
+VkResult
+__vk_errorf(struct tu_instance *instance,
+ VkResult error,
+ const char *file,
+ int line,
+ const char *format,
+ ...)
+{
+ va_list ap;
+ char buffer[256];
+
+#ifndef DEBUG
+ return error;
+#endif
+
+ const char *error_str = vk_Result_to_str(error);
+
+ if (format) {
+ va_start(ap, format);
+ vsnprintf(buffer, sizeof(buffer), format, ap);
+ va_end(ap);
+
+ fprintf(stderr, "%s:%d: %s (%s)\n", file, line, buffer, error_str);
+ } else {
+ fprintf(stderr, "%s:%d: %s\n", file, line, error_str);
+ }
+
+ return error;
+}
diff --git a/src/freedreno/vulkan/tu_util.h b/src/freedreno/vulkan/tu_util.h
new file mode 100644
index 00000000000..b013079d518
--- /dev/null
+++ b/src/freedreno/vulkan/tu_util.h
@@ -0,0 +1,11 @@
+#ifndef TU_UTIL_H
+#define TU_UTIL_H
+
+#ifdef HAVE___BUILTIN_POPCOUNT
+#define util_bitcount(i) __builtin_popcount(i)
+#else
+extern unsigned int
+util_bitcount(unsigned int n);
+#endif
+
+#endif /* TU_UTIL_H */
diff --git a/src/freedreno/vulkan/vk_format.h b/src/freedreno/vulkan/vk_format.h
new file mode 100644
index 00000000000..bab0eda2076
--- /dev/null
+++ b/src/freedreno/vulkan/vk_format.h
@@ -0,0 +1,545 @@
+/*
+ * Copyright © 2016 Red Hat.
+ * Copyright © 2016 Bas Nieuwenhuizen
+ *
+ * Based on u_format.h which is:
+ * Copyright 2009-2010 Vmware, Inc.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VK_FORMAT_H
+#define VK_FORMAT_H
+
+#include <assert.h>
+#include <util/macros.h>
+#include <vulkan/vulkan.h>
+
+enum vk_format_layout
+{
+ /**
+ * Formats with vk_format_block::width == vk_format_block::height == 1
+ * that can be described as an ordinary data structure.
+ */
+ VK_FORMAT_LAYOUT_PLAIN = 0,
+
+ /**
+ * Formats with sub-sampled channels.
+ *
+ * This is for formats like YVYU where there is less than one sample per
+ * pixel.
+ */
+ VK_FORMAT_LAYOUT_SUBSAMPLED = 3,
+
+ /**
+ * S3 Texture Compression formats.
+ */
+ VK_FORMAT_LAYOUT_S3TC = 4,
+
+ /**
+ * Red-Green Texture Compression formats.
+ */
+ VK_FORMAT_LAYOUT_RGTC = 5,
+
+ /**
+ * Ericsson Texture Compression
+ */
+ VK_FORMAT_LAYOUT_ETC = 6,
+
+ /**
+ * BC6/7 Texture Compression
+ */
+ VK_FORMAT_LAYOUT_BPTC = 7,
+
+ /**
+ * ASTC
+ */
+ VK_FORMAT_LAYOUT_ASTC = 8,
+
+ /**
+ * Everything else that doesn't fit in any of the above layouts.
+ */
+ VK_FORMAT_LAYOUT_OTHER = 9
+};
+
+struct vk_format_block
+{
+ /** Block width in pixels */
+ unsigned width;
+
+ /** Block height in pixels */
+ unsigned height;
+
+ /** Block size in bits */
+ unsigned bits;
+};
+
+enum vk_format_type
+{
+ VK_FORMAT_TYPE_VOID = 0,
+ VK_FORMAT_TYPE_UNSIGNED = 1,
+ VK_FORMAT_TYPE_SIGNED = 2,
+ VK_FORMAT_TYPE_FIXED = 3,
+ VK_FORMAT_TYPE_FLOAT = 4
+};
+
+enum vk_format_colorspace
+{
+ VK_FORMAT_COLORSPACE_RGB = 0,
+ VK_FORMAT_COLORSPACE_SRGB = 1,
+ VK_FORMAT_COLORSPACE_YUV = 2,
+ VK_FORMAT_COLORSPACE_ZS = 3
+};
+
+struct vk_format_channel_description
+{
+ unsigned type : 5;
+ unsigned normalized : 1;
+ unsigned pure_integer : 1;
+ unsigned scaled : 1;
+ unsigned size : 8;
+ unsigned shift : 16;
+};
+
+struct vk_format_description
+{
+ VkFormat format;
+ const char *name;
+ const char *short_name;
+
+ struct vk_format_block block;
+ enum vk_format_layout layout;
+
+ unsigned nr_channels : 3;
+ unsigned is_array : 1;
+ unsigned is_bitmask : 1;
+ unsigned is_mixed : 1;
+
+ struct vk_format_channel_description channel[4];
+
+ unsigned char swizzle[4];
+
+ enum vk_format_colorspace colorspace;
+};
+
+extern const struct vk_format_description vk_format_description_table[];
+
+const struct vk_format_description *
+vk_format_description(VkFormat format);
+
+/**
+ * Return total bits needed for the pixel format per block.
+ */
+static inline unsigned
+vk_format_get_blocksizebits(VkFormat format)
+{
+ const struct vk_format_description *desc = vk_format_description(format);
+
+ assert(desc);
+ if (!desc) {
+ return 0;
+ }
+
+ return desc->block.bits;
+}
+
+/**
+ * Return bytes per block (not pixel) for the given format.
+ */
+static inline unsigned
+vk_format_get_blocksize(VkFormat format)
+{
+ unsigned bits = vk_format_get_blocksizebits(format);
+ unsigned bytes = bits / 8;
+
+ assert(bits % 8 == 0);
+ assert(bytes > 0);
+ if (bytes == 0) {
+ bytes = 1;
+ }
+
+ return bytes;
+}
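+
+/* For example, VK_FORMAT_R8G8B8A8_UNORM describes a 1x1 block of 32 bits,
+ * so vk_format_get_blocksize() returns 4; VK_FORMAT_BC1_RGB_UNORM_BLOCK
+ * describes a 4x4 block of 64 bits and returns 8.
+ */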
+
+static inline unsigned
+vk_format_get_blockwidth(VkFormat format)
+{
+ const struct vk_format_description *desc = vk_format_description(format);
+
+ assert(desc);
+ if (!desc) {
+ return 1;
+ }
+
+ return desc->block.width;
+}
+
+static inline unsigned
+vk_format_get_blockheight(VkFormat format)
+{
+ const struct vk_format_description *desc = vk_format_description(format);
+
+ assert(desc);
+ if (!desc) {
+ return 1;
+ }
+
+ return desc->block.height;
+}
+
+/**
+ * Return the index of the first non-void channel,
+ * or -1 if there are no non-void channels.
+ */
+static inline int
+vk_format_get_first_non_void_channel(VkFormat format)
+{
+ const struct vk_format_description *desc = vk_format_description(format);
+ int i;
+
+ for (i = 0; i < 4; i++)
+ if (desc->channel[i].type != VK_FORMAT_TYPE_VOID)
+ break;
+
+ if (i == 4)
+ return -1;
+
+ return i;
+}
+
+enum vk_swizzle
+{
+ VK_SWIZZLE_X,
+ VK_SWIZZLE_Y,
+ VK_SWIZZLE_Z,
+ VK_SWIZZLE_W,
+ VK_SWIZZLE_0,
+ VK_SWIZZLE_1,
+ VK_SWIZZLE_NONE,
+ VK_SWIZZLE_MAX, /**< Number of enums (must be last) */
+};
+
+static inline VkImageAspectFlags
+vk_format_aspects(VkFormat format)
+{
+ switch (format) {
+ case VK_FORMAT_UNDEFINED:
+ return 0;
+
+ case VK_FORMAT_S8_UINT:
+ return VK_IMAGE_ASPECT_STENCIL_BIT;
+
+ case VK_FORMAT_D16_UNORM_S8_UINT:
+ case VK_FORMAT_D24_UNORM_S8_UINT:
+ case VK_FORMAT_D32_SFLOAT_S8_UINT:
+ return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
+
+ case VK_FORMAT_D16_UNORM:
+ case VK_FORMAT_X8_D24_UNORM_PACK32:
+ case VK_FORMAT_D32_SFLOAT:
+ return VK_IMAGE_ASPECT_DEPTH_BIT;
+
+ default:
+ return VK_IMAGE_ASPECT_COLOR_BIT;
+ }
+}
+
+static inline enum vk_swizzle
+tu_swizzle_conv(VkComponentSwizzle component,
+ const unsigned char chan[4],
+ VkComponentSwizzle vk_swiz)
+{
+ int x;
+
+ if (vk_swiz == VK_COMPONENT_SWIZZLE_IDENTITY)
+ vk_swiz = component;
+ switch (vk_swiz) {
+ case VK_COMPONENT_SWIZZLE_ZERO:
+ return VK_SWIZZLE_0;
+ case VK_COMPONENT_SWIZZLE_ONE:
+ return VK_SWIZZLE_1;
+ case VK_COMPONENT_SWIZZLE_R:
+ for (x = 0; x < 4; x++)
+ if (chan[x] == 0)
+ return x;
+ return VK_SWIZZLE_0;
+ case VK_COMPONENT_SWIZZLE_G:
+ for (x = 0; x < 4; x++)
+ if (chan[x] == 1)
+ return x;
+ return VK_SWIZZLE_0;
+ case VK_COMPONENT_SWIZZLE_B:
+ for (x = 0; x < 4; x++)
+ if (chan[x] == 2)
+ return x;
+ return VK_SWIZZLE_0;
+ case VK_COMPONENT_SWIZZLE_A:
+ for (x = 0; x < 4; x++)
+ if (chan[x] == 3)
+ return x;
+ return VK_SWIZZLE_1;
+ default:
+ unreachable("Illegal swizzle");
+ }
+}
+
+static inline void
+vk_format_compose_swizzles(const VkComponentMapping *mapping,
+ const unsigned char swz[4],
+ enum vk_swizzle dst[4])
+{
+ dst[0] = tu_swizzle_conv(VK_COMPONENT_SWIZZLE_R, swz, mapping->r);
+ dst[1] = tu_swizzle_conv(VK_COMPONENT_SWIZZLE_G, swz, mapping->g);
+ dst[2] = tu_swizzle_conv(VK_COMPONENT_SWIZZLE_B, swz, mapping->b);
+ dst[3] = tu_swizzle_conv(VK_COMPONENT_SWIZZLE_A, swz, mapping->a);
+}
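+
+/* Example (hypothetical): when filling an image view descriptor, compose
+ * the application's component mapping with the format's own swizzle:
+ *
+ *    const struct vk_format_description *desc =
+ *       vk_format_description(pCreateInfo->format);
+ *    enum vk_swizzle swizzle[4];
+ *    vk_format_compose_swizzles(&pCreateInfo->components, desc->swizzle,
+ *                               swizzle);
+ */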
+
+static inline bool
+vk_format_is_compressed(VkFormat format)
+{
+ const struct vk_format_description *desc = vk_format_description(format);
+
+ assert(desc);
+ if (!desc) {
+ return false;
+ }
+
+ switch (desc->layout) {
+ case VK_FORMAT_LAYOUT_S3TC:
+ case VK_FORMAT_LAYOUT_RGTC:
+ case VK_FORMAT_LAYOUT_ETC:
+ case VK_FORMAT_LAYOUT_BPTC:
+ case VK_FORMAT_LAYOUT_ASTC:
+ /* XXX add other formats in the future */
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool
+vk_format_has_depth(const struct vk_format_description *desc)
+{
+ return desc->colorspace == VK_FORMAT_COLORSPACE_ZS &&
+ desc->swizzle[0] != VK_SWIZZLE_NONE;
+}
+
+static inline bool
+vk_format_has_stencil(const struct vk_format_description *desc)
+{
+ return desc->colorspace == VK_FORMAT_COLORSPACE_ZS &&
+ desc->swizzle[1] != VK_SWIZZLE_NONE;
+}
+
+static inline bool
+vk_format_is_depth_or_stencil(VkFormat format)
+{
+ const struct vk_format_description *desc = vk_format_description(format);
+
+ assert(desc);
+ if (!desc) {
+ return false;
+ }
+
+ return vk_format_has_depth(desc) || vk_format_has_stencil(desc);
+}
+
+static inline bool
+vk_format_is_depth(VkFormat format)
+{
+ const struct vk_format_description *desc = vk_format_description(format);
+
+ assert(desc);
+ if (!desc) {
+ return false;
+ }
+
+ return vk_format_has_depth(desc);
+}
+
+static inline bool
+vk_format_is_stencil(VkFormat format)
+{
+ const struct vk_format_description *desc = vk_format_description(format);
+
+ assert(desc);
+ if (!desc) {
+ return false;
+ }
+
+ return vk_format_has_stencil(desc);
+}
+
+static inline bool
+vk_format_is_color(VkFormat format)
+{
+ return !vk_format_is_depth_or_stencil(format);
+}
+
+static inline VkFormat
+vk_format_depth_only(VkFormat format)
+{
+ switch (format) {
+ case VK_FORMAT_D16_UNORM_S8_UINT:
+ return VK_FORMAT_D16_UNORM;
+ case VK_FORMAT_D24_UNORM_S8_UINT:
+ return VK_FORMAT_X8_D24_UNORM_PACK32;
+ case VK_FORMAT_D32_SFLOAT_S8_UINT:
+ return VK_FORMAT_D32_SFLOAT;
+ default:
+ return format;
+ }
+}
+
+static inline bool
+vk_format_is_int(VkFormat format)
+{
+ const struct vk_format_description *desc = vk_format_description(format);
+ int channel = vk_format_get_first_non_void_channel(format);
+
+ return channel >= 0 && desc->channel[channel].pure_integer;
+}
+
+static inline bool
+vk_format_is_srgb(VkFormat format)
+{
+ const struct vk_format_description *desc = vk_format_description(format);
+ return desc->colorspace == VK_FORMAT_COLORSPACE_SRGB;
+}
+
+static inline VkFormat
+vk_format_no_srgb(VkFormat format)
+{
+ switch (format) {
+ case VK_FORMAT_R8_SRGB:
+ return VK_FORMAT_R8_UNORM;
+ case VK_FORMAT_R8G8_SRGB:
+ return VK_FORMAT_R8G8_UNORM;
+ case VK_FORMAT_R8G8B8_SRGB:
+ return VK_FORMAT_R8G8B8_UNORM;
+ case VK_FORMAT_B8G8R8_SRGB:
+ return VK_FORMAT_B8G8R8_UNORM;
+ case VK_FORMAT_R8G8B8A8_SRGB:
+ return VK_FORMAT_R8G8B8A8_UNORM;
+ case VK_FORMAT_B8G8R8A8_SRGB:
+ return VK_FORMAT_B8G8R8A8_UNORM;
+ case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+ return VK_FORMAT_A8B8G8R8_UNORM_PACK32;
+ case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+ return VK_FORMAT_BC1_RGB_UNORM_BLOCK;
+ case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+ return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
+ case VK_FORMAT_BC2_SRGB_BLOCK:
+ return VK_FORMAT_BC2_UNORM_BLOCK;
+ case VK_FORMAT_BC3_SRGB_BLOCK:
+ return VK_FORMAT_BC3_UNORM_BLOCK;
+ case VK_FORMAT_BC7_SRGB_BLOCK:
+ return VK_FORMAT_BC7_UNORM_BLOCK;
+ case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+ return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
+ case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+ return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
+ case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+ return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
+ default:
+ assert(!vk_format_is_srgb(format));
+ return format;
+ }
+}
+
+static inline VkFormat
+vk_format_stencil_only(VkFormat format)
+{
+ return VK_FORMAT_S8_UINT;
+}
+
+static inline unsigned
+vk_format_get_component_bits(VkFormat format,
+ enum vk_format_colorspace colorspace,
+ unsigned component)
+{
+ const struct vk_format_description *desc = vk_format_description(format);
+ enum vk_format_colorspace desc_colorspace;
+
+ assert(desc);
+ if (!desc) {
+ return 0;
+ }
+
+ assert(component < 4);
+
+ /* Treat RGB and SRGB as equivalent. */
+ if (colorspace == VK_FORMAT_COLORSPACE_SRGB) {
+ colorspace = VK_FORMAT_COLORSPACE_RGB;
+ }
+ if (desc->colorspace == VK_FORMAT_COLORSPACE_SRGB) {
+ desc_colorspace = VK_FORMAT_COLORSPACE_RGB;
+ } else {
+ desc_colorspace = desc->colorspace;
+ }
+
+ if (desc_colorspace != colorspace) {
+ return 0;
+ }
+
+ switch (desc->swizzle[component]) {
+ case VK_SWIZZLE_X:
+ return desc->channel[0].size;
+ case VK_SWIZZLE_Y:
+ return desc->channel[1].size;
+ case VK_SWIZZLE_Z:
+ return desc->channel[2].size;
+ case VK_SWIZZLE_W:
+ return desc->channel[3].size;
+ default:
+ return 0;
+ }
+}
+
+static inline VkFormat
+vk_to_non_srgb_format(VkFormat format)
+{
+ switch (format) {
+ case VK_FORMAT_R8_SRGB:
+ return VK_FORMAT_R8_UNORM;
+ case VK_FORMAT_R8G8_SRGB:
+ return VK_FORMAT_R8G8_UNORM;
+ case VK_FORMAT_R8G8B8_SRGB:
+ return VK_FORMAT_R8G8B8_UNORM;
+ case VK_FORMAT_B8G8R8_SRGB:
+ return VK_FORMAT_B8G8R8_UNORM;
+ case VK_FORMAT_R8G8B8A8_SRGB:
+ return VK_FORMAT_R8G8B8A8_UNORM;
+ case VK_FORMAT_B8G8R8A8_SRGB:
+ return VK_FORMAT_B8G8R8A8_UNORM;
+ case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+ return VK_FORMAT_A8B8G8R8_UNORM_PACK32;
+ default:
+ return format;
+ }
+}
+
+static inline unsigned
+vk_format_get_nr_components(VkFormat format)
+{
+ const struct vk_format_description *desc = vk_format_description(format);
+ return desc->nr_channels;
+}
+
+#endif /* VK_FORMAT_H */
diff --git a/src/freedreno/vulkan/vk_format_layout.csv b/src/freedreno/vulkan/vk_format_layout.csv
new file mode 100644
index 00000000000..f9c2e6f7c35
--- /dev/null
+++ b/src/freedreno/vulkan/vk_format_layout.csv
@@ -0,0 +1,188 @@
+/* this is pretty much taken from the gallium one. */
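+/* Each row gives: format name, layout, block width, block height, up to
+ * four channel fields (type letter plus bit count, e.g. un8), a swizzle
+ * string and a colorspace. Rows with fewer than ten fields are ignored
+ * by vk_format_parse.py. */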
+
+
+VK_FORMAT_UNDEFINED , plain, 1, 1, u8 , , , , x001, rgb
+VK_FORMAT_R4G4_UNORM_PACK8 , plain, 1, 1, un4 , un4 , , , xy01, rgb
+VK_FORMAT_R4G4B4A4_UNORM_PACK16 , plain, 1, 1, un4 , un4 , un4 , un4 , wzyx, rgb
+VK_FORMAT_B4G4R4A4_UNORM_PACK16 , plain, 1, 1, un4 , un4 , un4 , un4 , wxyz, rgb
+VK_FORMAT_R5G6B5_UNORM_PACK16 , plain, 1, 1, un5 , un6 , un5 , , zyx1, rgb
+VK_FORMAT_B5G6R5_UNORM_PACK16 , plain, 1, 1, un5 , un6 , un5 , , xyz1, rgb
+VK_FORMAT_R5G5B5A1_UNORM_PACK16 , plain, 1, 1, un1 , un5 , un5 , un5 , wzyx, rgb
+VK_FORMAT_B5G5R5A1_UNORM_PACK16 , plain, 1, 1, un1 , un5 , un5 , un5 , wxyz, rgb
+VK_FORMAT_A1R5G5B5_UNORM_PACK16 , plain, 1, 1, un5 , un5 , un5 , un1 , zyxw, rgb
+VK_FORMAT_R8_UNORM , plain, 1, 1, un8 , , , , x001, rgb
+VK_FORMAT_R8_SNORM , plain, 1, 1, sn8 , , , , x001, rgb
+VK_FORMAT_R8_USCALED , plain, 1, 1, us8 , , , , x001, rgb
+VK_FORMAT_R8_SSCALED , plain, 1, 1, ss8 , , , , x001, rgb
+VK_FORMAT_R8_UINT , plain, 1, 1, up8 , , , , x001, rgb
+VK_FORMAT_R8_SINT , plain, 1, 1, sp8 , , , , x001, rgb
+VK_FORMAT_R8_SRGB , plain, 1, 1, un8 , , , , x001, srgb
+VK_FORMAT_R8G8_UNORM , plain, 1, 1, un8 , un8 , , , xy01, rgb
+VK_FORMAT_R8G8_SNORM , plain, 1, 1, sn8 , sn8 , , , xy01, rgb
+VK_FORMAT_R8G8_USCALED , plain, 1, 1, us8 , us8 , , , xy01, rgb
+VK_FORMAT_R8G8_SSCALED , plain, 1, 1, ss8 , ss8 , , , xy01, rgb
+VK_FORMAT_R8G8_UINT , plain, 1, 1, up8 , up8 , , , xy01, rgb
+VK_FORMAT_R8G8_SINT , plain, 1, 1, sp8 , sp8 , , , xy01, rgb
+VK_FORMAT_R8G8_SRGB , plain, 1, 1, un8 , un8 , , , xy01, srgb
+VK_FORMAT_R8G8B8_UNORM , plain, 1, 1, un8 , un8 , un8 , , xyz1, rgb
+VK_FORMAT_R8G8B8_SNORM , plain, 1, 1, sn8 , sn8 , sn8 , , xyz1, rgb
+VK_FORMAT_R8G8B8_USCALED , plain, 1, 1, us8 , us8 , us8 , , xyz1, rgb
+VK_FORMAT_R8G8B8_SSCALED , plain, 1, 1, ss8 , ss8 , ss8 , , xyz1, rgb
+VK_FORMAT_R8G8B8_UINT , plain, 1, 1, up8 , up8 , up8 , , xyz1, rgb
+VK_FORMAT_R8G8B8_SINT , plain, 1, 1, sp8 , sp8 , sp8 , , xyz1, rgb
+VK_FORMAT_R8G8B8_SRGB , plain, 1, 1, un8 , un8 , un8 , , xyz1, srgb
+VK_FORMAT_B8G8R8_UNORM , plain, 1, 1, un8 , un8 , un8 , , zyx1, rgb
+VK_FORMAT_B8G8R8_SNORM , plain, 1, 1, sn8 , sn8 , sn8 , , zyx1, rgb
+VK_FORMAT_B8G8R8_USCALED , plain, 1, 1, us8 , us8 , us8 , , zyx1, rgb
+VK_FORMAT_B8G8R8_SSCALED , plain, 1, 1, ss8 , ss8 , ss8 , , zyx1, rgb
+VK_FORMAT_B8G8R8_UINT , plain, 1, 1, up8 , up8 , up8 , , zyx1, rgb
+VK_FORMAT_B8G8R8_SINT , plain, 1, 1, sp8 , sp8 , sp8 , , zyx1, rgb
+VK_FORMAT_B8G8R8_SRGB , plain, 1, 1, un8 , un8 , un8 , , zyx1, srgb
+VK_FORMAT_R8G8B8A8_UNORM , plain, 1, 1, un8 , un8 , un8 , un8 , xyzw, rgb
+VK_FORMAT_R8G8B8A8_SNORM , plain, 1, 1, sn8 , sn8 , sn8 , sn8 , xyzw, rgb
+VK_FORMAT_R8G8B8A8_USCALED , plain, 1, 1, us8 , us8 , us8 , us8 , xyzw, rgb
+VK_FORMAT_R8G8B8A8_SSCALED , plain, 1, 1, ss8 , ss8 , ss8 , ss8 , xyzw, rgb
+VK_FORMAT_R8G8B8A8_UINT , plain, 1, 1, up8 , up8 , up8 , up8 , xyzw, rgb
+VK_FORMAT_R8G8B8A8_SINT , plain, 1, 1, sp8 , sp8 , sp8 , sp8 , xyzw, rgb
+VK_FORMAT_R8G8B8A8_SRGB , plain, 1, 1, un8 , un8 , un8 , un8 , xyzw, srgb
+VK_FORMAT_B8G8R8A8_UNORM , plain, 1, 1, un8 , un8 , un8 , un8 , zyxw, rgb
+VK_FORMAT_B8G8R8A8_SNORM , plain, 1, 1, sn8 , sn8 , sn8 , sn8 , zyxw, rgb
+VK_FORMAT_B8G8R8A8_USCALED , plain, 1, 1, us8 , us8 , us8 , us8 , zyxw, rgb
+VK_FORMAT_B8G8R8A8_SSCALED , plain, 1, 1, ss8 , ss8 , ss8 , ss8 , zyxw, rgb
+VK_FORMAT_B8G8R8A8_UINT , plain, 1, 1, up8 , up8 , up8 , up8 , zyxw, rgb
+VK_FORMAT_B8G8R8A8_SINT , plain, 1, 1, sp8 , sp8 , sp8 , sp8 , zyxw, rgb
+VK_FORMAT_B8G8R8A8_SRGB , plain, 1, 1, un8 , un8 , un8 , un8 , zyxw, srgb
+VK_FORMAT_A8B8G8R8_UNORM_PACK32 , plain, 1, 1, un8 , un8 , un8 , un8 , xyzw, rgb
+VK_FORMAT_A8B8G8R8_SNORM_PACK32 , plain, 1, 1, sn8 , sn8 , sn8 , sn8 , xyzw, rgb
+VK_FORMAT_A8B8G8R8_USCALED_PACK32 , plain, 1, 1, us8 , us8 , us8 , us8 , xyzw, rgb
+VK_FORMAT_A8B8G8R8_SSCALED_PACK32 , plain, 1, 1, ss8 , ss8 , ss8 , ss8 , xyzw, rgb
+VK_FORMAT_A8B8G8R8_UINT_PACK32 , plain, 1, 1, up8 , up8 , up8 , up8 , xyzw, rgb
+VK_FORMAT_A8B8G8R8_SINT_PACK32 , plain, 1, 1, sp8 , sp8 , sp8 , sp8 , xyzw, rgb
+VK_FORMAT_A8B8G8R8_SRGB_PACK32 , plain, 1, 1, un8 , un8 , un8 , un8 , xyzw, srgb
+VK_FORMAT_A2R10G10B10_UNORM_PACK32 , plain, 1, 1, un10, un10, un10, un2 , zyxw, rgb
+VK_FORMAT_A2R10G10B10_SNORM_PACK32 , plain, 1, 1, sn10, sn10, sn10, sn2 , zyxw, rgb
+VK_FORMAT_A2R10G10B10_USCALED_PACK32 , plain, 1, 1, us10, us10, us10, us2 , zyxw, rgb
+VK_FORMAT_A2R10G10B10_SSCALED_PACK32 , plain, 1, 1, ss10, ss10, ss10, ss2 , zyxw, rgb
+VK_FORMAT_A2R10G10B10_UINT_PACK32 , plain, 1, 1, up10, up10, up10, up2 , zyxw, rgb
+VK_FORMAT_A2R10G10B10_SINT_PACK32 , plain, 1, 1, sp10, sp10, sp10, sp2 , zyxw, rgb
+VK_FORMAT_A2B10G10R10_UNORM_PACK32 , plain, 1, 1, un10, un10, un10, un2 , xyzw, rgb
+VK_FORMAT_A2B10G10R10_SNORM_PACK32 , plain, 1, 1, sn10, sn10, sn10, sn2 , xyzw, rgb
+VK_FORMAT_A2B10G10R10_USCALED_PACK32 , plain, 1, 1, us10, us10, us10, us2 , xyzw, rgb
+VK_FORMAT_A2B10G10R10_SSCALED_PACK32 , plain, 1, 1, ss10, ss10, ss10, ss2 , xyzw, rgb
+VK_FORMAT_A2B10G10R10_UINT_PACK32 , plain, 1, 1, up10, up10, up10, up2 , xyzw, rgb
+VK_FORMAT_A2B10G10R10_SINT_PACK32 , plain, 1, 1, sp10, sp10, sp10, sp2 , xyzw, rgb
+VK_FORMAT_R16_UNORM , plain, 1, 1, un16, , , , x001, rgb
+VK_FORMAT_R16_SNORM , plain, 1, 1, sn16, , , , x001, rgb
+VK_FORMAT_R16_USCALED , plain, 1, 1, us16, , , , x001, rgb
+VK_FORMAT_R16_SSCALED , plain, 1, 1, ss16, , , , x001, rgb
+VK_FORMAT_R16_UINT , plain, 1, 1, up16, , , , x001, rgb
+VK_FORMAT_R16_SINT , plain, 1, 1, sp16, , , , x001, rgb
+VK_FORMAT_R16_SFLOAT , plain, 1, 1, f16 , , , , x001, rgb
+VK_FORMAT_R16G16_UNORM , plain, 1, 1, un16, un16, , , xy01, rgb
+VK_FORMAT_R16G16_SNORM , plain, 1, 1, sn16, sn16, , , xy01, rgb
+VK_FORMAT_R16G16_USCALED , plain, 1, 1, us16, us16, , , xy01, rgb
+VK_FORMAT_R16G16_SSCALED , plain, 1, 1, ss16, ss16, , , xy01, rgb
+VK_FORMAT_R16G16_UINT , plain, 1, 1, up16, up16, , , xy01, rgb
+VK_FORMAT_R16G16_SINT , plain, 1, 1, sp16, sp16, , , xy01, rgb
+VK_FORMAT_R16G16_SFLOAT , plain, 1, 1, f16 , f16 , , , xy01, rgb
+VK_FORMAT_R16G16B16_UNORM , plain, 1, 1, un16, un16, un16, , xyz1, rgb
+VK_FORMAT_R16G16B16_SNORM , plain, 1, 1, sn16, sn16, sn16, , xyz1, rgb
+VK_FORMAT_R16G16B16_USCALED , plain, 1, 1, us16, us16, us16, , xyz1, rgb
+VK_FORMAT_R16G16B16_SSCALED , plain, 1, 1, ss16, ss16, ss16, , xyz1, rgb
+VK_FORMAT_R16G16B16_UINT , plain, 1, 1, up16, up16, up16, , xyz1, rgb
+VK_FORMAT_R16G16B16_SINT , plain, 1, 1, sp16, sp16, sp16, , xyz1, rgb
+VK_FORMAT_R16G16B16_SFLOAT , plain, 1, 1, f16 , f16 , f16 , , xyz1, rgb
+VK_FORMAT_R16G16B16A16_UNORM , plain, 1, 1, un16, un16, un16, un16, xyzw, rgb
+VK_FORMAT_R16G16B16A16_SNORM , plain, 1, 1, sn16, sn16, sn16, sn16, xyzw, rgb
+VK_FORMAT_R16G16B16A16_USCALED , plain, 1, 1, us16, us16, us16, us16, xyzw, rgb
+VK_FORMAT_R16G16B16A16_SSCALED , plain, 1, 1, ss16, ss16, ss16, ss16, xyzw, rgb
+VK_FORMAT_R16G16B16A16_UINT , plain, 1, 1, up16, up16, up16, up16, xyzw, rgb
+VK_FORMAT_R16G16B16A16_SINT , plain, 1, 1, sp16, sp16, sp16, sp16, xyzw, rgb
+VK_FORMAT_R16G16B16A16_SFLOAT , plain, 1, 1, f16 , f16 , f16 , f16 , xyzw, rgb
+VK_FORMAT_R32_UINT , plain, 1, 1, up32, , , , x001, rgb
+VK_FORMAT_R32_SINT , plain, 1, 1, sp32, , , , x001, rgb
+VK_FORMAT_R32_SFLOAT , plain, 1, 1, f32 , , , , x001, rgb
+VK_FORMAT_R32G32_UINT , plain, 1, 1, up32, up32, , , xy01, rgb
+VK_FORMAT_R32G32_SINT , plain, 1, 1, sp32, sp32, , , xy01, rgb
+VK_FORMAT_R32G32_SFLOAT , plain, 1, 1, f32 , f32 , , , xy01, rgb
+VK_FORMAT_R32G32B32_UINT , plain, 1, 1, up32, up32, up32, , xyz1, rgb
+VK_FORMAT_R32G32B32_SINT , plain, 1, 1, sp32, sp32, sp32, , xyz1, rgb
+VK_FORMAT_R32G32B32_SFLOAT , plain, 1, 1, f32 , f32 , f32 , , xyz1, rgb
+VK_FORMAT_R32G32B32A32_UINT , plain, 1, 1, up32, up32, up32, up32, xyzw, rgb
+VK_FORMAT_R32G32B32A32_SINT , plain, 1, 1, sp32, sp32, sp32, sp32, xyzw, rgb
+VK_FORMAT_R32G32B32A32_SFLOAT , plain, 1, 1, f32 , f32 , f32 , f32 , xyzw, rgb
+VK_FORMAT_R64_UINT , plain, 1, 1, up64, , , , x001, rgb
+VK_FORMAT_R64_SINT , plain, 1, 1, sp64, , , , x001, rgb
+VK_FORMAT_R64_SFLOAT , plain, 1, 1, f64 , , , , x001, rgb
+VK_FORMAT_R64G64_UINT , plain, 1, 1, up64, up64, , , xy01, rgb
+VK_FORMAT_R64G64_SINT , plain, 1, 1, sp64, sp64, , , xy01, rgb
+VK_FORMAT_R64G64_SFLOAT , plain, 1, 1, f64 , f64 , , , xy01, rgb
+VK_FORMAT_R64G64B64_UINT , plain, 1, 1, up64, up64, up64, , xyz1, rgb
+VK_FORMAT_R64G64B64_SINT , plain, 1, 1, sp64, sp64, sp64, , xyz1, rgb
+VK_FORMAT_R64G64B64_SFLOAT , plain, 1, 1, f64 , f64 , f64 , , xyz1, rgb
+VK_FORMAT_R64G64B64A64_UINT , plain, 1, 1, up64, up64, up64, up64, xyzw, rgb
+VK_FORMAT_R64G64B64A64_SINT , plain, 1, 1, sp64, sp64, sp64, sp64, xyzw, rgb
+VK_FORMAT_R64G64B64A64_SFLOAT , plain, 1, 1, f64 , f64 , f64 , f64 , xyzw, rgb
+VK_FORMAT_B10G11R11_UFLOAT_PACK32 , other, 1, 1, x32 , , , , xyz1, rgb
+VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 , other, 1, 1, x32 , , , , xyz1, rgb
+VK_FORMAT_D16_UNORM , plain, 1, 1, un16, , , , x___, zs
+VK_FORMAT_X8_D24_UNORM_PACK32 , plain, 1, 1, un24, x8 , , , x___, zs
+VK_FORMAT_D32_SFLOAT , plain, 1, 1, f32 , , , , x___, zs
+VK_FORMAT_S8_UINT , plain, 1, 1, up8 , , , , _x__, zs
+VK_FORMAT_D16_UNORM_S8_UINT , plain, 1, 1, un16, up8 , , , xy__, zs
+VK_FORMAT_D24_UNORM_S8_UINT , plain, 1, 1, un24, up8 , , , xy__, zs
+VK_FORMAT_D32_SFLOAT_S8_UINT , plain, 1, 1, f32 , up8 , , , xy__, zs
+VK_FORMAT_BC1_RGB_UNORM_BLOCK , s3tc, 4, 4, x64 , , , , xyz1, rgb
+VK_FORMAT_BC1_RGB_SRGB_BLOCK , s3tc, 4, 4, x64 , , , , xyz1, srgb
+VK_FORMAT_BC1_RGBA_UNORM_BLOCK , s3tc, 4, 4, x64 , , , , xyzw, rgb
+VK_FORMAT_BC1_RGBA_SRGB_BLOCK , s3tc, 4, 4, x64 , , , , xyzw, srgb
+VK_FORMAT_BC2_UNORM_BLOCK , s3tc, 4, 4, x128, , , , xyzw, rgb
+VK_FORMAT_BC2_SRGB_BLOCK , s3tc, 4, 4, x128, , , , xyzw, srgb
+VK_FORMAT_BC3_UNORM_BLOCK , s3tc, 4, 4, x128, , , , xyzw, rgb
+VK_FORMAT_BC3_SRGB_BLOCK , s3tc, 4, 4, x128, , , , xyzw, srgb
+VK_FORMAT_BC4_UNORM_BLOCK , rgtc, 4, 4, x64, , , , x001, rgb
+VK_FORMAT_BC4_SNORM_BLOCK , rgtc, 4, 4, x64, , , , x001, rgb
+VK_FORMAT_BC5_UNORM_BLOCK , rgtc, 4, 4, x128, , , , xy01, rgb
+VK_FORMAT_BC5_SNORM_BLOCK , rgtc, 4, 4, x128, , , , xy01, rgb
+VK_FORMAT_BC6H_UFLOAT_BLOCK , bptc, 4, 4, x128, , , , xyz1, rgb
+VK_FORMAT_BC6H_SFLOAT_BLOCK , bptc, 4, 4, x128, , , , xyz1, rgb
+VK_FORMAT_BC7_UNORM_BLOCK , bptc, 4, 4, x128, , , , xyzw, rgb
+VK_FORMAT_BC7_SRGB_BLOCK , bptc, 4, 4, x128, , , , xyzw, srgb
+VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK , etc, 4, 4, x64, , , , xyz1, rgb
+VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK , etc, 4, 4, x64, , , , xyz1, srgb
+VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK , etc, 4, 4, x64, , , , xyzw, rgb
+VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK , etc, 4, 4, x64, , , , xyzw, srgb
+VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK , etc, 4, 4, x128, , , , xyzw, rgb
+VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK , etc, 4, 4, x128, , , , xyzw, srgb
+VK_FORMAT_EAC_R11_UNORM_BLOCK , etc, 4, 4, x64, , , , x001, rgb
+VK_FORMAT_EAC_R11_SNORM_BLOCK , etc, 4, 4, x64, , , , x001, rgb
+VK_FORMAT_EAC_R11G11_UNORM_BLOCK , etc, 4, 4, x128, , , , xy01, rgb
+VK_FORMAT_EAC_R11G11_SNORM_BLOCK , etc, 4, 4, x128, , , , xy01, rgb
+VK_FORMAT_ASTC_4x4_UNORM_BLOCK,
+VK_FORMAT_ASTC_4x4_SRGB_BLOCK,
+VK_FORMAT_ASTC_5x4_UNORM_BLOCK,
+VK_FORMAT_ASTC_5x4_SRGB_BLOCK,
+VK_FORMAT_ASTC_5x5_UNORM_BLOCK,
+VK_FORMAT_ASTC_5x5_SRGB_BLOCK,
+VK_FORMAT_ASTC_6x5_UNORM_BLOCK,
+VK_FORMAT_ASTC_6x5_SRGB_BLOCK,
+VK_FORMAT_ASTC_6x6_UNORM_BLOCK,
+VK_FORMAT_ASTC_6x6_SRGB_BLOCK,
+VK_FORMAT_ASTC_8x5_UNORM_BLOCK,
+VK_FORMAT_ASTC_8x5_SRGB_BLOCK,
+VK_FORMAT_ASTC_8x6_UNORM_BLOCK,
+VK_FORMAT_ASTC_8x6_SRGB_BLOCK,
+VK_FORMAT_ASTC_8x8_UNORM_BLOCK,
+VK_FORMAT_ASTC_8x8_SRGB_BLOCK,
+VK_FORMAT_ASTC_10x5_UNORM_BLOCK,
+VK_FORMAT_ASTC_10x5_SRGB_BLOCK,
+VK_FORMAT_ASTC_10x6_UNORM_BLOCK,
+VK_FORMAT_ASTC_10x6_SRGB_BLOCK,
+VK_FORMAT_ASTC_10x8_UNORM_BLOCK,
+VK_FORMAT_ASTC_10x8_SRGB_BLOCK,
+VK_FORMAT_ASTC_10x10_UNORM_BLOCK,
+VK_FORMAT_ASTC_10x10_SRGB_BLOCK,
+VK_FORMAT_ASTC_12x10_UNORM_BLOCK,
+VK_FORMAT_ASTC_12x10_SRGB_BLOCK,
+VK_FORMAT_ASTC_12x12_UNORM_BLOCK,
+VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
diff --git a/src/freedreno/vulkan/vk_format_parse.py b/src/freedreno/vulkan/vk_format_parse.py
new file mode 100644
index 00000000000..8f3823c806f
--- /dev/null
+++ b/src/freedreno/vulkan/vk_format_parse.py
@@ -0,0 +1,388 @@
+
+'''
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+'''
+
+
+VOID, UNSIGNED, SIGNED, FIXED, FLOAT = range(5)
+
+SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_0, SWIZZLE_1, SWIZZLE_NONE = range(7)
+
+PLAIN = 'plain'
+SCALED = 'scaled'
+
+RGB = 'rgb'
+SRGB = 'srgb'
+YUV = 'yuv'
+ZS = 'zs'
+
+
+def is_pot(x):
+ return (x & (x - 1)) == 0
+
+
+VERY_LARGE = 99999999999999999999999
+
+
+class Channel:
+ '''Describe a color channel.'''
+
+ def __init__(self, type, norm, pure, scaled, size, name = ''):
+ self.type = type
+ self.norm = norm
+ self.pure = pure
+ self.size = size
+ self.scaled = scaled
+ self.sign = type in (SIGNED, FIXED, FLOAT)
+ self.name = name
+
+ def __str__(self):
+ s = str(self.type)
+ if self.norm:
+ s += 'n'
+ if self.pure:
+ s += 'p'
+ if self.scaled:
+ s += 's'
+ s += str(self.size)
+ return s
+
+ def __eq__(self, other):
+ return (other is not None and
+ self.type == other.type and
+ self.norm == other.norm and
+ self.pure == other.pure and
+ self.size == other.size and
+ self.scaled == other.scaled)
+
+ def max(self):
+ '''Maximum representable number.'''
+ if self.type == FLOAT:
+ return VERY_LARGE
+ if self.type == FIXED:
+ return (1 << (self.size // 2)) - 1
+ if self.norm:
+ return 1
+ if self.type == UNSIGNED:
+ return (1 << self.size) - 1
+ if self.type == SIGNED:
+ return (1 << (self.size - 1)) - 1
+ assert False
+
+ def min(self):
+ '''Minimum representable number.'''
+ if self.type == FLOAT:
+ return -VERY_LARGE
+ if self.type == FIXED:
+ return -(1 << (self.size // 2))
+ if self.type == UNSIGNED:
+ return 0
+ if self.norm:
+ return -1
+ if self.type == SIGNED:
+ return -(1 << (self.size - 1))
+ assert False
+
+
+class Format:
+ '''Describe a pixel format.'''
+
+ def __init__(self, name, layout, block_width, block_height, le_channels, le_swizzles, be_channels, be_swizzles, colorspace):
+ self.name = name
+ self.layout = layout
+ self.block_width = block_width
+ self.block_height = block_height
+ self.le_channels = le_channels
+ self.le_swizzles = le_swizzles
+ self.be_channels = be_channels
+ self.be_swizzles = be_swizzles
+ self.colorspace = colorspace
+
+ def __str__(self):
+ return self.name
+
+ def short_name(self):
+ '''Make up a short name for a format, suitable to be used as suffix in
+ function names.'''
+
+ name = self.name
+ if name.startswith('VK_FORMAT_'):
+ name = name[len('VK_FORMAT_'):]
+ name = name.lower()
+ return name
+
+ def block_size(self):
+ size = 0
+ for channel in self.le_channels:
+ size += channel.size
+ return size
+
+ def nr_channels(self):
+ nr_channels = 0
+ for channel in self.le_channels:
+ if channel.size:
+ nr_channels += 1
+ return nr_channels
+
+ def array_element(self):
+ if self.layout != PLAIN:
+ return None
+ ref_channel = self.le_channels[0]
+ if ref_channel.type == VOID:
+ ref_channel = self.le_channels[1]
+ for channel in self.le_channels:
+ if channel.size and (channel.size != ref_channel.size or channel.size % 8):
+ return None
+ if channel.type != VOID:
+ if channel.type != ref_channel.type:
+ return None
+ if channel.norm != ref_channel.norm:
+ return None
+ if channel.pure != ref_channel.pure:
+ return None
+ if channel.scaled != ref_channel.scaled:
+ return None
+ return ref_channel
+
+ def is_array(self):
+ return self.array_element() is not None
+
+ def is_mixed(self):
+ if self.layout != PLAIN:
+ return False
+ ref_channel = self.le_channels[0]
+ if ref_channel.type == VOID:
+ ref_channel = self.le_channels[1]
+ for channel in self.le_channels[1:]:
+ if channel.type != VOID:
+ if channel.type != ref_channel.type:
+ return True
+ if channel.norm != ref_channel.norm:
+ return True
+ if channel.pure != ref_channel.pure:
+ return True
+ if channel.scaled != ref_channel.scaled:
+ return True
+ return False
+
+ def is_pot(self):
+ return is_pot(self.block_size())
+
+ def is_int(self):
+ if self.layout != PLAIN:
+ return False
+ for channel in self.le_channels:
+ if channel.type not in (VOID, UNSIGNED, SIGNED):
+ return False
+ return True
+
+ def is_float(self):
+ if self.layout != PLAIN:
+ return False
+ for channel in self.le_channels:
+ if channel.type not in (VOID, FLOAT):
+ return False
+ return True
+
+ def is_bitmask(self):
+ if self.layout != PLAIN:
+ return False
+ if self.block_size() not in (8, 16, 32):
+ return False
+ for channel in self.le_channels:
+ if channel.type not in (VOID, UNSIGNED, SIGNED):
+ return False
+ return True
+
+ def is_pure_color(self):
+ if self.layout != PLAIN or self.colorspace == ZS:
+ return False
+ pures = [channel.pure
+ for channel in self.le_channels
+ if channel.type != VOID]
+ for x in pures:
+ assert x == pures[0]
+ return pures[0]
+
+ def channel_type(self):
+ types = [channel.type
+ for channel in self.le_channels
+ if channel.type != VOID]
+ for x in types:
+ assert x == types[0]
+ return types[0]
+
+ def is_pure_signed(self):
+ return self.is_pure_color() and self.channel_type() == SIGNED
+
+ def is_pure_unsigned(self):
+ return self.is_pure_color() and self.channel_type() == UNSIGNED
+
+ def has_channel(self, id):
+ return self.le_swizzles[id] != SWIZZLE_NONE
+
+ def has_depth(self):
+ return self.colorspace == ZS and self.has_channel(0)
+
+ def has_stencil(self):
+ return self.colorspace == ZS and self.has_channel(1)
+
+ def stride(self):
+ return self.block_size() // 8
+
+
+_type_parse_map = {
+ '': VOID,
+ 'x': VOID,
+ 'u': UNSIGNED,
+ 's': SIGNED,
+ 'h': FIXED,
+ 'f': FLOAT,
+}
+
+_swizzle_parse_map = {
+ 'x': SWIZZLE_X,
+ 'y': SWIZZLE_Y,
+ 'z': SWIZZLE_Z,
+ 'w': SWIZZLE_W,
+ '0': SWIZZLE_0,
+ '1': SWIZZLE_1,
+ '_': SWIZZLE_NONE,
+}
+
+def _parse_channels(fields, layout, colorspace, swizzles):
+ if layout == PLAIN:
+ names = ['']*4
+ if colorspace in (RGB, SRGB):
+ for i in range(4):
+ swizzle = swizzles[i]
+ if swizzle < 4:
+ names[swizzle] += 'rgba'[i]
+ elif colorspace == ZS:
+ for i in range(4):
+ swizzle = swizzles[i]
+ if swizzle < 4:
+ names[swizzle] += 'zs'[i]
+ else:
+ assert False
+ for i in range(4):
+ if names[i] == '':
+ names[i] = 'x'
+ else:
+ names = ['x', 'y', 'z', 'w']
+
+ channels = []
+ for i in range(0, 4):
+ field = fields[i]
+ if field:
+ type = _type_parse_map[field[0]]
+ if field[1] == 'n':
+ norm = True
+ pure = False
+ scaled = False
+ size = int(field[2:])
+ elif field[1] == 'p':
+ pure = True
+ norm = False
+ scaled = False
+ size = int(field[2:])
+ elif field[1] == 's':
+ pure = False
+ norm = False
+ scaled = True
+ size = int(field[2:])
+ else:
+ norm = False
+ pure = False
+ scaled = False
+ size = int(field[1:])
+ else:
+ type = VOID
+ norm = False
+ pure = False
+ scaled = False
+ size = 0
+ channel = Channel(type, norm, pure, scaled, size, names[i])
+ channels.append(channel)
+
+ return channels
+
+def parse(filename):
+ '''Parse the format description in CSV format in terms of the
+ Channel and Format classes above.'''
+
+ stream = open(filename)
+ formats = []
+ for line in stream:
+ try:
+ comment = line.index('#')
+ except ValueError:
+ pass
+ else:
+ line = line[:comment]
+ line = line.strip()
+ if not line:
+ continue
+
+ fields = [field.strip() for field in line.split(',')]
+ if len(fields) < 10:
+ continue
+ if len(fields) == 10:
+ fields += fields[4:9]
+ assert len(fields) == 15
+
+ name = fields[0]
+ layout = fields[1]
+ block_width, block_height = map(int, fields[2:4])
+ colorspace = fields[9]
+
+ le_swizzles = [_swizzle_parse_map[swizzle] for swizzle in fields[8]]
+ le_channels = _parse_channels(fields[4:8], layout, colorspace, le_swizzles)
+
+ be_swizzles = [_swizzle_parse_map[swizzle] for swizzle in fields[14]]
+ be_channels = _parse_channels(fields[10:14], layout, colorspace, be_swizzles)
+
+ le_shift = 0
+ for channel in le_channels:
+ channel.shift = le_shift
+ le_shift += channel.size
+
+ be_shift = 0
+ for channel in be_channels[3::-1]:
+ channel.shift = be_shift
+ be_shift += channel.size
+
+ assert le_shift == be_shift
+ for i in range(4):
+ assert (le_swizzles[i] != SWIZZLE_NONE) == (be_swizzles[i] != SWIZZLE_NONE)
+
+ format = Format(name, layout, block_width, block_height, le_channels, le_swizzles, be_channels, be_swizzles, colorspace)
+ formats.append(format)
+ return formats
+
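+# Example (hypothetical), run next to vk_format_layout.csv:
+#
+#    for fmt in parse('vk_format_layout.csv'):
+#        print(fmt.name, fmt.stride())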
diff --git a/src/freedreno/vulkan/vk_format_table.py b/src/freedreno/vulkan/vk_format_table.py
new file mode 100644
index 00000000000..604aac8fa75
--- /dev/null
+++ b/src/freedreno/vulkan/vk_format_table.py
@@ -0,0 +1,173 @@
+from __future__ import print_function
+
+CopyRight = '''
+/**************************************************************************
+ *
+ * Copyright 2010 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+'''
+
+
+import sys
+
+from vk_format_parse import *
+
+def layout_map(layout):
+    return 'VK_FORMAT_LAYOUT_' + str(layout).upper()
+
+
+def colorspace_map(colorspace):
+    return 'VK_FORMAT_COLORSPACE_' + str(colorspace).upper()
+
+
+colorspace_channels_map = {
+    'rgb': ['r', 'g', 'b', 'a'],
+    'srgb': ['sr', 'sg', 'sb', 'a'],
+    'zs': ['z', 's'],
+    'yuv': ['y', 'u', 'v'],
+}
+
+
+type_map = {
+    VOID: "VK_FORMAT_TYPE_VOID",
+    UNSIGNED: "VK_FORMAT_TYPE_UNSIGNED",
+    SIGNED: "VK_FORMAT_TYPE_SIGNED",
+    FIXED: "VK_FORMAT_TYPE_FIXED",
+    FLOAT: "VK_FORMAT_TYPE_FLOAT",
+}
+
+
+def bool_map(value):
+    if value:
+        return "true"
+    else:
+        return "false"
+
+
+swizzle_map = {
+    SWIZZLE_X: "VK_SWIZZLE_X",
+    SWIZZLE_Y: "VK_SWIZZLE_Y",
+    SWIZZLE_Z: "VK_SWIZZLE_Z",
+    SWIZZLE_W: "VK_SWIZZLE_W",
+    SWIZZLE_0: "VK_SWIZZLE_0",
+    SWIZZLE_1: "VK_SWIZZLE_1",
+    SWIZZLE_NONE: "VK_SWIZZLE_NONE",
+}
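+# A format whose CSV swizzle field is, say, 'xyz1' is thus emitted with
+# VK_SWIZZLE_X, VK_SWIZZLE_Y, VK_SWIZZLE_Z, VK_SWIZZLE_1 entries.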
+
+def print_channels(format, func):
+    # Single-channel formats look the same on either endianness, so only
+    # the little-endian variant is emitted; otherwise guard both variants.
+    if format.nr_channels() <= 1:
+        func(format.le_channels, format.le_swizzles)
+    else:
+        print('#ifdef PIPE_ARCH_BIG_ENDIAN')
+        func(format.be_channels, format.be_swizzles)
+        print('#else')
+        func(format.le_channels, format.le_swizzles)
+        print('#endif')
+
+def write_format_table(formats):
+    print('/* This file is autogenerated by vk_format_table.py from vk_format_layout.csv. Do not edit directly. */')
+    print()
+    # This will print the copyright message on the top of this file
+    print(CopyRight.strip())
+    print()
+    print('#include "stdbool.h"')
+    print('#include "vk_format.h"')
+    print()
+
+    def do_channel_array(channels, swizzles):
+        print("   {")
+        for i in range(4):
+            channel = channels[i]
+            if i < 3:
+                sep = ","
+            else:
+                sep = ""
+            if channel.size:
+                print("      {%s, %s, %s, %s, %u, %u}%s\t/* %s = %s */" % (type_map[channel.type], bool_map(channel.norm), bool_map(channel.pure), bool_map(channel.scaled), channel.size, channel.shift, sep, "xyzw"[i], channel.name))
+            else:
+                print("      {0, 0, 0, 0, 0}%s" % (sep,))
+        print("   },")
+
+    def do_swizzle_array(channels, swizzles):
+        # Note: reads 'format' from the enclosing loop below at call time.
+        print("   {")
+        for i in range(4):
+            swizzle = swizzles[i]
+            if i < 3:
+                sep = ","
+            else:
+                sep = ""
+            try:
+                comment = colorspace_channels_map[format.colorspace][i]
+            except (KeyError, IndexError):
+                comment = 'ignored'
+            print("      %s%s\t/* %s */" % (swizzle_map[swizzle], sep, comment))
+        print("   },")
+
+    for format in formats:
+        print('static const struct vk_format_description')
+        print('vk_format_%s_description = {' % (format.short_name(),))
+        print("   %s," % (format.name,))
+        print("   \"%s\"," % (format.name,))
+        print("   \"%s\"," % (format.short_name(),))
+        print("   {%u, %u, %u},\t/* block */" % (format.block_width, format.block_height, format.block_size()))
+        print("   %s," % (layout_map(format.layout),))
+        print("   %u,\t/* nr_channels */" % (format.nr_channels(),))
+        print("   %s,\t/* is_array */" % (bool_map(format.is_array()),))
+        print("   %s,\t/* is_bitmask */" % (bool_map(format.is_bitmask()),))
+        print("   %s,\t/* is_mixed */" % (bool_map(format.is_mixed()),))
+        print_channels(format, do_channel_array)
+        print_channels(format, do_swizzle_array)
+        print("   %s," % (colorspace_map(format.colorspace),))
+        print("};")
+        print()
+
+    print("const struct vk_format_description *")
+    print("vk_format_description(VkFormat format)")
+    print("{")
+    print("   if (format > VK_FORMAT_END_RANGE) {")
+    print("      return NULL;")
+    print("   }")
+    print()
+    print("   switch (format) {")
+    for format in formats:
+        print("   case %s:" % format.name)
+        print("      return &vk_format_%s_description;" % (format.short_name(),))
+    print("   default:")
+    print("      return NULL;")
+    print("   }")
+    print("}")
+    print()
+
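+# For a format like VK_FORMAT_R8_UNORM, write_format_table() emits roughly:
+#
+#   static const struct vk_format_description
+#   vk_format_r8_unorm_description = {
+#      VK_FORMAT_R8_UNORM,
+#      "VK_FORMAT_R8_UNORM",
+#      "r8_unorm",
+#      {1, 1, 8},   /* block */
+#      VK_FORMAT_LAYOUT_PLAIN,
+#      1,           /* nr_channels */
+#      ...
+#   };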
+
+def main():
+    formats = []
+    for arg in sys.argv[1:]:
+        formats.extend(parse(arg))
+    write_format_table(formats)
+
+
+if __name__ == '__main__':
+    main()
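+# Typical invocation (one or more CSV layouts on the command line, with the
+# generated C table captured from stdout), e.g.:
+#
+#   python vk_format_table.py vk_format_layout.csv > vk_format_table.c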