author     Jason Ekstrand <[email protected]>   2016-04-14 10:28:45 -0700
committer  Jason Ekstrand <[email protected]>   2016-04-14 10:28:47 -0700
commit     c34be07230ef98d5021f0bdc88c3b0bc804ee2dd (patch)
tree       19e1e31d40852890f1bfc10f64834e7544fe08e0 /src/compiler/spirv
parent     bfa3a38280d27fe373cb78d666e926265ef80854 (diff)
spirv: Move to compiler/
While it does rely on NIR, it's not really part of the NIR core. At the moment, it still builds as part of libnir but that can be changed later if desired.
Diffstat (limited to 'src/compiler/spirv')
-rw-r--r--   src/compiler/spirv/GLSL.std.450.h     127
-rw-r--r--   src/compiler/spirv/nir_spirv.h         54
-rw-r--r--   src/compiler/spirv/spirv.h            870
-rw-r--r--   src/compiler/spirv/spirv_to_nir.c    2710
-rw-r--r--   src/compiler/spirv/vtn_alu.c          464
-rw-r--r--   src/compiler/spirv/vtn_cfg.c          778
-rw-r--r--   src/compiler/spirv/vtn_glsl450.c      666
-rw-r--r--   src/compiler/spirv/vtn_private.h      484
-rw-r--r--   src/compiler/spirv/vtn_variables.c   1415
9 files changed, 7568 insertions, 0 deletions
diff --git a/src/compiler/spirv/GLSL.std.450.h b/src/compiler/spirv/GLSL.std.450.h
new file mode 100644
index 00000000000..d1c9b5c1d44
--- /dev/null
+++ b/src/compiler/spirv/GLSL.std.450.h
@@ -0,0 +1,127 @@
+/*
+** Copyright (c) 2014-2015 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+#ifndef GLSLstd450_H
+#define GLSLstd450_H
+
+const int GLSLstd450Version = 99;
+const int GLSLstd450Revision = 3;
+
+enum GLSLstd450 {
+ GLSLstd450Bad = 0, // Don't use
+
+ GLSLstd450Round = 1,
+ GLSLstd450RoundEven = 2,
+ GLSLstd450Trunc = 3,
+ GLSLstd450FAbs = 4,
+ GLSLstd450SAbs = 5,
+ GLSLstd450FSign = 6,
+ GLSLstd450SSign = 7,
+ GLSLstd450Floor = 8,
+ GLSLstd450Ceil = 9,
+ GLSLstd450Fract = 10,
+
+ GLSLstd450Radians = 11,
+ GLSLstd450Degrees = 12,
+ GLSLstd450Sin = 13,
+ GLSLstd450Cos = 14,
+ GLSLstd450Tan = 15,
+ GLSLstd450Asin = 16,
+ GLSLstd450Acos = 17,
+ GLSLstd450Atan = 18,
+ GLSLstd450Sinh = 19,
+ GLSLstd450Cosh = 20,
+ GLSLstd450Tanh = 21,
+ GLSLstd450Asinh = 22,
+ GLSLstd450Acosh = 23,
+ GLSLstd450Atanh = 24,
+ GLSLstd450Atan2 = 25,
+
+ GLSLstd450Pow = 26,
+ GLSLstd450Exp = 27,
+ GLSLstd450Log = 28,
+ GLSLstd450Exp2 = 29,
+ GLSLstd450Log2 = 30,
+ GLSLstd450Sqrt = 31,
+ GLSLstd450InverseSqrt = 32,
+
+ GLSLstd450Determinant = 33,
+ GLSLstd450MatrixInverse = 34,
+
+ GLSLstd450Modf = 35, // second operand needs an OpVariable to write to
+ GLSLstd450ModfStruct = 36, // no OpVariable operand
+ GLSLstd450FMin = 37,
+ GLSLstd450UMin = 38,
+ GLSLstd450SMin = 39,
+ GLSLstd450FMax = 40,
+ GLSLstd450UMax = 41,
+ GLSLstd450SMax = 42,
+ GLSLstd450FClamp = 43,
+ GLSLstd450UClamp = 44,
+ GLSLstd450SClamp = 45,
+ GLSLstd450FMix = 46,
+ GLSLstd450IMix = 47,
+ GLSLstd450Step = 48,
+ GLSLstd450SmoothStep = 49,
+
+ GLSLstd450Fma = 50,
+ GLSLstd450Frexp = 51, // second operand needs an OpVariable to write to
+ GLSLstd450FrexpStruct = 52, // no OpVariable operand
+ GLSLstd450Ldexp = 53,
+
+ GLSLstd450PackSnorm4x8 = 54,
+ GLSLstd450PackUnorm4x8 = 55,
+ GLSLstd450PackSnorm2x16 = 56,
+ GLSLstd450PackUnorm2x16 = 57,
+ GLSLstd450PackHalf2x16 = 58,
+ GLSLstd450PackDouble2x32 = 59,
+ GLSLstd450UnpackSnorm2x16 = 60,
+ GLSLstd450UnpackUnorm2x16 = 61,
+ GLSLstd450UnpackHalf2x16 = 62,
+ GLSLstd450UnpackSnorm4x8 = 63,
+ GLSLstd450UnpackUnorm4x8 = 64,
+ GLSLstd450UnpackDouble2x32 = 65,
+
+ GLSLstd450Length = 66,
+ GLSLstd450Distance = 67,
+ GLSLstd450Cross = 68,
+ GLSLstd450Normalize = 69,
+ GLSLstd450FaceForward = 70,
+ GLSLstd450Reflect = 71,
+ GLSLstd450Refract = 72,
+
+ GLSLstd450FindILsb = 73,
+ GLSLstd450FindSMsb = 74,
+ GLSLstd450FindUMsb = 75,
+
+ GLSLstd450InterpolateAtCentroid = 76,
+ GLSLstd450InterpolateAtSample = 77,
+ GLSLstd450InterpolateAtOffset = 78,
+
+ GLSLstd450Count
+};
+
+#endif // #ifndef GLSLstd450_H
diff --git a/src/compiler/spirv/nir_spirv.h b/src/compiler/spirv/nir_spirv.h
new file mode 100644
index 00000000000..500f2cb94df
--- /dev/null
+++ b/src/compiler/spirv/nir_spirv.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jason Ekstrand ([email protected])
+ *
+ */
+
+#pragma once
+
+#ifndef _NIR_SPIRV_H_
+#define _NIR_SPIRV_H_
+
+#include "nir/nir.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct nir_spirv_specialization {
+ uint32_t id;
+ uint32_t data;
+};
+
+nir_function *spirv_to_nir(const uint32_t *words, size_t word_count,
+ struct nir_spirv_specialization *specializations,
+ unsigned num_specializations,
+ gl_shader_stage stage, const char *entry_point_name,
+ const nir_shader_compiler_options *options);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _NIR_SPIRV_H_ */
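
A minimal caller sketch of the spirv_to_nir() entry point declared above (illustration only, not part of this commit). The SPIR-V words, their count, and the driver's nir_shader_compiler_options are assumed to come from the caller; the fragment stage, the "main" entry-point name, and the single specialization are hypothetical:

#include "nir_spirv.h"   /* include path depends on the build setup */

/* Lower an in-memory SPIR-V binary to NIR and return the shader that owns
 * the requested entry point.  The one specialization below overrides the
 * spec constant decorated with SpecId 0.
 */
static nir_shader *
compile_fragment_spirv(const uint32_t *words, size_t word_count,
                       const nir_shader_compiler_options *driver_options)
{
   struct nir_spirv_specialization spec[] = {
      { .id = 0, .data = 16 },
   };

   nir_function *entry =
      spirv_to_nir(words, word_count, spec, 1,
                   MESA_SHADER_FRAGMENT, "main", driver_options);

   return entry->shader;
}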
diff --git a/src/compiler/spirv/spirv.h b/src/compiler/spirv/spirv.h
new file mode 100644
index 00000000000..63bcb2f88dd
--- /dev/null
+++ b/src/compiler/spirv/spirv.h
@@ -0,0 +1,870 @@
+/*
+** Copyright (c) 2014-2015 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+/*
+** This header is automatically generated by the same tool that creates
+** the Binary Section of the SPIR-V specification.
+*/
+
+/*
+** Enumeration tokens for SPIR-V, in various styles:
+** C, C++, C++11, JSON, Lua, Python
+**
+** - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL
+** - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL
+** - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL
+** - Lua will use tables, e.g.: spv.SourceLanguage.GLSL
+** - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL']
+**
+** Some tokens act like mask values, which can be OR'd together,
+** while others are mutually exclusive. The mask-like ones have
+** "Mask" in their name, and a parallel enum that has the shift
+** amount (1 << x) for each corresponding enumerant.
+*/
+
+#ifndef spirv_H
+#define spirv_H
+
+typedef unsigned int SpvId;
+
+#define SPV_VERSION 0x10000
+#define SPV_REVISION 2
+
+static const unsigned int SpvMagicNumber = 0x07230203;
+static const unsigned int SpvVersion = 0x00010000;
+static const unsigned int SpvRevision = 2;
+static const unsigned int SpvOpCodeMask = 0xffff;
+static const unsigned int SpvWordCountShift = 16;
+
+typedef enum SpvSourceLanguage_ {
+ SpvSourceLanguageUnknown = 0,
+ SpvSourceLanguageESSL = 1,
+ SpvSourceLanguageGLSL = 2,
+ SpvSourceLanguageOpenCL_C = 3,
+ SpvSourceLanguageOpenCL_CPP = 4,
+} SpvSourceLanguage;
+
+typedef enum SpvExecutionModel_ {
+ SpvExecutionModelVertex = 0,
+ SpvExecutionModelTessellationControl = 1,
+ SpvExecutionModelTessellationEvaluation = 2,
+ SpvExecutionModelGeometry = 3,
+ SpvExecutionModelFragment = 4,
+ SpvExecutionModelGLCompute = 5,
+ SpvExecutionModelKernel = 6,
+} SpvExecutionModel;
+
+typedef enum SpvAddressingModel_ {
+ SpvAddressingModelLogical = 0,
+ SpvAddressingModelPhysical32 = 1,
+ SpvAddressingModelPhysical64 = 2,
+} SpvAddressingModel;
+
+typedef enum SpvMemoryModel_ {
+ SpvMemoryModelSimple = 0,
+ SpvMemoryModelGLSL450 = 1,
+ SpvMemoryModelOpenCL = 2,
+} SpvMemoryModel;
+
+typedef enum SpvExecutionMode_ {
+ SpvExecutionModeInvocations = 0,
+ SpvExecutionModeSpacingEqual = 1,
+ SpvExecutionModeSpacingFractionalEven = 2,
+ SpvExecutionModeSpacingFractionalOdd = 3,
+ SpvExecutionModeVertexOrderCw = 4,
+ SpvExecutionModeVertexOrderCcw = 5,
+ SpvExecutionModePixelCenterInteger = 6,
+ SpvExecutionModeOriginUpperLeft = 7,
+ SpvExecutionModeOriginLowerLeft = 8,
+ SpvExecutionModeEarlyFragmentTests = 9,
+ SpvExecutionModePointMode = 10,
+ SpvExecutionModeXfb = 11,
+ SpvExecutionModeDepthReplacing = 12,
+ SpvExecutionModeDepthGreater = 14,
+ SpvExecutionModeDepthLess = 15,
+ SpvExecutionModeDepthUnchanged = 16,
+ SpvExecutionModeLocalSize = 17,
+ SpvExecutionModeLocalSizeHint = 18,
+ SpvExecutionModeInputPoints = 19,
+ SpvExecutionModeInputLines = 20,
+ SpvExecutionModeInputLinesAdjacency = 21,
+ SpvExecutionModeTriangles = 22,
+ SpvExecutionModeInputTrianglesAdjacency = 23,
+ SpvExecutionModeQuads = 24,
+ SpvExecutionModeIsolines = 25,
+ SpvExecutionModeOutputVertices = 26,
+ SpvExecutionModeOutputPoints = 27,
+ SpvExecutionModeOutputLineStrip = 28,
+ SpvExecutionModeOutputTriangleStrip = 29,
+ SpvExecutionModeVecTypeHint = 30,
+ SpvExecutionModeContractionOff = 31,
+} SpvExecutionMode;
+
+typedef enum SpvStorageClass_ {
+ SpvStorageClassUniformConstant = 0,
+ SpvStorageClassInput = 1,
+ SpvStorageClassUniform = 2,
+ SpvStorageClassOutput = 3,
+ SpvStorageClassWorkgroup = 4,
+ SpvStorageClassCrossWorkgroup = 5,
+ SpvStorageClassPrivate = 6,
+ SpvStorageClassFunction = 7,
+ SpvStorageClassGeneric = 8,
+ SpvStorageClassPushConstant = 9,
+ SpvStorageClassAtomicCounter = 10,
+ SpvStorageClassImage = 11,
+} SpvStorageClass;
+
+typedef enum SpvDim_ {
+ SpvDim1D = 0,
+ SpvDim2D = 1,
+ SpvDim3D = 2,
+ SpvDimCube = 3,
+ SpvDimRect = 4,
+ SpvDimBuffer = 5,
+ SpvDimSubpassData = 6,
+} SpvDim;
+
+typedef enum SpvSamplerAddressingMode_ {
+ SpvSamplerAddressingModeNone = 0,
+ SpvSamplerAddressingModeClampToEdge = 1,
+ SpvSamplerAddressingModeClamp = 2,
+ SpvSamplerAddressingModeRepeat = 3,
+ SpvSamplerAddressingModeRepeatMirrored = 4,
+} SpvSamplerAddressingMode;
+
+typedef enum SpvSamplerFilterMode_ {
+ SpvSamplerFilterModeNearest = 0,
+ SpvSamplerFilterModeLinear = 1,
+} SpvSamplerFilterMode;
+
+typedef enum SpvImageFormat_ {
+ SpvImageFormatUnknown = 0,
+ SpvImageFormatRgba32f = 1,
+ SpvImageFormatRgba16f = 2,
+ SpvImageFormatR32f = 3,
+ SpvImageFormatRgba8 = 4,
+ SpvImageFormatRgba8Snorm = 5,
+ SpvImageFormatRg32f = 6,
+ SpvImageFormatRg16f = 7,
+ SpvImageFormatR11fG11fB10f = 8,
+ SpvImageFormatR16f = 9,
+ SpvImageFormatRgba16 = 10,
+ SpvImageFormatRgb10A2 = 11,
+ SpvImageFormatRg16 = 12,
+ SpvImageFormatRg8 = 13,
+ SpvImageFormatR16 = 14,
+ SpvImageFormatR8 = 15,
+ SpvImageFormatRgba16Snorm = 16,
+ SpvImageFormatRg16Snorm = 17,
+ SpvImageFormatRg8Snorm = 18,
+ SpvImageFormatR16Snorm = 19,
+ SpvImageFormatR8Snorm = 20,
+ SpvImageFormatRgba32i = 21,
+ SpvImageFormatRgba16i = 22,
+ SpvImageFormatRgba8i = 23,
+ SpvImageFormatR32i = 24,
+ SpvImageFormatRg32i = 25,
+ SpvImageFormatRg16i = 26,
+ SpvImageFormatRg8i = 27,
+ SpvImageFormatR16i = 28,
+ SpvImageFormatR8i = 29,
+ SpvImageFormatRgba32ui = 30,
+ SpvImageFormatRgba16ui = 31,
+ SpvImageFormatRgba8ui = 32,
+ SpvImageFormatR32ui = 33,
+ SpvImageFormatRgb10a2ui = 34,
+ SpvImageFormatRg32ui = 35,
+ SpvImageFormatRg16ui = 36,
+ SpvImageFormatRg8ui = 37,
+ SpvImageFormatR16ui = 38,
+ SpvImageFormatR8ui = 39,
+} SpvImageFormat;
+
+typedef enum SpvImageChannelOrder_ {
+ SpvImageChannelOrderR = 0,
+ SpvImageChannelOrderA = 1,
+ SpvImageChannelOrderRG = 2,
+ SpvImageChannelOrderRA = 3,
+ SpvImageChannelOrderRGB = 4,
+ SpvImageChannelOrderRGBA = 5,
+ SpvImageChannelOrderBGRA = 6,
+ SpvImageChannelOrderARGB = 7,
+ SpvImageChannelOrderIntensity = 8,
+ SpvImageChannelOrderLuminance = 9,
+ SpvImageChannelOrderRx = 10,
+ SpvImageChannelOrderRGx = 11,
+ SpvImageChannelOrderRGBx = 12,
+ SpvImageChannelOrderDepth = 13,
+ SpvImageChannelOrderDepthStencil = 14,
+ SpvImageChannelOrdersRGB = 15,
+ SpvImageChannelOrdersRGBx = 16,
+ SpvImageChannelOrdersRGBA = 17,
+ SpvImageChannelOrdersBGRA = 18,
+} SpvImageChannelOrder;
+
+typedef enum SpvImageChannelDataType_ {
+ SpvImageChannelDataTypeSnormInt8 = 0,
+ SpvImageChannelDataTypeSnormInt16 = 1,
+ SpvImageChannelDataTypeUnormInt8 = 2,
+ SpvImageChannelDataTypeUnormInt16 = 3,
+ SpvImageChannelDataTypeUnormShort565 = 4,
+ SpvImageChannelDataTypeUnormShort555 = 5,
+ SpvImageChannelDataTypeUnormInt101010 = 6,
+ SpvImageChannelDataTypeSignedInt8 = 7,
+ SpvImageChannelDataTypeSignedInt16 = 8,
+ SpvImageChannelDataTypeSignedInt32 = 9,
+ SpvImageChannelDataTypeUnsignedInt8 = 10,
+ SpvImageChannelDataTypeUnsignedInt16 = 11,
+ SpvImageChannelDataTypeUnsignedInt32 = 12,
+ SpvImageChannelDataTypeHalfFloat = 13,
+ SpvImageChannelDataTypeFloat = 14,
+ SpvImageChannelDataTypeUnormInt24 = 15,
+ SpvImageChannelDataTypeUnormInt101010_2 = 16,
+} SpvImageChannelDataType;
+
+typedef enum SpvImageOperandsShift_ {
+ SpvImageOperandsBiasShift = 0,
+ SpvImageOperandsLodShift = 1,
+ SpvImageOperandsGradShift = 2,
+ SpvImageOperandsConstOffsetShift = 3,
+ SpvImageOperandsOffsetShift = 4,
+ SpvImageOperandsConstOffsetsShift = 5,
+ SpvImageOperandsSampleShift = 6,
+ SpvImageOperandsMinLodShift = 7,
+} SpvImageOperandsShift;
+
+typedef enum SpvImageOperandsMask_ {
+ SpvImageOperandsMaskNone = 0,
+ SpvImageOperandsBiasMask = 0x00000001,
+ SpvImageOperandsLodMask = 0x00000002,
+ SpvImageOperandsGradMask = 0x00000004,
+ SpvImageOperandsConstOffsetMask = 0x00000008,
+ SpvImageOperandsOffsetMask = 0x00000010,
+ SpvImageOperandsConstOffsetsMask = 0x00000020,
+ SpvImageOperandsSampleMask = 0x00000040,
+ SpvImageOperandsMinLodMask = 0x00000080,
+} SpvImageOperandsMask;
+
+typedef enum SpvFPFastMathModeShift_ {
+ SpvFPFastMathModeNotNaNShift = 0,
+ SpvFPFastMathModeNotInfShift = 1,
+ SpvFPFastMathModeNSZShift = 2,
+ SpvFPFastMathModeAllowRecipShift = 3,
+ SpvFPFastMathModeFastShift = 4,
+} SpvFPFastMathModeShift;
+
+typedef enum SpvFPFastMathModeMask_ {
+ SpvFPFastMathModeMaskNone = 0,
+ SpvFPFastMathModeNotNaNMask = 0x00000001,
+ SpvFPFastMathModeNotInfMask = 0x00000002,
+ SpvFPFastMathModeNSZMask = 0x00000004,
+ SpvFPFastMathModeAllowRecipMask = 0x00000008,
+ SpvFPFastMathModeFastMask = 0x00000010,
+} SpvFPFastMathModeMask;
+
+typedef enum SpvFPRoundingMode_ {
+ SpvFPRoundingModeRTE = 0,
+ SpvFPRoundingModeRTZ = 1,
+ SpvFPRoundingModeRTP = 2,
+ SpvFPRoundingModeRTN = 3,
+} SpvFPRoundingMode;
+
+typedef enum SpvLinkageType_ {
+ SpvLinkageTypeExport = 0,
+ SpvLinkageTypeImport = 1,
+} SpvLinkageType;
+
+typedef enum SpvAccessQualifier_ {
+ SpvAccessQualifierReadOnly = 0,
+ SpvAccessQualifierWriteOnly = 1,
+ SpvAccessQualifierReadWrite = 2,
+} SpvAccessQualifier;
+
+typedef enum SpvFunctionParameterAttribute_ {
+ SpvFunctionParameterAttributeZext = 0,
+ SpvFunctionParameterAttributeSext = 1,
+ SpvFunctionParameterAttributeByVal = 2,
+ SpvFunctionParameterAttributeSret = 3,
+ SpvFunctionParameterAttributeNoAlias = 4,
+ SpvFunctionParameterAttributeNoCapture = 5,
+ SpvFunctionParameterAttributeNoWrite = 6,
+ SpvFunctionParameterAttributeNoReadWrite = 7,
+} SpvFunctionParameterAttribute;
+
+typedef enum SpvDecoration_ {
+ SpvDecorationRelaxedPrecision = 0,
+ SpvDecorationSpecId = 1,
+ SpvDecorationBlock = 2,
+ SpvDecorationBufferBlock = 3,
+ SpvDecorationRowMajor = 4,
+ SpvDecorationColMajor = 5,
+ SpvDecorationArrayStride = 6,
+ SpvDecorationMatrixStride = 7,
+ SpvDecorationGLSLShared = 8,
+ SpvDecorationGLSLPacked = 9,
+ SpvDecorationCPacked = 10,
+ SpvDecorationBuiltIn = 11,
+ SpvDecorationNoPerspective = 13,
+ SpvDecorationFlat = 14,
+ SpvDecorationPatch = 15,
+ SpvDecorationCentroid = 16,
+ SpvDecorationSample = 17,
+ SpvDecorationInvariant = 18,
+ SpvDecorationRestrict = 19,
+ SpvDecorationAliased = 20,
+ SpvDecorationVolatile = 21,
+ SpvDecorationConstant = 22,
+ SpvDecorationCoherent = 23,
+ SpvDecorationNonWritable = 24,
+ SpvDecorationNonReadable = 25,
+ SpvDecorationUniform = 26,
+ SpvDecorationSaturatedConversion = 28,
+ SpvDecorationStream = 29,
+ SpvDecorationLocation = 30,
+ SpvDecorationComponent = 31,
+ SpvDecorationIndex = 32,
+ SpvDecorationBinding = 33,
+ SpvDecorationDescriptorSet = 34,
+ SpvDecorationOffset = 35,
+ SpvDecorationXfbBuffer = 36,
+ SpvDecorationXfbStride = 37,
+ SpvDecorationFuncParamAttr = 38,
+ SpvDecorationFPRoundingMode = 39,
+ SpvDecorationFPFastMathMode = 40,
+ SpvDecorationLinkageAttributes = 41,
+ SpvDecorationNoContraction = 42,
+ SpvDecorationInputAttachmentIndex = 43,
+ SpvDecorationAlignment = 44,
+} SpvDecoration;
+
+typedef enum SpvBuiltIn_ {
+ SpvBuiltInPosition = 0,
+ SpvBuiltInPointSize = 1,
+ SpvBuiltInClipDistance = 3,
+ SpvBuiltInCullDistance = 4,
+ SpvBuiltInVertexId = 5,
+ SpvBuiltInInstanceId = 6,
+ SpvBuiltInPrimitiveId = 7,
+ SpvBuiltInInvocationId = 8,
+ SpvBuiltInLayer = 9,
+ SpvBuiltInViewportIndex = 10,
+ SpvBuiltInTessLevelOuter = 11,
+ SpvBuiltInTessLevelInner = 12,
+ SpvBuiltInTessCoord = 13,
+ SpvBuiltInPatchVertices = 14,
+ SpvBuiltInFragCoord = 15,
+ SpvBuiltInPointCoord = 16,
+ SpvBuiltInFrontFacing = 17,
+ SpvBuiltInSampleId = 18,
+ SpvBuiltInSamplePosition = 19,
+ SpvBuiltInSampleMask = 20,
+ SpvBuiltInFragDepth = 22,
+ SpvBuiltInHelperInvocation = 23,
+ SpvBuiltInNumWorkgroups = 24,
+ SpvBuiltInWorkgroupSize = 25,
+ SpvBuiltInWorkgroupId = 26,
+ SpvBuiltInLocalInvocationId = 27,
+ SpvBuiltInGlobalInvocationId = 28,
+ SpvBuiltInLocalInvocationIndex = 29,
+ SpvBuiltInWorkDim = 30,
+ SpvBuiltInGlobalSize = 31,
+ SpvBuiltInEnqueuedWorkgroupSize = 32,
+ SpvBuiltInGlobalOffset = 33,
+ SpvBuiltInGlobalLinearId = 34,
+ SpvBuiltInSubgroupSize = 36,
+ SpvBuiltInSubgroupMaxSize = 37,
+ SpvBuiltInNumSubgroups = 38,
+ SpvBuiltInNumEnqueuedSubgroups = 39,
+ SpvBuiltInSubgroupId = 40,
+ SpvBuiltInSubgroupLocalInvocationId = 41,
+ SpvBuiltInVertexIndex = 42,
+ SpvBuiltInInstanceIndex = 43,
+} SpvBuiltIn;
+
+typedef enum SpvSelectionControlShift_ {
+ SpvSelectionControlFlattenShift = 0,
+ SpvSelectionControlDontFlattenShift = 1,
+} SpvSelectionControlShift;
+
+typedef enum SpvSelectionControlMask_ {
+ SpvSelectionControlMaskNone = 0,
+ SpvSelectionControlFlattenMask = 0x00000001,
+ SpvSelectionControlDontFlattenMask = 0x00000002,
+} SpvSelectionControlMask;
+
+typedef enum SpvLoopControlShift_ {
+ SpvLoopControlUnrollShift = 0,
+ SpvLoopControlDontUnrollShift = 1,
+} SpvLoopControlShift;
+
+typedef enum SpvLoopControlMask_ {
+ SpvLoopControlMaskNone = 0,
+ SpvLoopControlUnrollMask = 0x00000001,
+ SpvLoopControlDontUnrollMask = 0x00000002,
+} SpvLoopControlMask;
+
+typedef enum SpvFunctionControlShift_ {
+ SpvFunctionControlInlineShift = 0,
+ SpvFunctionControlDontInlineShift = 1,
+ SpvFunctionControlPureShift = 2,
+ SpvFunctionControlConstShift = 3,
+} SpvFunctionControlShift;
+
+typedef enum SpvFunctionControlMask_ {
+ SpvFunctionControlMaskNone = 0,
+ SpvFunctionControlInlineMask = 0x00000001,
+ SpvFunctionControlDontInlineMask = 0x00000002,
+ SpvFunctionControlPureMask = 0x00000004,
+ SpvFunctionControlConstMask = 0x00000008,
+} SpvFunctionControlMask;
+
+typedef enum SpvMemorySemanticsShift_ {
+ SpvMemorySemanticsAcquireShift = 1,
+ SpvMemorySemanticsReleaseShift = 2,
+ SpvMemorySemanticsAcquireReleaseShift = 3,
+ SpvMemorySemanticsSequentiallyConsistentShift = 4,
+ SpvMemorySemanticsUniformMemoryShift = 6,
+ SpvMemorySemanticsSubgroupMemoryShift = 7,
+ SpvMemorySemanticsWorkgroupMemoryShift = 8,
+ SpvMemorySemanticsCrossWorkgroupMemoryShift = 9,
+ SpvMemorySemanticsAtomicCounterMemoryShift = 10,
+ SpvMemorySemanticsImageMemoryShift = 11,
+} SpvMemorySemanticsShift;
+
+typedef enum SpvMemorySemanticsMask_ {
+ SpvMemorySemanticsMaskNone = 0,
+ SpvMemorySemanticsAcquireMask = 0x00000002,
+ SpvMemorySemanticsReleaseMask = 0x00000004,
+ SpvMemorySemanticsAcquireReleaseMask = 0x00000008,
+ SpvMemorySemanticsSequentiallyConsistentMask = 0x00000010,
+ SpvMemorySemanticsUniformMemoryMask = 0x00000040,
+ SpvMemorySemanticsSubgroupMemoryMask = 0x00000080,
+ SpvMemorySemanticsWorkgroupMemoryMask = 0x00000100,
+ SpvMemorySemanticsCrossWorkgroupMemoryMask = 0x00000200,
+ SpvMemorySemanticsAtomicCounterMemoryMask = 0x00000400,
+ SpvMemorySemanticsImageMemoryMask = 0x00000800,
+} SpvMemorySemanticsMask;
+
+typedef enum SpvMemoryAccessShift_ {
+ SpvMemoryAccessVolatileShift = 0,
+ SpvMemoryAccessAlignedShift = 1,
+ SpvMemoryAccessNontemporalShift = 2,
+} SpvMemoryAccessShift;
+
+typedef enum SpvMemoryAccessMask_ {
+ SpvMemoryAccessMaskNone = 0,
+ SpvMemoryAccessVolatileMask = 0x00000001,
+ SpvMemoryAccessAlignedMask = 0x00000002,
+ SpvMemoryAccessNontemporalMask = 0x00000004,
+} SpvMemoryAccessMask;
+
+typedef enum SpvScope_ {
+ SpvScopeCrossDevice = 0,
+ SpvScopeDevice = 1,
+ SpvScopeWorkgroup = 2,
+ SpvScopeSubgroup = 3,
+ SpvScopeInvocation = 4,
+} SpvScope;
+
+typedef enum SpvGroupOperation_ {
+ SpvGroupOperationReduce = 0,
+ SpvGroupOperationInclusiveScan = 1,
+ SpvGroupOperationExclusiveScan = 2,
+} SpvGroupOperation;
+
+typedef enum SpvKernelEnqueueFlags_ {
+ SpvKernelEnqueueFlagsNoWait = 0,
+ SpvKernelEnqueueFlagsWaitKernel = 1,
+ SpvKernelEnqueueFlagsWaitWorkGroup = 2,
+} SpvKernelEnqueueFlags;
+
+typedef enum SpvKernelProfilingInfoShift_ {
+ SpvKernelProfilingInfoCmdExecTimeShift = 0,
+} SpvKernelProfilingInfoShift;
+
+typedef enum SpvKernelProfilingInfoMask_ {
+ SpvKernelProfilingInfoMaskNone = 0,
+ SpvKernelProfilingInfoCmdExecTimeMask = 0x00000001,
+} SpvKernelProfilingInfoMask;
+
+typedef enum SpvCapability_ {
+ SpvCapabilityMatrix = 0,
+ SpvCapabilityShader = 1,
+ SpvCapabilityGeometry = 2,
+ SpvCapabilityTessellation = 3,
+ SpvCapabilityAddresses = 4,
+ SpvCapabilityLinkage = 5,
+ SpvCapabilityKernel = 6,
+ SpvCapabilityVector16 = 7,
+ SpvCapabilityFloat16Buffer = 8,
+ SpvCapabilityFloat16 = 9,
+ SpvCapabilityFloat64 = 10,
+ SpvCapabilityInt64 = 11,
+ SpvCapabilityInt64Atomics = 12,
+ SpvCapabilityImageBasic = 13,
+ SpvCapabilityImageReadWrite = 14,
+ SpvCapabilityImageMipmap = 15,
+ SpvCapabilityPipes = 17,
+ SpvCapabilityGroups = 18,
+ SpvCapabilityDeviceEnqueue = 19,
+ SpvCapabilityLiteralSampler = 20,
+ SpvCapabilityAtomicStorage = 21,
+ SpvCapabilityInt16 = 22,
+ SpvCapabilityTessellationPointSize = 23,
+ SpvCapabilityGeometryPointSize = 24,
+ SpvCapabilityImageGatherExtended = 25,
+ SpvCapabilityStorageImageMultisample = 27,
+ SpvCapabilityUniformBufferArrayDynamicIndexing = 28,
+ SpvCapabilitySampledImageArrayDynamicIndexing = 29,
+ SpvCapabilityStorageBufferArrayDynamicIndexing = 30,
+ SpvCapabilityStorageImageArrayDynamicIndexing = 31,
+ SpvCapabilityClipDistance = 32,
+ SpvCapabilityCullDistance = 33,
+ SpvCapabilityImageCubeArray = 34,
+ SpvCapabilitySampleRateShading = 35,
+ SpvCapabilityImageRect = 36,
+ SpvCapabilitySampledRect = 37,
+ SpvCapabilityGenericPointer = 38,
+ SpvCapabilityInt8 = 39,
+ SpvCapabilityInputAttachment = 40,
+ SpvCapabilitySparseResidency = 41,
+ SpvCapabilityMinLod = 42,
+ SpvCapabilitySampled1D = 43,
+ SpvCapabilityImage1D = 44,
+ SpvCapabilitySampledCubeArray = 45,
+ SpvCapabilitySampledBuffer = 46,
+ SpvCapabilityImageBuffer = 47,
+ SpvCapabilityImageMSArray = 48,
+ SpvCapabilityStorageImageExtendedFormats = 49,
+ SpvCapabilityImageQuery = 50,
+ SpvCapabilityDerivativeControl = 51,
+ SpvCapabilityInterpolationFunction = 52,
+ SpvCapabilityTransformFeedback = 53,
+ SpvCapabilityGeometryStreams = 54,
+ SpvCapabilityStorageImageReadWithoutFormat = 55,
+ SpvCapabilityStorageImageWriteWithoutFormat = 56,
+ SpvCapabilityMultiViewport = 57,
+} SpvCapability;
+
+typedef enum SpvOp_ {
+ SpvOpNop = 0,
+ SpvOpUndef = 1,
+ SpvOpSourceContinued = 2,
+ SpvOpSource = 3,
+ SpvOpSourceExtension = 4,
+ SpvOpName = 5,
+ SpvOpMemberName = 6,
+ SpvOpString = 7,
+ SpvOpLine = 8,
+ SpvOpExtension = 10,
+ SpvOpExtInstImport = 11,
+ SpvOpExtInst = 12,
+ SpvOpMemoryModel = 14,
+ SpvOpEntryPoint = 15,
+ SpvOpExecutionMode = 16,
+ SpvOpCapability = 17,
+ SpvOpTypeVoid = 19,
+ SpvOpTypeBool = 20,
+ SpvOpTypeInt = 21,
+ SpvOpTypeFloat = 22,
+ SpvOpTypeVector = 23,
+ SpvOpTypeMatrix = 24,
+ SpvOpTypeImage = 25,
+ SpvOpTypeSampler = 26,
+ SpvOpTypeSampledImage = 27,
+ SpvOpTypeArray = 28,
+ SpvOpTypeRuntimeArray = 29,
+ SpvOpTypeStruct = 30,
+ SpvOpTypeOpaque = 31,
+ SpvOpTypePointer = 32,
+ SpvOpTypeFunction = 33,
+ SpvOpTypeEvent = 34,
+ SpvOpTypeDeviceEvent = 35,
+ SpvOpTypeReserveId = 36,
+ SpvOpTypeQueue = 37,
+ SpvOpTypePipe = 38,
+ SpvOpTypeForwardPointer = 39,
+ SpvOpConstantTrue = 41,
+ SpvOpConstantFalse = 42,
+ SpvOpConstant = 43,
+ SpvOpConstantComposite = 44,
+ SpvOpConstantSampler = 45,
+ SpvOpConstantNull = 46,
+ SpvOpSpecConstantTrue = 48,
+ SpvOpSpecConstantFalse = 49,
+ SpvOpSpecConstant = 50,
+ SpvOpSpecConstantComposite = 51,
+ SpvOpSpecConstantOp = 52,
+ SpvOpFunction = 54,
+ SpvOpFunctionParameter = 55,
+ SpvOpFunctionEnd = 56,
+ SpvOpFunctionCall = 57,
+ SpvOpVariable = 59,
+ SpvOpImageTexelPointer = 60,
+ SpvOpLoad = 61,
+ SpvOpStore = 62,
+ SpvOpCopyMemory = 63,
+ SpvOpCopyMemorySized = 64,
+ SpvOpAccessChain = 65,
+ SpvOpInBoundsAccessChain = 66,
+ SpvOpPtrAccessChain = 67,
+ SpvOpArrayLength = 68,
+ SpvOpGenericPtrMemSemantics = 69,
+ SpvOpInBoundsPtrAccessChain = 70,
+ SpvOpDecorate = 71,
+ SpvOpMemberDecorate = 72,
+ SpvOpDecorationGroup = 73,
+ SpvOpGroupDecorate = 74,
+ SpvOpGroupMemberDecorate = 75,
+ SpvOpVectorExtractDynamic = 77,
+ SpvOpVectorInsertDynamic = 78,
+ SpvOpVectorShuffle = 79,
+ SpvOpCompositeConstruct = 80,
+ SpvOpCompositeExtract = 81,
+ SpvOpCompositeInsert = 82,
+ SpvOpCopyObject = 83,
+ SpvOpTranspose = 84,
+ SpvOpSampledImage = 86,
+ SpvOpImageSampleImplicitLod = 87,
+ SpvOpImageSampleExplicitLod = 88,
+ SpvOpImageSampleDrefImplicitLod = 89,
+ SpvOpImageSampleDrefExplicitLod = 90,
+ SpvOpImageSampleProjImplicitLod = 91,
+ SpvOpImageSampleProjExplicitLod = 92,
+ SpvOpImageSampleProjDrefImplicitLod = 93,
+ SpvOpImageSampleProjDrefExplicitLod = 94,
+ SpvOpImageFetch = 95,
+ SpvOpImageGather = 96,
+ SpvOpImageDrefGather = 97,
+ SpvOpImageRead = 98,
+ SpvOpImageWrite = 99,
+ SpvOpImage = 100,
+ SpvOpImageQueryFormat = 101,
+ SpvOpImageQueryOrder = 102,
+ SpvOpImageQuerySizeLod = 103,
+ SpvOpImageQuerySize = 104,
+ SpvOpImageQueryLod = 105,
+ SpvOpImageQueryLevels = 106,
+ SpvOpImageQuerySamples = 107,
+ SpvOpConvertFToU = 109,
+ SpvOpConvertFToS = 110,
+ SpvOpConvertSToF = 111,
+ SpvOpConvertUToF = 112,
+ SpvOpUConvert = 113,
+ SpvOpSConvert = 114,
+ SpvOpFConvert = 115,
+ SpvOpQuantizeToF16 = 116,
+ SpvOpConvertPtrToU = 117,
+ SpvOpSatConvertSToU = 118,
+ SpvOpSatConvertUToS = 119,
+ SpvOpConvertUToPtr = 120,
+ SpvOpPtrCastToGeneric = 121,
+ SpvOpGenericCastToPtr = 122,
+ SpvOpGenericCastToPtrExplicit = 123,
+ SpvOpBitcast = 124,
+ SpvOpSNegate = 126,
+ SpvOpFNegate = 127,
+ SpvOpIAdd = 128,
+ SpvOpFAdd = 129,
+ SpvOpISub = 130,
+ SpvOpFSub = 131,
+ SpvOpIMul = 132,
+ SpvOpFMul = 133,
+ SpvOpUDiv = 134,
+ SpvOpSDiv = 135,
+ SpvOpFDiv = 136,
+ SpvOpUMod = 137,
+ SpvOpSRem = 138,
+ SpvOpSMod = 139,
+ SpvOpFRem = 140,
+ SpvOpFMod = 141,
+ SpvOpVectorTimesScalar = 142,
+ SpvOpMatrixTimesScalar = 143,
+ SpvOpVectorTimesMatrix = 144,
+ SpvOpMatrixTimesVector = 145,
+ SpvOpMatrixTimesMatrix = 146,
+ SpvOpOuterProduct = 147,
+ SpvOpDot = 148,
+ SpvOpIAddCarry = 149,
+ SpvOpISubBorrow = 150,
+ SpvOpUMulExtended = 151,
+ SpvOpSMulExtended = 152,
+ SpvOpAny = 154,
+ SpvOpAll = 155,
+ SpvOpIsNan = 156,
+ SpvOpIsInf = 157,
+ SpvOpIsFinite = 158,
+ SpvOpIsNormal = 159,
+ SpvOpSignBitSet = 160,
+ SpvOpLessOrGreater = 161,
+ SpvOpOrdered = 162,
+ SpvOpUnordered = 163,
+ SpvOpLogicalEqual = 164,
+ SpvOpLogicalNotEqual = 165,
+ SpvOpLogicalOr = 166,
+ SpvOpLogicalAnd = 167,
+ SpvOpLogicalNot = 168,
+ SpvOpSelect = 169,
+ SpvOpIEqual = 170,
+ SpvOpINotEqual = 171,
+ SpvOpUGreaterThan = 172,
+ SpvOpSGreaterThan = 173,
+ SpvOpUGreaterThanEqual = 174,
+ SpvOpSGreaterThanEqual = 175,
+ SpvOpULessThan = 176,
+ SpvOpSLessThan = 177,
+ SpvOpULessThanEqual = 178,
+ SpvOpSLessThanEqual = 179,
+ SpvOpFOrdEqual = 180,
+ SpvOpFUnordEqual = 181,
+ SpvOpFOrdNotEqual = 182,
+ SpvOpFUnordNotEqual = 183,
+ SpvOpFOrdLessThan = 184,
+ SpvOpFUnordLessThan = 185,
+ SpvOpFOrdGreaterThan = 186,
+ SpvOpFUnordGreaterThan = 187,
+ SpvOpFOrdLessThanEqual = 188,
+ SpvOpFUnordLessThanEqual = 189,
+ SpvOpFOrdGreaterThanEqual = 190,
+ SpvOpFUnordGreaterThanEqual = 191,
+ SpvOpShiftRightLogical = 194,
+ SpvOpShiftRightArithmetic = 195,
+ SpvOpShiftLeftLogical = 196,
+ SpvOpBitwiseOr = 197,
+ SpvOpBitwiseXor = 198,
+ SpvOpBitwiseAnd = 199,
+ SpvOpNot = 200,
+ SpvOpBitFieldInsert = 201,
+ SpvOpBitFieldSExtract = 202,
+ SpvOpBitFieldUExtract = 203,
+ SpvOpBitReverse = 204,
+ SpvOpBitCount = 205,
+ SpvOpDPdx = 207,
+ SpvOpDPdy = 208,
+ SpvOpFwidth = 209,
+ SpvOpDPdxFine = 210,
+ SpvOpDPdyFine = 211,
+ SpvOpFwidthFine = 212,
+ SpvOpDPdxCoarse = 213,
+ SpvOpDPdyCoarse = 214,
+ SpvOpFwidthCoarse = 215,
+ SpvOpEmitVertex = 218,
+ SpvOpEndPrimitive = 219,
+ SpvOpEmitStreamVertex = 220,
+ SpvOpEndStreamPrimitive = 221,
+ SpvOpControlBarrier = 224,
+ SpvOpMemoryBarrier = 225,
+ SpvOpAtomicLoad = 227,
+ SpvOpAtomicStore = 228,
+ SpvOpAtomicExchange = 229,
+ SpvOpAtomicCompareExchange = 230,
+ SpvOpAtomicCompareExchangeWeak = 231,
+ SpvOpAtomicIIncrement = 232,
+ SpvOpAtomicIDecrement = 233,
+ SpvOpAtomicIAdd = 234,
+ SpvOpAtomicISub = 235,
+ SpvOpAtomicSMin = 236,
+ SpvOpAtomicUMin = 237,
+ SpvOpAtomicSMax = 238,
+ SpvOpAtomicUMax = 239,
+ SpvOpAtomicAnd = 240,
+ SpvOpAtomicOr = 241,
+ SpvOpAtomicXor = 242,
+ SpvOpPhi = 245,
+ SpvOpLoopMerge = 246,
+ SpvOpSelectionMerge = 247,
+ SpvOpLabel = 248,
+ SpvOpBranch = 249,
+ SpvOpBranchConditional = 250,
+ SpvOpSwitch = 251,
+ SpvOpKill = 252,
+ SpvOpReturn = 253,
+ SpvOpReturnValue = 254,
+ SpvOpUnreachable = 255,
+ SpvOpLifetimeStart = 256,
+ SpvOpLifetimeStop = 257,
+ SpvOpGroupAsyncCopy = 259,
+ SpvOpGroupWaitEvents = 260,
+ SpvOpGroupAll = 261,
+ SpvOpGroupAny = 262,
+ SpvOpGroupBroadcast = 263,
+ SpvOpGroupIAdd = 264,
+ SpvOpGroupFAdd = 265,
+ SpvOpGroupFMin = 266,
+ SpvOpGroupUMin = 267,
+ SpvOpGroupSMin = 268,
+ SpvOpGroupFMax = 269,
+ SpvOpGroupUMax = 270,
+ SpvOpGroupSMax = 271,
+ SpvOpReadPipe = 274,
+ SpvOpWritePipe = 275,
+ SpvOpReservedReadPipe = 276,
+ SpvOpReservedWritePipe = 277,
+ SpvOpReserveReadPipePackets = 278,
+ SpvOpReserveWritePipePackets = 279,
+ SpvOpCommitReadPipe = 280,
+ SpvOpCommitWritePipe = 281,
+ SpvOpIsValidReserveId = 282,
+ SpvOpGetNumPipePackets = 283,
+ SpvOpGetMaxPipePackets = 284,
+ SpvOpGroupReserveReadPipePackets = 285,
+ SpvOpGroupReserveWritePipePackets = 286,
+ SpvOpGroupCommitReadPipe = 287,
+ SpvOpGroupCommitWritePipe = 288,
+ SpvOpEnqueueMarker = 291,
+ SpvOpEnqueueKernel = 292,
+ SpvOpGetKernelNDrangeSubGroupCount = 293,
+ SpvOpGetKernelNDrangeMaxSubGroupSize = 294,
+ SpvOpGetKernelWorkGroupSize = 295,
+ SpvOpGetKernelPreferredWorkGroupSizeMultiple = 296,
+ SpvOpRetainEvent = 297,
+ SpvOpReleaseEvent = 298,
+ SpvOpCreateUserEvent = 299,
+ SpvOpIsValidEvent = 300,
+ SpvOpSetUserEventStatus = 301,
+ SpvOpCaptureEventProfilingInfo = 302,
+ SpvOpGetDefaultQueue = 303,
+ SpvOpBuildNDRange = 304,
+ SpvOpImageSparseSampleImplicitLod = 305,
+ SpvOpImageSparseSampleExplicitLod = 306,
+ SpvOpImageSparseSampleDrefImplicitLod = 307,
+ SpvOpImageSparseSampleDrefExplicitLod = 308,
+ SpvOpImageSparseSampleProjImplicitLod = 309,
+ SpvOpImageSparseSampleProjExplicitLod = 310,
+ SpvOpImageSparseSampleProjDrefImplicitLod = 311,
+ SpvOpImageSparseSampleProjDrefExplicitLod = 312,
+ SpvOpImageSparseFetch = 313,
+ SpvOpImageSparseGather = 314,
+ SpvOpImageSparseDrefGather = 315,
+ SpvOpImageSparseTexelsResident = 316,
+ SpvOpNoLine = 317,
+ SpvOpAtomicFlagTestAndSet = 318,
+ SpvOpAtomicFlagClear = 319,
+} SpvOp;
+
+#endif // #ifndef spirv_H
+
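The preamble comment in spirv.h notes that mask-style enums can be OR'd together and come with a parallel *Shift enum holding the bit position. A small illustration (not part of the header) using the image-operand tokens defined above:

#include <assert.h>
#include "spirv.h"

/* Each *Mask value equals 1u shifted by its *Shift counterpart, e.g.
 * SpvImageOperandsLodMask == 1u << SpvImageOperandsLodShift, so individual
 * operand bits can be combined with bitwise OR.
 */
static SpvImageOperandsMask
bias_and_lod_operands(void)
{
   assert(SpvImageOperandsLodMask == (1u << SpvImageOperandsLodShift));
   return SpvImageOperandsBiasMask | SpvImageOperandsLodMask;   /* 0x3 */
}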
diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
new file mode 100644
index 00000000000..99514b49650
--- /dev/null
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -0,0 +1,2710 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jason Ekstrand ([email protected])
+ *
+ */
+
+#include "vtn_private.h"
+#include "nir/nir_vla.h"
+#include "nir/nir_control_flow.h"
+#include "nir/nir_constant_expressions.h"
+
+static struct vtn_ssa_value *
+vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
+{
+ struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
+ val->type = type;
+
+ if (glsl_type_is_vector_or_scalar(type)) {
+ unsigned num_components = glsl_get_vector_elements(val->type);
+ unsigned bit_size = glsl_get_bit_size(glsl_get_base_type(val->type));
+ val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
+ } else {
+ unsigned elems = glsl_get_length(val->type);
+ val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+ if (glsl_type_is_matrix(type)) {
+ const struct glsl_type *elem_type =
+ glsl_vector_type(glsl_get_base_type(type),
+ glsl_get_vector_elements(type));
+
+ for (unsigned i = 0; i < elems; i++)
+ val->elems[i] = vtn_undef_ssa_value(b, elem_type);
+ } else if (glsl_type_is_array(type)) {
+ const struct glsl_type *elem_type = glsl_get_array_element(type);
+ for (unsigned i = 0; i < elems; i++)
+ val->elems[i] = vtn_undef_ssa_value(b, elem_type);
+ } else {
+ for (unsigned i = 0; i < elems; i++) {
+ const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
+ val->elems[i] = vtn_undef_ssa_value(b, elem_type);
+ }
+ }
+ }
+
+ return val;
+}
+
+static struct vtn_ssa_value *
+vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
+ const struct glsl_type *type)
+{
+ struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
+
+ if (entry)
+ return entry->data;
+
+ struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
+ val->type = type;
+
+ switch (glsl_get_base_type(type)) {
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
+ if (glsl_type_is_vector_or_scalar(type)) {
+ unsigned num_components = glsl_get_vector_elements(val->type);
+ nir_load_const_instr *load =
+ nir_load_const_instr_create(b->shader, num_components, 32);
+
+ for (unsigned i = 0; i < num_components; i++)
+ load->value.u32[i] = constant->value.u[i];
+
+ nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
+ val->def = &load->def;
+ } else {
+ assert(glsl_type_is_matrix(type));
+ unsigned rows = glsl_get_vector_elements(val->type);
+ unsigned columns = glsl_get_matrix_columns(val->type);
+ val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);
+
+ for (unsigned i = 0; i < columns; i++) {
+ struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
+ col_val->type = glsl_get_column_type(val->type);
+ nir_load_const_instr *load =
+ nir_load_const_instr_create(b->shader, rows, 32);
+
+ for (unsigned j = 0; j < rows; j++)
+ load->value.u32[j] = constant->value.u[rows * i + j];
+
+ nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
+ col_val->def = &load->def;
+
+ val->elems[i] = col_val;
+ }
+ }
+ break;
+
+ case GLSL_TYPE_ARRAY: {
+ unsigned elems = glsl_get_length(val->type);
+ val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+ const struct glsl_type *elem_type = glsl_get_array_element(val->type);
+ for (unsigned i = 0; i < elems; i++)
+ val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
+ elem_type);
+ break;
+ }
+
+ case GLSL_TYPE_STRUCT: {
+ unsigned elems = glsl_get_length(val->type);
+ val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+ for (unsigned i = 0; i < elems; i++) {
+ const struct glsl_type *elem_type =
+ glsl_get_struct_field(val->type, i);
+ val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
+ elem_type);
+ }
+ break;
+ }
+
+ default:
+ unreachable("bad constant type");
+ }
+
+ return val;
+}
+
+struct vtn_ssa_value *
+vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
+{
+ struct vtn_value *val = vtn_untyped_value(b, value_id);
+ switch (val->value_type) {
+ case vtn_value_type_undef:
+ return vtn_undef_ssa_value(b, val->type->type);
+
+ case vtn_value_type_constant:
+ return vtn_const_ssa_value(b, val->constant, val->const_type);
+
+ case vtn_value_type_ssa:
+ return val->ssa;
+
+ case vtn_value_type_access_chain:
+ /* This is needed for function parameters */
+ return vtn_variable_load(b, val->access_chain);
+
+ default:
+ unreachable("Invalid type for an SSA value");
+ }
+}
+
+static char *
+vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
+ unsigned word_count, unsigned *words_used)
+{
+ char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
+ if (words_used) {
+      /* Amount of space taken by the string (including the null terminator) */
+ unsigned len = strlen(dup) + 1;
+ *words_used = DIV_ROUND_UP(len, sizeof(*words));
+ }
+ return dup;
+}
+
+const uint32_t *
+vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
+ const uint32_t *end, vtn_instruction_handler handler)
+{
+ b->file = NULL;
+ b->line = -1;
+ b->col = -1;
+
+ const uint32_t *w = start;
+ while (w < end) {
+ SpvOp opcode = w[0] & SpvOpCodeMask;
+ unsigned count = w[0] >> SpvWordCountShift;
+ assert(count >= 1 && w + count <= end);
+
+ switch (opcode) {
+ case SpvOpNop:
+ break; /* Do nothing */
+
+ case SpvOpLine:
+ b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
+ b->line = w[2];
+ b->col = w[3];
+ break;
+
+ case SpvOpNoLine:
+ b->file = NULL;
+ b->line = -1;
+ b->col = -1;
+ break;
+
+ default:
+ if (!handler(b, opcode, w, count))
+ return w;
+ break;
+ }
+
+ w += count;
+ }
+ assert(w == end);
+ return w;
+}
+
+static void
+vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpExtInstImport: {
+ struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
+ if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
+ val->ext_handler = vtn_handle_glsl450_instruction;
+ } else {
+ assert(!"Unsupported extension");
+ }
+ break;
+ }
+
+ case SpvOpExtInst: {
+ struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
+ bool handled = val->ext_handler(b, w[4], w, count);
+ (void)handled;
+ assert(handled);
+ break;
+ }
+
+ default:
+ unreachable("Unhandled opcode");
+ }
+}
+
+static void
+_foreach_decoration_helper(struct vtn_builder *b,
+ struct vtn_value *base_value,
+ int parent_member,
+ struct vtn_value *value,
+ vtn_decoration_foreach_cb cb, void *data)
+{
+ for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
+ int member;
+ if (dec->scope == VTN_DEC_DECORATION) {
+ member = parent_member;
+ } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
+ assert(parent_member == -1);
+ member = dec->scope - VTN_DEC_STRUCT_MEMBER0;
+ } else {
+ /* Not a decoration */
+ continue;
+ }
+
+ if (dec->group) {
+ assert(dec->group->value_type == vtn_value_type_decoration_group);
+ _foreach_decoration_helper(b, base_value, member, dec->group,
+ cb, data);
+ } else {
+ cb(b, base_value, member, dec, data);
+ }
+ }
+}
+
+/** Iterates (recursively if needed) over all of the decorations on a value
+ *
+ * This function iterates over all of the decorations applied to a given
+ * value. If it encounters a decoration group, it recurses into the group
+ * and iterates over all of those decorations as well.
+ */
+void
+vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
+ vtn_decoration_foreach_cb cb, void *data)
+{
+ _foreach_decoration_helper(b, value, -1, value, cb, data);
+}
+
+void
+vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
+ vtn_execution_mode_foreach_cb cb, void *data)
+{
+ for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
+ if (dec->scope != VTN_DEC_EXECUTION_MODE)
+ continue;
+
+ assert(dec->group == NULL);
+ cb(b, value, dec, data);
+ }
+}
+
+static void
+vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ const uint32_t *w_end = w + count;
+ const uint32_t target = w[1];
+ w += 2;
+
+ switch (opcode) {
+ case SpvOpDecorationGroup:
+ vtn_push_value(b, target, vtn_value_type_decoration_group);
+ break;
+
+ case SpvOpDecorate:
+ case SpvOpMemberDecorate:
+ case SpvOpExecutionMode: {
+ struct vtn_value *val = &b->values[target];
+
+ struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
+ switch (opcode) {
+ case SpvOpDecorate:
+ dec->scope = VTN_DEC_DECORATION;
+ break;
+ case SpvOpMemberDecorate:
+ dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
+ break;
+ case SpvOpExecutionMode:
+ dec->scope = VTN_DEC_EXECUTION_MODE;
+ break;
+ default:
+ unreachable("Invalid decoration opcode");
+ }
+ dec->decoration = *(w++);
+ dec->literals = w;
+
+ /* Link into the list */
+ dec->next = val->decoration;
+ val->decoration = dec;
+ break;
+ }
+
+ case SpvOpGroupMemberDecorate:
+ case SpvOpGroupDecorate: {
+ struct vtn_value *group =
+ vtn_value(b, target, vtn_value_type_decoration_group);
+
+ for (; w < w_end; w++) {
+ struct vtn_value *val = vtn_untyped_value(b, *w);
+ struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
+
+ dec->group = group;
+ if (opcode == SpvOpGroupDecorate) {
+ dec->scope = VTN_DEC_DECORATION;
+ } else {
+ dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
+ }
+
+ /* Link into the list */
+ dec->next = val->decoration;
+ val->decoration = dec;
+ }
+ break;
+ }
+
+ default:
+ unreachable("Unhandled opcode");
+ }
+}
+
+struct member_decoration_ctx {
+ unsigned num_fields;
+ struct glsl_struct_field *fields;
+ struct vtn_type *type;
+};
+
+/* does a shallow copy of a vtn_type */
+
+static struct vtn_type *
+vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
+{
+ struct vtn_type *dest = ralloc(b, struct vtn_type);
+ dest->type = src->type;
+ dest->is_builtin = src->is_builtin;
+ if (src->is_builtin)
+ dest->builtin = src->builtin;
+
+ if (!glsl_type_is_scalar(src->type)) {
+ switch (glsl_get_base_type(src->type)) {
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_ARRAY:
+ dest->row_major = src->row_major;
+ dest->stride = src->stride;
+ dest->array_element = src->array_element;
+ break;
+
+ case GLSL_TYPE_STRUCT: {
+ unsigned elems = glsl_get_length(src->type);
+
+ dest->members = ralloc_array(b, struct vtn_type *, elems);
+ memcpy(dest->members, src->members, elems * sizeof(struct vtn_type *));
+
+ dest->offsets = ralloc_array(b, unsigned, elems);
+ memcpy(dest->offsets, src->offsets, elems * sizeof(unsigned));
+ break;
+ }
+
+ default:
+ unreachable("unhandled type");
+ }
+ }
+
+ return dest;
+}
+
+static struct vtn_type *
+mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
+{
+ type->members[member] = vtn_type_copy(b, type->members[member]);
+ type = type->members[member];
+
+ /* We may have an array of matrices.... Oh, joy! */
+ while (glsl_type_is_array(type->type)) {
+ type->array_element = vtn_type_copy(b, type->array_element);
+ type = type->array_element;
+ }
+
+ assert(glsl_type_is_matrix(type->type));
+
+ return type;
+}
+
+static void
+struct_member_decoration_cb(struct vtn_builder *b,
+ struct vtn_value *val, int member,
+ const struct vtn_decoration *dec, void *void_ctx)
+{
+ struct member_decoration_ctx *ctx = void_ctx;
+
+ if (member < 0)
+ return;
+
+ assert(member < ctx->num_fields);
+
+ switch (dec->decoration) {
+ case SpvDecorationRelaxedPrecision:
+ break; /* FIXME: Do nothing with this for now. */
+ case SpvDecorationNoPerspective:
+ ctx->fields[member].interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
+ break;
+ case SpvDecorationFlat:
+ ctx->fields[member].interpolation = INTERP_QUALIFIER_FLAT;
+ break;
+ case SpvDecorationCentroid:
+ ctx->fields[member].centroid = true;
+ break;
+ case SpvDecorationSample:
+ ctx->fields[member].sample = true;
+ break;
+ case SpvDecorationLocation:
+ ctx->fields[member].location = dec->literals[0];
+ break;
+ case SpvDecorationBuiltIn:
+ ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
+ ctx->type->members[member]->is_builtin = true;
+ ctx->type->members[member]->builtin = dec->literals[0];
+ ctx->type->builtin_block = true;
+ break;
+ case SpvDecorationOffset:
+ ctx->type->offsets[member] = dec->literals[0];
+ break;
+ case SpvDecorationMatrixStride:
+ mutable_matrix_member(b, ctx->type, member)->stride = dec->literals[0];
+ break;
+ case SpvDecorationColMajor:
+ break; /* Nothing to do here. Column-major is the default. */
+ case SpvDecorationRowMajor:
+ mutable_matrix_member(b, ctx->type, member)->row_major = true;
+ break;
+ default:
+ unreachable("Unhandled member decoration");
+ }
+}
+
+static void
+type_decoration_cb(struct vtn_builder *b,
+ struct vtn_value *val, int member,
+ const struct vtn_decoration *dec, void *ctx)
+{
+ struct vtn_type *type = val->type;
+
+ if (member != -1)
+ return;
+
+ switch (dec->decoration) {
+ case SpvDecorationArrayStride:
+ type->stride = dec->literals[0];
+ break;
+ case SpvDecorationBlock:
+ type->block = true;
+ break;
+ case SpvDecorationBufferBlock:
+ type->buffer_block = true;
+ break;
+ case SpvDecorationGLSLShared:
+ case SpvDecorationGLSLPacked:
+ /* Ignore these, since we get explicit offsets anyways */
+ break;
+
+ case SpvDecorationStream:
+ assert(dec->literals[0] == 0);
+ break;
+
+ default:
+ unreachable("Unhandled type decoration");
+ }
+}
+
+static unsigned
+translate_image_format(SpvImageFormat format)
+{
+ switch (format) {
+ case SpvImageFormatUnknown: return 0; /* GL_NONE */
+ case SpvImageFormatRgba32f: return 0x8814; /* GL_RGBA32F */
+ case SpvImageFormatRgba16f: return 0x881A; /* GL_RGBA16F */
+ case SpvImageFormatR32f: return 0x822E; /* GL_R32F */
+ case SpvImageFormatRgba8: return 0x8058; /* GL_RGBA8 */
+ case SpvImageFormatRgba8Snorm: return 0x8F97; /* GL_RGBA8_SNORM */
+ case SpvImageFormatRg32f: return 0x8230; /* GL_RG32F */
+ case SpvImageFormatRg16f: return 0x822F; /* GL_RG16F */
+ case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
+ case SpvImageFormatR16f: return 0x822D; /* GL_R16F */
+ case SpvImageFormatRgba16: return 0x805B; /* GL_RGBA16 */
+ case SpvImageFormatRgb10A2: return 0x8059; /* GL_RGB10_A2 */
+ case SpvImageFormatRg16: return 0x822C; /* GL_RG16 */
+ case SpvImageFormatRg8: return 0x822B; /* GL_RG8 */
+ case SpvImageFormatR16: return 0x822A; /* GL_R16 */
+ case SpvImageFormatR8: return 0x8229; /* GL_R8 */
+ case SpvImageFormatRgba16Snorm: return 0x8F9B; /* GL_RGBA16_SNORM */
+ case SpvImageFormatRg16Snorm: return 0x8F99; /* GL_RG16_SNORM */
+ case SpvImageFormatRg8Snorm: return 0x8F95; /* GL_RG8_SNORM */
+ case SpvImageFormatR16Snorm: return 0x8F98; /* GL_R16_SNORM */
+ case SpvImageFormatR8Snorm: return 0x8F94; /* GL_R8_SNORM */
+ case SpvImageFormatRgba32i: return 0x8D82; /* GL_RGBA32I */
+ case SpvImageFormatRgba16i: return 0x8D88; /* GL_RGBA16I */
+ case SpvImageFormatRgba8i: return 0x8D8E; /* GL_RGBA8I */
+ case SpvImageFormatR32i: return 0x8235; /* GL_R32I */
+ case SpvImageFormatRg32i: return 0x823B; /* GL_RG32I */
+ case SpvImageFormatRg16i: return 0x8239; /* GL_RG16I */
+ case SpvImageFormatRg8i: return 0x8237; /* GL_RG8I */
+ case SpvImageFormatR16i: return 0x8233; /* GL_R16I */
+ case SpvImageFormatR8i: return 0x8231; /* GL_R8I */
+ case SpvImageFormatRgba32ui: return 0x8D70; /* GL_RGBA32UI */
+ case SpvImageFormatRgba16ui: return 0x8D76; /* GL_RGBA16UI */
+ case SpvImageFormatRgba8ui: return 0x8D7C; /* GL_RGBA8UI */
+ case SpvImageFormatR32ui: return 0x8236; /* GL_R32UI */
+ case SpvImageFormatRgb10a2ui: return 0x906F; /* GL_RGB10_A2UI */
+ case SpvImageFormatRg32ui: return 0x823C; /* GL_RG32UI */
+ case SpvImageFormatRg16ui: return 0x823A; /* GL_RG16UI */
+ case SpvImageFormatRg8ui: return 0x8238; /* GL_RG8UI */
+   case SpvImageFormatR16ui:       return 0x8234; /* GL_R16UI */
+ case SpvImageFormatR8ui: return 0x8232; /* GL_R8UI */
+ default:
+ assert(!"Invalid image format");
+ return 0;
+ }
+}
+
+static void
+vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);
+
+ val->type = rzalloc(b, struct vtn_type);
+ val->type->is_builtin = false;
+ val->type->val = val;
+
+ switch (opcode) {
+ case SpvOpTypeVoid:
+ val->type->type = glsl_void_type();
+ break;
+ case SpvOpTypeBool:
+ val->type->type = glsl_bool_type();
+ break;
+ case SpvOpTypeInt: {
+ const bool signedness = w[3];
+ val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
+ break;
+ }
+ case SpvOpTypeFloat:
+ val->type->type = glsl_float_type();
+ break;
+
+ case SpvOpTypeVector: {
+ struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
+ unsigned elems = w[3];
+
+ assert(glsl_type_is_scalar(base->type));
+ val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
+
+ /* Vectors implicitly have sizeof(base_type) stride. For now, this
+ * is always 4 bytes. This will have to change if we want to start
+ * supporting doubles or half-floats.
+ */
+ val->type->stride = 4;
+ val->type->array_element = base;
+ break;
+ }
+
+ case SpvOpTypeMatrix: {
+ struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
+ unsigned columns = w[3];
+
+ assert(glsl_type_is_vector(base->type));
+ val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
+ glsl_get_vector_elements(base->type),
+ columns);
+ assert(!glsl_type_is_error(val->type->type));
+ val->type->array_element = base;
+ val->type->row_major = false;
+ val->type->stride = 0;
+ break;
+ }
+
+ case SpvOpTypeRuntimeArray:
+ case SpvOpTypeArray: {
+ struct vtn_type *array_element =
+ vtn_value(b, w[2], vtn_value_type_type)->type;
+
+ unsigned length;
+ if (opcode == SpvOpTypeRuntimeArray) {
+ /* A length of 0 is used to denote unsized arrays */
+ length = 0;
+ } else {
+ length =
+ vtn_value(b, w[3], vtn_value_type_constant)->constant->value.u[0];
+ }
+
+ val->type->type = glsl_array_type(array_element->type, length);
+ val->type->array_element = array_element;
+ val->type->stride = 0;
+ break;
+ }
+
+ case SpvOpTypeStruct: {
+ unsigned num_fields = count - 2;
+ val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
+ val->type->offsets = ralloc_array(b, unsigned, num_fields);
+
+ NIR_VLA(struct glsl_struct_field, fields, count);
+ for (unsigned i = 0; i < num_fields; i++) {
+ val->type->members[i] =
+ vtn_value(b, w[i + 2], vtn_value_type_type)->type;
+ fields[i] = (struct glsl_struct_field) {
+ .type = val->type->members[i]->type,
+ .name = ralloc_asprintf(b, "field%d", i),
+ .location = -1,
+ };
+ }
+
+ struct member_decoration_ctx ctx = {
+ .num_fields = num_fields,
+ .fields = fields,
+ .type = val->type
+ };
+
+ vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
+
+ const char *name = val->name ? val->name : "struct";
+
+ val->type->type = glsl_struct_type(fields, num_fields, name);
+ break;
+ }
+
+ case SpvOpTypeFunction: {
+ const struct glsl_type *return_type =
+ vtn_value(b, w[2], vtn_value_type_type)->type->type;
+ NIR_VLA(struct glsl_function_param, params, count - 3);
+ for (unsigned i = 0; i < count - 3; i++) {
+ params[i].type = vtn_value(b, w[i + 3], vtn_value_type_type)->type->type;
+
+ /* FIXME: */
+ params[i].in = true;
+ params[i].out = true;
+ }
+ val->type->type = glsl_function_type(return_type, params, count - 3);
+ break;
+ }
+
+ case SpvOpTypePointer:
+ /* FIXME: For now, we'll just do the really lame thing and return
+ * the same type. The validator should ensure that the proper number
+ * of dereferences happen
+ */
+ val->type = vtn_value(b, w[3], vtn_value_type_type)->type;
+ break;
+
+ case SpvOpTypeImage: {
+ const struct glsl_type *sampled_type =
+ vtn_value(b, w[2], vtn_value_type_type)->type->type;
+
+ assert(glsl_type_is_vector_or_scalar(sampled_type));
+
+ enum glsl_sampler_dim dim;
+ switch ((SpvDim)w[3]) {
+ case SpvDim1D: dim = GLSL_SAMPLER_DIM_1D; break;
+ case SpvDim2D: dim = GLSL_SAMPLER_DIM_2D; break;
+ case SpvDim3D: dim = GLSL_SAMPLER_DIM_3D; break;
+ case SpvDimCube: dim = GLSL_SAMPLER_DIM_CUBE; break;
+ case SpvDimRect: dim = GLSL_SAMPLER_DIM_RECT; break;
+ case SpvDimBuffer: dim = GLSL_SAMPLER_DIM_BUF; break;
+ default:
+ unreachable("Invalid SPIR-V Sampler dimension");
+ }
+
+ bool is_shadow = w[4];
+ bool is_array = w[5];
+ bool multisampled = w[6];
+ unsigned sampled = w[7];
+ SpvImageFormat format = w[8];
+
+ if (count > 9)
+ val->type->access_qualifier = w[9];
+ else
+ val->type->access_qualifier = SpvAccessQualifierReadWrite;
+
+ if (multisampled) {
+ assert(dim == GLSL_SAMPLER_DIM_2D);
+ dim = GLSL_SAMPLER_DIM_MS;
+ }
+
+ val->type->image_format = translate_image_format(format);
+
+ if (sampled == 1) {
+ val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
+ glsl_get_base_type(sampled_type));
+ } else if (sampled == 2) {
+ assert(format);
+ assert(!is_shadow);
+ val->type->type = glsl_image_type(dim, is_array,
+ glsl_get_base_type(sampled_type));
+ } else {
+ assert(!"We need to know if the image will be sampled");
+ }
+ break;
+ }
+
+ case SpvOpTypeSampledImage:
+ val->type = vtn_value(b, w[2], vtn_value_type_type)->type;
+ break;
+
+ case SpvOpTypeSampler:
+ /* The actual sampler type here doesn't really matter. It gets
+ * thrown away the moment you combine it with an image. What really
+ * matters is that it's a sampler type as opposed to an integer type
+ * so the backend knows what to do.
+ */
+ val->type->type = glsl_bare_sampler_type();
+ break;
+
+ case SpvOpTypeOpaque:
+ case SpvOpTypeEvent:
+ case SpvOpTypeDeviceEvent:
+ case SpvOpTypeReserveId:
+ case SpvOpTypeQueue:
+ case SpvOpTypePipe:
+ default:
+ unreachable("Unhandled opcode");
+ }
+
+ vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
+}
+
+static nir_constant *
+vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
+{
+ nir_constant *c = rzalloc(b, nir_constant);
+
+ switch (glsl_get_base_type(type)) {
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
+ /* Nothing to do here. It's already initialized to zero */
+ break;
+
+ case GLSL_TYPE_ARRAY:
+ assert(glsl_get_length(type) > 0);
+ c->num_elements = glsl_get_length(type);
+ c->elements = ralloc_array(b, nir_constant *, c->num_elements);
+
+ c->elements[0] = vtn_null_constant(b, glsl_get_array_element(type));
+ for (unsigned i = 1; i < c->num_elements; i++)
+ c->elements[i] = c->elements[0];
+ break;
+
+ case GLSL_TYPE_STRUCT:
+ c->num_elements = glsl_get_length(type);
+ c->elements = ralloc_array(b, nir_constant *, c->num_elements);
+
+ for (unsigned i = 0; i < c->num_elements; i++) {
+ c->elements[i] = vtn_null_constant(b, glsl_get_struct_field(type, i));
+ }
+ break;
+
+ default:
+ unreachable("Invalid type for null constant");
+ }
+
+ return c;
+}
+
+static void
+spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
+ int member, const struct vtn_decoration *dec,
+ void *data)
+{
+ assert(member == -1);
+ if (dec->decoration != SpvDecorationSpecId)
+ return;
+
+ uint32_t *const_value = data;
+
+ for (unsigned i = 0; i < b->num_specializations; i++) {
+ if (b->specializations[i].id == dec->literals[0]) {
+ *const_value = b->specializations[i].data;
+ return;
+ }
+ }
+}
+
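+/* Returns the client-supplied specialization data for a spec constant
+ * decorated with SpecId, or const_value (the default encoded in the module)
+ * if no matching specialization was provided.
+ */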
+static uint32_t
+get_specialization(struct vtn_builder *b, struct vtn_value *val,
+ uint32_t const_value)
+{
+   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &const_value);
+ return const_value;
+}
+
+static void
+vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
+ val->const_type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
+ val->constant = rzalloc(b, nir_constant);
+ switch (opcode) {
+ case SpvOpConstantTrue:
+ assert(val->const_type == glsl_bool_type());
+ val->constant->value.u[0] = NIR_TRUE;
+ break;
+ case SpvOpConstantFalse:
+ assert(val->const_type == glsl_bool_type());
+ val->constant->value.u[0] = NIR_FALSE;
+ break;
+
+ case SpvOpSpecConstantTrue:
+ case SpvOpSpecConstantFalse: {
+ assert(val->const_type == glsl_bool_type());
+ uint32_t int_val =
+ get_specialization(b, val, (opcode == SpvOpSpecConstantTrue));
+ val->constant->value.u[0] = int_val ? NIR_TRUE : NIR_FALSE;
+ break;
+ }
+
+ case SpvOpConstant:
+ assert(glsl_type_is_scalar(val->const_type));
+ val->constant->value.u[0] = w[3];
+ break;
+ case SpvOpSpecConstant:
+ assert(glsl_type_is_scalar(val->const_type));
+ val->constant->value.u[0] = get_specialization(b, val, w[3]);
+ break;
+ case SpvOpSpecConstantComposite:
+ case SpvOpConstantComposite: {
+ unsigned elem_count = count - 3;
+ nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
+ for (unsigned i = 0; i < elem_count; i++)
+ elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;
+
+ switch (glsl_get_base_type(val->const_type)) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_BOOL:
+ if (glsl_type_is_matrix(val->const_type)) {
+ unsigned rows = glsl_get_vector_elements(val->const_type);
+ assert(glsl_get_matrix_columns(val->const_type) == elem_count);
+ for (unsigned i = 0; i < elem_count; i++)
+ for (unsigned j = 0; j < rows; j++)
+ val->constant->value.u[rows * i + j] = elems[i]->value.u[j];
+ } else {
+ assert(glsl_type_is_vector(val->const_type));
+ assert(glsl_get_vector_elements(val->const_type) == elem_count);
+ for (unsigned i = 0; i < elem_count; i++)
+ val->constant->value.u[i] = elems[i]->value.u[0];
+ }
+ ralloc_free(elems);
+ break;
+
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_ARRAY:
+ ralloc_steal(val->constant, elems);
+ val->constant->num_elements = elem_count;
+ val->constant->elements = elems;
+ break;
+
+ default:
+ unreachable("Unsupported type for constants");
+ }
+ break;
+ }
+
+ case SpvOpSpecConstantOp: {
+ SpvOp opcode = get_specialization(b, val, w[3]);
+ switch (opcode) {
+ case SpvOpVectorShuffle: {
+ struct vtn_value *v0 = vtn_value(b, w[4], vtn_value_type_constant);
+ struct vtn_value *v1 = vtn_value(b, w[5], vtn_value_type_constant);
+ unsigned len0 = glsl_get_vector_elements(v0->const_type);
+ unsigned len1 = glsl_get_vector_elements(v1->const_type);
+
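+         /* Concatenate the two source vectors so the shuffle literals can
+          * index them as a single array.  A literal of 0xffffffff marks the
+          * result component as undefined.
+          */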
+ uint32_t u[8];
+ for (unsigned i = 0; i < len0; i++)
+ u[i] = v0->constant->value.u[i];
+ for (unsigned i = 0; i < len1; i++)
+ u[len0 + i] = v1->constant->value.u[i];
+
+ for (unsigned i = 0; i < count - 6; i++) {
+ uint32_t comp = w[i + 6];
+ if (comp == (uint32_t)-1) {
+ val->constant->value.u[i] = 0xdeadbeef;
+ } else {
+ val->constant->value.u[i] = u[comp];
+ }
+ }
+ return;
+ }
+
+ case SpvOpCompositeExtract:
+ case SpvOpCompositeInsert: {
+ struct vtn_value *comp;
+ unsigned deref_start;
+ struct nir_constant **c;
+ if (opcode == SpvOpCompositeExtract) {
+ comp = vtn_value(b, w[4], vtn_value_type_constant);
+ deref_start = 5;
+ c = &comp->constant;
+ } else {
+ comp = vtn_value(b, w[5], vtn_value_type_constant);
+ deref_start = 6;
+ val->constant = nir_constant_clone(comp->constant,
+ (nir_variable *)b);
+ c = &val->constant;
+ }
+
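+         /* Walk the index chain.  Struct and array levels follow the child
+          * constant pointer; once vector/matrix granularity is reached, a
+          * flat component offset is accumulated in "elem" instead.
+          */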
+ int elem = -1;
+ const struct glsl_type *type = comp->const_type;
+ for (unsigned i = deref_start; i < count; i++) {
+ switch (glsl_get_base_type(type)) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_BOOL:
+ /* If we hit this granularity, we're picking off an element */
+ if (elem < 0)
+ elem = 0;
+
+ if (glsl_type_is_matrix(type)) {
+ elem += w[i] * glsl_get_vector_elements(type);
+ type = glsl_get_column_type(type);
+ } else {
+ assert(glsl_type_is_vector(type));
+ elem += w[i];
+ type = glsl_scalar_type(glsl_get_base_type(type));
+ }
+ continue;
+
+ case GLSL_TYPE_ARRAY:
+ c = &(*c)->elements[w[i]];
+ type = glsl_get_array_element(type);
+ continue;
+
+ case GLSL_TYPE_STRUCT:
+ c = &(*c)->elements[w[i]];
+ type = glsl_get_struct_field(type, w[i]);
+ continue;
+
+ default:
+ unreachable("Invalid constant type");
+ }
+ }
+
+ if (opcode == SpvOpCompositeExtract) {
+ if (elem == -1) {
+ val->constant = *c;
+ } else {
+ unsigned num_components = glsl_get_vector_elements(type);
+ for (unsigned i = 0; i < num_components; i++)
+ val->constant->value.u[i] = (*c)->value.u[elem + i];
+ }
+ } else {
+ struct vtn_value *insert =
+ vtn_value(b, w[4], vtn_value_type_constant);
+ assert(insert->const_type == type);
+ if (elem == -1) {
+ *c = insert->constant;
+ } else {
+ unsigned num_components = glsl_get_vector_elements(type);
+ for (unsigned i = 0; i < num_components; i++)
+ (*c)->value.u[elem + i] = insert->constant->value.u[i];
+ }
+ }
+ return;
+ }
+
+ default: {
+ bool swap;
+ nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap);
+
+ unsigned num_components = glsl_get_vector_elements(val->const_type);
+ unsigned bit_size =
+ glsl_get_bit_size(glsl_get_base_type(val->const_type));
+
+ nir_const_value src[3];
+ assert(count <= 7);
+ for (unsigned i = 0; i < count - 4; i++) {
+ nir_constant *c =
+ vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;
+
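+            /* vtn_nir_alu_op_for_spirv_opcode may pick a NIR opcode whose
+             * two sources are in the opposite order; "swap" accounts for
+             * that here.
+             */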
+ unsigned j = swap ? 1 - i : i;
+ assert(bit_size == 32);
+ for (unsigned k = 0; k < num_components; k++)
+ src[j].u32[k] = c->value.u[k];
+ }
+
+ nir_const_value res = nir_eval_const_opcode(op, num_components,
+ bit_size, src);
+
+ for (unsigned k = 0; k < num_components; k++)
+ val->constant->value.u[k] = res.u32[k];
+
+ return;
+ } /* default */
+ }
+ }
+
+ case SpvOpConstantNull:
+ val->constant = vtn_null_constant(b, val->const_type);
+ break;
+
+ case SpvOpConstantSampler:
+ assert(!"OpConstantSampler requires Kernel Capability");
+ break;
+
+ default:
+ unreachable("Unhandled opcode");
+ }
+}
+
+static void
+vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct nir_function *callee =
+ vtn_value(b, w[3], vtn_value_type_function)->func->impl->function;
+
+ nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
+ for (unsigned i = 0; i < call->num_params; i++) {
+ unsigned arg_id = w[4 + i];
+ struct vtn_value *arg = vtn_untyped_value(b, arg_id);
+ if (arg->value_type == vtn_value_type_access_chain) {
+ nir_deref_var *d = vtn_access_chain_to_deref(b, arg->access_chain);
+ call->params[i] = nir_deref_as_var(nir_copy_deref(call, &d->deref));
+ } else {
+ struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);
+
+ /* Make a temporary to store the argument in */
+ nir_variable *tmp =
+ nir_local_variable_create(b->impl, arg_ssa->type, "arg_tmp");
+ call->params[i] = nir_deref_var_create(call, tmp);
+
+ vtn_local_store(b, arg_ssa, call->params[i]);
+ }
+ }
+
+ nir_variable *out_tmp = NULL;
+ if (!glsl_type_is_void(callee->return_type)) {
+ out_tmp = nir_local_variable_create(b->impl, callee->return_type,
+ "out_tmp");
+ call->return_deref = nir_deref_var_create(call, out_tmp);
+ }
+
+ nir_builder_instr_insert(&b->nb, &call->instr);
+
+ if (glsl_type_is_void(callee->return_type)) {
+ vtn_push_value(b, w[2], vtn_value_type_undef);
+ } else {
+ struct vtn_value *retval = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ retval->ssa = vtn_local_load(b, call->return_deref);
+ }
+}
+
+struct vtn_ssa_value *
+vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
+{
+ struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
+ val->type = type;
+
+ if (!glsl_type_is_vector_or_scalar(type)) {
+ unsigned elems = glsl_get_length(type);
+ val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+ for (unsigned i = 0; i < elems; i++) {
+ const struct glsl_type *child_type;
+
+ switch (glsl_get_base_type(type)) {
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
+ child_type = glsl_get_column_type(type);
+ break;
+ case GLSL_TYPE_ARRAY:
+ child_type = glsl_get_array_element(type);
+ break;
+ case GLSL_TYPE_STRUCT:
+ child_type = glsl_get_struct_field(type, i);
+ break;
+ default:
+            unreachable("unknown base type");
+ }
+
+ val->elems[i] = vtn_create_ssa_value(b, child_type);
+ }
+ }
+
+ return val;
+}
+
+static nir_tex_src
+vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
+{
+ nir_tex_src src;
+ src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
+ src.src_type = type;
+ return src;
+}
+
+static void
+vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ if (opcode == SpvOpSampledImage) {
+ struct vtn_value *val =
+ vtn_push_value(b, w[2], vtn_value_type_sampled_image);
+ val->sampled_image = ralloc(b, struct vtn_sampled_image);
+ val->sampled_image->image =
+ vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+ val->sampled_image->sampler =
+ vtn_value(b, w[4], vtn_value_type_access_chain)->access_chain;
+ return;
+ } else if (opcode == SpvOpImage) {
+ struct vtn_value *val =
+ vtn_push_value(b, w[2], vtn_value_type_access_chain);
+ struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
+ if (src_val->value_type == vtn_value_type_sampled_image) {
+ val->access_chain = src_val->sampled_image->image;
+ } else {
+ assert(src_val->value_type == vtn_value_type_access_chain);
+ val->access_chain = src_val->access_chain;
+ }
+ return;
+ }
+
+ struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+
+ struct vtn_sampled_image sampled;
+ struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
+ if (sampled_val->value_type == vtn_value_type_sampled_image) {
+ sampled = *sampled_val->sampled_image;
+ } else {
+ assert(sampled_val->value_type == vtn_value_type_access_chain);
+ sampled.image = NULL;
+ sampled.sampler = sampled_val->access_chain;
+ }
+
+ const struct glsl_type *image_type;
+ if (sampled.image) {
+ image_type = sampled.image->var->var->interface_type;
+ } else {
+ image_type = sampled.sampler->var->var->interface_type;
+ }
+
+ nir_tex_src srcs[8]; /* 8 should be enough */
+ nir_tex_src *p = srcs;
+
+ unsigned idx = 4;
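+   /* w[1] is the result type, w[2] the result <id> and w[3] the image or
+    * sampled image, so the remaining operands start at w[4].
+    */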
+
+ bool has_coord = false;
+ switch (opcode) {
+ case SpvOpImageSampleImplicitLod:
+ case SpvOpImageSampleExplicitLod:
+ case SpvOpImageSampleDrefImplicitLod:
+ case SpvOpImageSampleDrefExplicitLod:
+ case SpvOpImageSampleProjImplicitLod:
+ case SpvOpImageSampleProjExplicitLod:
+ case SpvOpImageSampleProjDrefImplicitLod:
+ case SpvOpImageSampleProjDrefExplicitLod:
+ case SpvOpImageFetch:
+ case SpvOpImageGather:
+ case SpvOpImageDrefGather:
+ case SpvOpImageQueryLod: {
+ /* All these types have the coordinate as their first real argument */
+ struct vtn_ssa_value *coord = vtn_ssa_value(b, w[idx++]);
+ has_coord = true;
+ p->src = nir_src_for_ssa(coord->def);
+ p->src_type = nir_tex_src_coord;
+ p++;
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ /* These all have an explicit depth value as their next source */
+ switch (opcode) {
+ case SpvOpImageSampleDrefImplicitLod:
+ case SpvOpImageSampleDrefExplicitLod:
+ case SpvOpImageSampleProjDrefImplicitLod:
+ case SpvOpImageSampleProjDrefExplicitLod:
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparitor);
+ break;
+ default:
+ break;
+ }
+
+ /* For OpImageQuerySizeLod, we always have an LOD */
+ if (opcode == SpvOpImageQuerySizeLod)
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
+
+ /* Figure out the base texture operation */
+ nir_texop texop;
+ switch (opcode) {
+ case SpvOpImageSampleImplicitLod:
+ case SpvOpImageSampleDrefImplicitLod:
+ case SpvOpImageSampleProjImplicitLod:
+ case SpvOpImageSampleProjDrefImplicitLod:
+ texop = nir_texop_tex;
+ break;
+
+ case SpvOpImageSampleExplicitLod:
+ case SpvOpImageSampleDrefExplicitLod:
+ case SpvOpImageSampleProjExplicitLod:
+ case SpvOpImageSampleProjDrefExplicitLod:
+ texop = nir_texop_txl;
+ break;
+
+ case SpvOpImageFetch:
+ if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_MS) {
+ texop = nir_texop_txf_ms;
+ } else {
+ texop = nir_texop_txf;
+ }
+ break;
+
+ case SpvOpImageGather:
+ case SpvOpImageDrefGather:
+ texop = nir_texop_tg4;
+ break;
+
+ case SpvOpImageQuerySizeLod:
+ case SpvOpImageQuerySize:
+ texop = nir_texop_txs;
+ break;
+
+ case SpvOpImageQueryLod:
+ texop = nir_texop_lod;
+ break;
+
+ case SpvOpImageQueryLevels:
+ texop = nir_texop_query_levels;
+ break;
+
+ case SpvOpImageQuerySamples:
+ default:
+ unreachable("Unhandled opcode");
+ }
+
+ /* Now we need to handle some number of optional arguments */
+ if (idx < count) {
+ uint32_t operands = w[idx++];
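+      /* The image-operands bitmask is followed by the corresponding
+       * operands, ordered from the least- to the most-significant set bit.
+       */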
+
+ if (operands & SpvImageOperandsBiasMask) {
+ assert(texop == nir_texop_tex);
+ texop = nir_texop_txb;
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);
+ }
+
+ if (operands & SpvImageOperandsLodMask) {
+ assert(texop == nir_texop_txl || texop == nir_texop_txf ||
+ texop == nir_texop_txf_ms || texop == nir_texop_txs);
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
+ }
+
+ if (operands & SpvImageOperandsGradMask) {
+ assert(texop == nir_texop_tex);
+ texop = nir_texop_txd;
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
+ }
+
+ if (operands & SpvImageOperandsOffsetMask ||
+ operands & SpvImageOperandsConstOffsetMask)
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);
+
+ if (operands & SpvImageOperandsConstOffsetsMask)
+ assert(!"Constant offsets to texture gather not yet implemented");
+
+ if (operands & SpvImageOperandsSampleMask) {
+ assert(texop == nir_texop_txf_ms);
+ texop = nir_texop_txf_ms;
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
+ }
+ }
+ /* We should have now consumed exactly all of the arguments */
+ assert(idx == count);
+
+ nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
+ instr->op = texop;
+
+ memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));
+
+ instr->sampler_dim = glsl_get_sampler_dim(image_type);
+ instr->is_array = glsl_sampler_type_is_array(image_type);
+ instr->is_shadow = glsl_sampler_type_is_shadow(image_type);
+ instr->is_new_style_shadow = instr->is_shadow;
+
+ if (has_coord) {
+ switch (instr->sampler_dim) {
+ case GLSL_SAMPLER_DIM_1D:
+ case GLSL_SAMPLER_DIM_BUF:
+ instr->coord_components = 1;
+ break;
+ case GLSL_SAMPLER_DIM_2D:
+ case GLSL_SAMPLER_DIM_RECT:
+ case GLSL_SAMPLER_DIM_MS:
+ instr->coord_components = 2;
+ break;
+ case GLSL_SAMPLER_DIM_3D:
+ case GLSL_SAMPLER_DIM_CUBE:
+ instr->coord_components = 3;
+ break;
+ default:
+         assert(!"Invalid sampler type");
+ }
+
+ if (instr->is_array)
+ instr->coord_components++;
+ } else {
+ instr->coord_components = 0;
+ }
+
+ switch (glsl_get_sampler_result_type(image_type)) {
+ case GLSL_TYPE_FLOAT: instr->dest_type = nir_type_float; break;
+ case GLSL_TYPE_INT: instr->dest_type = nir_type_int; break;
+ case GLSL_TYPE_UINT: instr->dest_type = nir_type_uint; break;
+ case GLSL_TYPE_BOOL: instr->dest_type = nir_type_bool; break;
+ default:
+ unreachable("Invalid base type for sampler result");
+ }
+
+ nir_deref_var *sampler = vtn_access_chain_to_deref(b, sampled.sampler);
+ if (sampled.image) {
+ nir_deref_var *image = vtn_access_chain_to_deref(b, sampled.image);
+ instr->texture = nir_deref_as_var(nir_copy_deref(instr, &image->deref));
+ } else {
+ instr->texture = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
+ }
+
+ switch (instr->op) {
+ case nir_texop_tex:
+ case nir_texop_txb:
+ case nir_texop_txl:
+ case nir_texop_txd:
+ /* These operations require a sampler */
+ instr->sampler = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
+ break;
+ case nir_texop_txf:
+ case nir_texop_txf_ms:
+ case nir_texop_txs:
+ case nir_texop_lod:
+ case nir_texop_tg4:
+ case nir_texop_query_levels:
+ case nir_texop_texture_samples:
+ case nir_texop_samples_identical:
+ /* These don't */
+ instr->sampler = NULL;
+ break;
+ }
+
+ nir_ssa_dest_init(&instr->instr, &instr->dest,
+ nir_tex_instr_dest_size(instr), 32, NULL);
+
+ assert(glsl_get_vector_elements(ret_type->type) ==
+ nir_tex_instr_dest_size(instr));
+
+ val->ssa = vtn_create_ssa_value(b, ret_type->type);
+ val->ssa->def = &instr->dest.ssa;
+
+ nir_builder_instr_insert(&b->nb, &instr->instr);
+}
+
+static nir_ssa_def *
+get_image_coord(struct vtn_builder *b, uint32_t value)
+{
+ struct vtn_ssa_value *coord = vtn_ssa_value(b, value);
+
+ /* The image_load_store intrinsics assume a 4-dim coordinate */
+ unsigned dim = glsl_get_vector_elements(coord->type);
+ unsigned swizzle[4];
+ for (unsigned i = 0; i < 4; i++)
+ swizzle[i] = MIN2(i, dim - 1);
+
+ return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
+}
+
+static void
+vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ /* Just get this one out of the way */
+ if (opcode == SpvOpImageTexelPointer) {
+ struct vtn_value *val =
+ vtn_push_value(b, w[2], vtn_value_type_image_pointer);
+ val->image = ralloc(b, struct vtn_image_pointer);
+
+ val->image->image =
+ vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+ val->image->coord = get_image_coord(b, w[4]);
+ val->image->sample = vtn_ssa_value(b, w[5])->def;
+ return;
+ }
+
+ struct vtn_image_pointer image;
+
+ switch (opcode) {
+ case SpvOpAtomicExchange:
+ case SpvOpAtomicCompareExchange:
+ case SpvOpAtomicCompareExchangeWeak:
+ case SpvOpAtomicIIncrement:
+ case SpvOpAtomicIDecrement:
+ case SpvOpAtomicIAdd:
+ case SpvOpAtomicISub:
+ case SpvOpAtomicSMin:
+ case SpvOpAtomicUMin:
+ case SpvOpAtomicSMax:
+ case SpvOpAtomicUMax:
+ case SpvOpAtomicAnd:
+ case SpvOpAtomicOr:
+ case SpvOpAtomicXor:
+ image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
+ break;
+
+ case SpvOpImageQuerySize:
+ image.image =
+ vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+ image.coord = NULL;
+ image.sample = NULL;
+ break;
+
+ case SpvOpImageRead:
+ image.image =
+ vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+ image.coord = get_image_coord(b, w[4]);
+
+ if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) {
+ assert(w[5] == SpvImageOperandsSampleMask);
+ image.sample = vtn_ssa_value(b, w[6])->def;
+ } else {
+ image.sample = nir_ssa_undef(&b->nb, 1, 32);
+ }
+ break;
+
+ case SpvOpImageWrite:
+ image.image =
+ vtn_value(b, w[1], vtn_value_type_access_chain)->access_chain;
+ image.coord = get_image_coord(b, w[2]);
+
+ /* texel = w[3] */
+
+ if (count > 4 && (w[4] & SpvImageOperandsSampleMask)) {
+ assert(w[4] == SpvImageOperandsSampleMask);
+ image.sample = vtn_ssa_value(b, w[5])->def;
+ } else {
+ image.sample = nir_ssa_undef(&b->nb, 1, 32);
+ }
+ break;
+
+ default:
+ unreachable("Invalid image opcode");
+ }
+
+ nir_intrinsic_op op;
+ switch (opcode) {
+#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_##N; break;
+ OP(ImageQuerySize, size)
+ OP(ImageRead, load)
+ OP(ImageWrite, store)
+ OP(AtomicExchange, atomic_exchange)
+ OP(AtomicCompareExchange, atomic_comp_swap)
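+   /* Increment, decrement and subtract all lower to atomic_add; their
+    * source value is fixed up below (a constant 1, a constant -1, or a
+    * negated operand).
+    */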
+ OP(AtomicIIncrement, atomic_add)
+ OP(AtomicIDecrement, atomic_add)
+ OP(AtomicIAdd, atomic_add)
+ OP(AtomicISub, atomic_add)
+ OP(AtomicSMin, atomic_min)
+ OP(AtomicUMin, atomic_min)
+ OP(AtomicSMax, atomic_max)
+ OP(AtomicUMax, atomic_max)
+ OP(AtomicAnd, atomic_and)
+ OP(AtomicOr, atomic_or)
+ OP(AtomicXor, atomic_xor)
+#undef OP
+ default:
+ unreachable("Invalid image opcode");
+ }
+
+ nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
+
+ nir_deref_var *image_deref = vtn_access_chain_to_deref(b, image.image);
+ intrin->variables[0] =
+ nir_deref_as_var(nir_copy_deref(&intrin->instr, &image_deref->deref));
+
+ /* ImageQuerySize doesn't take any extra parameters */
+ if (opcode != SpvOpImageQuerySize) {
+ /* The image coordinate is always 4 components but we may not have that
+ * many. Swizzle to compensate.
+ */
+ unsigned swiz[4];
+ for (unsigned i = 0; i < 4; i++)
+ swiz[i] = i < image.coord->num_components ? i : 0;
+ intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
+ swiz, 4, false));
+ intrin->src[1] = nir_src_for_ssa(image.sample);
+ }
+
+ switch (opcode) {
+ case SpvOpImageQuerySize:
+ case SpvOpImageRead:
+ break;
+ case SpvOpImageWrite:
+ intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
+ break;
+ case SpvOpAtomicIIncrement:
+ intrin->src[2] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
+ break;
+ case SpvOpAtomicIDecrement:
+ intrin->src[2] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
+ break;
+
+ case SpvOpAtomicExchange:
+ case SpvOpAtomicIAdd:
+ case SpvOpAtomicSMin:
+ case SpvOpAtomicUMin:
+ case SpvOpAtomicSMax:
+ case SpvOpAtomicUMax:
+ case SpvOpAtomicAnd:
+ case SpvOpAtomicOr:
+ case SpvOpAtomicXor:
+ intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
+ break;
+
+ case SpvOpAtomicCompareExchange:
+ intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
+ intrin->src[3] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
+ break;
+
+ case SpvOpAtomicISub:
+ intrin->src[2] = nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
+ break;
+
+ default:
+ unreachable("Invalid image opcode");
+ }
+
+ if (opcode != SpvOpImageWrite) {
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
+ nir_ssa_dest_init(&intrin->instr, &intrin->dest, 4, 32, NULL);
+
+ nir_builder_instr_insert(&b->nb, &intrin->instr);
+
+ /* The image intrinsics always return 4 channels but we may not want
+ * that many. Emit a mov to trim it down.
+ */
+ unsigned swiz[4] = {0, 1, 2, 3};
+ val->ssa = vtn_create_ssa_value(b, type->type);
+ val->ssa->def = nir_swizzle(&b->nb, &intrin->dest.ssa, swiz,
+ glsl_get_vector_elements(type->type), false);
+ } else {
+ nir_builder_instr_insert(&b->nb, &intrin->instr);
+ }
+}
+
+static nir_intrinsic_op
+get_ssbo_nir_atomic_op(SpvOp opcode)
+{
+ switch (opcode) {
+#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
+ OP(AtomicExchange, atomic_exchange)
+ OP(AtomicCompareExchange, atomic_comp_swap)
+ OP(AtomicIIncrement, atomic_add)
+ OP(AtomicIDecrement, atomic_add)
+ OP(AtomicIAdd, atomic_add)
+ OP(AtomicISub, atomic_add)
+ OP(AtomicSMin, atomic_imin)
+ OP(AtomicUMin, atomic_umin)
+ OP(AtomicSMax, atomic_imax)
+ OP(AtomicUMax, atomic_umax)
+ OP(AtomicAnd, atomic_and)
+ OP(AtomicOr, atomic_or)
+ OP(AtomicXor, atomic_xor)
+#undef OP
+ default:
+ unreachable("Invalid SSBO atomic");
+ }
+}
+
+static nir_intrinsic_op
+get_shared_nir_atomic_op(SpvOp opcode)
+{
+ switch (opcode) {
+#define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
+ OP(AtomicExchange, atomic_exchange)
+ OP(AtomicCompareExchange, atomic_comp_swap)
+ OP(AtomicIIncrement, atomic_add)
+ OP(AtomicIDecrement, atomic_add)
+ OP(AtomicIAdd, atomic_add)
+ OP(AtomicISub, atomic_add)
+ OP(AtomicSMin, atomic_imin)
+ OP(AtomicUMin, atomic_umin)
+ OP(AtomicSMax, atomic_imax)
+ OP(AtomicUMax, atomic_umax)
+ OP(AtomicAnd, atomic_and)
+ OP(AtomicOr, atomic_or)
+ OP(AtomicXor, atomic_xor)
+#undef OP
+ default:
+ unreachable("Invalid shared atomic");
+ }
+}
+
+static void
+fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, nir_src *src)
+{
+ switch (opcode) {
+ case SpvOpAtomicIIncrement:
+ src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
+ break;
+
+ case SpvOpAtomicIDecrement:
+ src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
+ break;
+
+ case SpvOpAtomicISub:
+ src[0] =
+ nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
+ break;
+
+ case SpvOpAtomicCompareExchange:
+ src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
+ src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
+ break;
+
+ case SpvOpAtomicExchange:
+ case SpvOpAtomicIAdd:
+ case SpvOpAtomicSMin:
+ case SpvOpAtomicUMin:
+ case SpvOpAtomicSMax:
+ case SpvOpAtomicUMax:
+ case SpvOpAtomicAnd:
+ case SpvOpAtomicOr:
+ case SpvOpAtomicXor:
+ src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
+ break;
+
+ default:
+ unreachable("Invalid SPIR-V atomic");
+ }
+}
+
+static void
+vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_access_chain *chain =
+ vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+ nir_intrinsic_instr *atomic;
+
+ /*
+ SpvScope scope = w[4];
+ SpvMemorySemanticsMask semantics = w[5];
+ */
+
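+   /* Shared (workgroup) variables use deref-based atomic intrinsics, while
+    * SSBO atomics take a buffer index and a byte offset.
+    */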
+ if (chain->var->mode == vtn_variable_mode_workgroup) {
+ nir_deref *deref = &vtn_access_chain_to_deref(b, chain)->deref;
+ nir_intrinsic_op op = get_shared_nir_atomic_op(opcode);
+ atomic = nir_intrinsic_instr_create(b->nb.shader, op);
+ atomic->variables[0] = nir_deref_as_var(nir_copy_deref(atomic, deref));
+ fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
+ } else {
+ assert(chain->var->mode == vtn_variable_mode_ssbo);
+ struct vtn_type *type;
+ nir_ssa_def *offset, *index;
+ offset = vtn_access_chain_to_offset(b, chain, &index, &type, NULL, false);
+
+ nir_intrinsic_op op = get_ssbo_nir_atomic_op(opcode);
+
+ atomic = nir_intrinsic_instr_create(b->nb.shader, op);
+ atomic->src[0] = nir_src_for_ssa(index);
+ atomic->src[1] = nir_src_for_ssa(offset);
+ fill_common_atomic_sources(b, opcode, w, &atomic->src[2]);
+ }
+
+ nir_ssa_dest_init(&atomic->instr, &atomic->dest, 1, 32, NULL);
+
+ struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ val->ssa = rzalloc(b, struct vtn_ssa_value);
+ val->ssa->def = &atomic->dest.ssa;
+ val->ssa->type = type->type;
+
+ nir_builder_instr_insert(&b->nb, &atomic->instr);
+}
+
+static nir_alu_instr *
+create_vec(nir_shader *shader, unsigned num_components, unsigned bit_size)
+{
+ nir_op op;
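+   /* A single component has no vecN opcode; use a plain move instead. */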
+ switch (num_components) {
+ case 1: op = nir_op_fmov; break;
+ case 2: op = nir_op_vec2; break;
+ case 3: op = nir_op_vec3; break;
+ case 4: op = nir_op_vec4; break;
+ default: unreachable("bad vector size");
+ }
+
+ nir_alu_instr *vec = nir_alu_instr_create(shader, op);
+ nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
+ bit_size, NULL);
+ vec->dest.write_mask = (1 << num_components) - 1;
+
+ return vec;
+}
+
+struct vtn_ssa_value *
+vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
+{
+ if (src->transposed)
+ return src->transposed;
+
+ struct vtn_ssa_value *dest =
+ vtn_create_ssa_value(b, glsl_transposed_type(src->type));
+
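+   /* Column i of the result gathers component i from every source column. */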
+ for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
+ nir_alu_instr *vec = create_vec(b->shader,
+ glsl_get_matrix_columns(src->type),
+ glsl_get_bit_size(glsl_get_base_type(src->type)));
+ if (glsl_type_is_vector_or_scalar(src->type)) {
+ vec->src[0].src = nir_src_for_ssa(src->def);
+ vec->src[0].swizzle[0] = i;
+ } else {
+ for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
+ vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
+ vec->src[j].swizzle[0] = i;
+ }
+ }
+ nir_builder_instr_insert(&b->nb, &vec->instr);
+ dest->elems[i]->def = &vec->dest.dest.ssa;
+ }
+
+ dest->transposed = src;
+
+ return dest;
+}
+
+nir_ssa_def *
+vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
+{
+ unsigned swiz[4] = { index };
+ return nir_swizzle(&b->nb, src, swiz, 1, true);
+}
+
+nir_ssa_def *
+vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
+ unsigned index)
+{
+ nir_alu_instr *vec = create_vec(b->shader, src->num_components,
+ src->bit_size);
+
+ for (unsigned i = 0; i < src->num_components; i++) {
+ if (i == index) {
+ vec->src[i].src = nir_src_for_ssa(insert);
+ } else {
+ vec->src[i].src = nir_src_for_ssa(src);
+ vec->src[i].swizzle[0] = i;
+ }
+ }
+
+ nir_builder_instr_insert(&b->nb, &vec->instr);
+
+ return &vec->dest.dest.ssa;
+}
+
+nir_ssa_def *
+vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
+ nir_ssa_def *index)
+{
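+   /* Lower the dynamic index to a chain of bcsels that picks component i
+    * when index == i, with component 0 as the default.
+    */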
+ nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
+ for (unsigned i = 1; i < src->num_components; i++)
+ dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
+ vtn_vector_extract(b, src, i), dest);
+
+ return dest;
+}
+
+nir_ssa_def *
+vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
+ nir_ssa_def *insert, nir_ssa_def *index)
+{
+ nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
+ for (unsigned i = 1; i < src->num_components; i++)
+ dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
+ vtn_vector_insert(b, src, insert, i), dest);
+
+ return dest;
+}
+
+static nir_ssa_def *
+vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
+ nir_ssa_def *src0, nir_ssa_def *src1,
+ const uint32_t *indices)
+{
+ nir_alu_instr *vec = create_vec(b->shader, num_components, src0->bit_size);
+
+ for (unsigned i = 0; i < num_components; i++) {
+ uint32_t index = indices[i];
+ if (index == 0xffffffff) {
+ vec->src[i].src =
+ nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
+ } else if (index < src0->num_components) {
+ vec->src[i].src = nir_src_for_ssa(src0);
+ vec->src[i].swizzle[0] = index;
+ } else {
+ vec->src[i].src = nir_src_for_ssa(src1);
+ vec->src[i].swizzle[0] = index - src0->num_components;
+ }
+ }
+
+ nir_builder_instr_insert(&b->nb, &vec->instr);
+
+ return &vec->dest.dest.ssa;
+}
+
+/*
+ * Concatenates a number of vectors/scalars together to produce a vector
+ */
+static nir_ssa_def *
+vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
+ unsigned num_srcs, nir_ssa_def **srcs)
+{
+ nir_alu_instr *vec = create_vec(b->shader, num_components,
+ srcs[0]->bit_size);
+
+ unsigned dest_idx = 0;
+ for (unsigned i = 0; i < num_srcs; i++) {
+ nir_ssa_def *src = srcs[i];
+ for (unsigned j = 0; j < src->num_components; j++) {
+ vec->src[dest_idx].src = nir_src_for_ssa(src);
+ vec->src[dest_idx].swizzle[0] = j;
+ dest_idx++;
+ }
+ }
+
+ nir_builder_instr_insert(&b->nb, &vec->instr);
+
+ return &vec->dest.dest.ssa;
+}
+
+static struct vtn_ssa_value *
+vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
+{
+ struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
+ dest->type = src->type;
+
+ if (glsl_type_is_vector_or_scalar(src->type)) {
+ dest->def = src->def;
+ } else {
+ unsigned elems = glsl_get_length(src->type);
+
+ dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
+ for (unsigned i = 0; i < elems; i++)
+ dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
+ }
+
+ return dest;
+}
+
+static struct vtn_ssa_value *
+vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
+ struct vtn_ssa_value *insert, const uint32_t *indices,
+ unsigned num_indices)
+{
+ struct vtn_ssa_value *dest = vtn_composite_copy(b, src);
+
+ struct vtn_ssa_value *cur = dest;
+ unsigned i;
+ for (i = 0; i < num_indices - 1; i++) {
+ cur = cur->elems[indices[i]];
+ }
+
+ if (glsl_type_is_vector_or_scalar(cur->type)) {
+ /* According to the SPIR-V spec, OpCompositeInsert may work down to
+ * the component granularity. In that case, the last index will be
+ * the index to insert the scalar into the vector.
+ */
+
+ cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
+ } else {
+ cur->elems[indices[i]] = insert;
+ }
+
+ return dest;
+}
+
+static struct vtn_ssa_value *
+vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
+ const uint32_t *indices, unsigned num_indices)
+{
+ struct vtn_ssa_value *cur = src;
+ for (unsigned i = 0; i < num_indices; i++) {
+ if (glsl_type_is_vector_or_scalar(cur->type)) {
+ assert(i == num_indices - 1);
+ /* According to the SPIR-V spec, OpCompositeExtract may work down to
+ * the component granularity. The last index will be the index of the
+ * vector to extract.
+ */
+
+ struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
+ ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
+ ret->def = vtn_vector_extract(b, cur->def, indices[i]);
+ return ret;
+ } else {
+ cur = cur->elems[indices[i]];
+ }
+ }
+
+ return cur;
+}
+
+static void
+vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ const struct glsl_type *type =
+ vtn_value(b, w[1], vtn_value_type_type)->type->type;
+ val->ssa = vtn_create_ssa_value(b, type);
+
+ switch (opcode) {
+ case SpvOpVectorExtractDynamic:
+ val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
+ vtn_ssa_value(b, w[4])->def);
+ break;
+
+ case SpvOpVectorInsertDynamic:
+ val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
+ vtn_ssa_value(b, w[4])->def,
+ vtn_ssa_value(b, w[5])->def);
+ break;
+
+ case SpvOpVectorShuffle:
+ val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
+ vtn_ssa_value(b, w[3])->def,
+ vtn_ssa_value(b, w[4])->def,
+ w + 5);
+ break;
+
+ case SpvOpCompositeConstruct: {
+ unsigned elems = count - 3;
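+      /* The constituent <id>s start at w[3].  Vectors are built with a
+       * single vecN instruction; other composites just collect their
+       * element values.
+       */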
+ if (glsl_type_is_vector_or_scalar(type)) {
+ nir_ssa_def *srcs[4];
+ for (unsigned i = 0; i < elems; i++)
+ srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
+ val->ssa->def =
+ vtn_vector_construct(b, glsl_get_vector_elements(type),
+ elems, srcs);
+ } else {
+ val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+ for (unsigned i = 0; i < elems; i++)
+ val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
+ }
+ break;
+ }
+ case SpvOpCompositeExtract:
+ val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
+ w + 4, count - 4);
+ break;
+
+ case SpvOpCompositeInsert:
+ val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
+ vtn_ssa_value(b, w[3]),
+ w + 5, count - 5);
+ break;
+
+ case SpvOpCopyObject:
+ val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
+ break;
+
+ default:
+ unreachable("unknown composite operation");
+ }
+}
+
+static void
+vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ nir_intrinsic_op intrinsic_op;
+ switch (opcode) {
+ case SpvOpEmitVertex:
+ case SpvOpEmitStreamVertex:
+ intrinsic_op = nir_intrinsic_emit_vertex;
+ break;
+ case SpvOpEndPrimitive:
+ case SpvOpEndStreamPrimitive:
+ intrinsic_op = nir_intrinsic_end_primitive;
+ break;
+ case SpvOpMemoryBarrier:
+ intrinsic_op = nir_intrinsic_memory_barrier;
+ break;
+ case SpvOpControlBarrier:
+ intrinsic_op = nir_intrinsic_barrier;
+ break;
+ default:
+ unreachable("unknown barrier instruction");
+ }
+
+ nir_intrinsic_instr *intrin =
+ nir_intrinsic_instr_create(b->shader, intrinsic_op);
+
+ if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
+ nir_intrinsic_set_stream_id(intrin, w[1]);
+
+ nir_builder_instr_insert(&b->nb, &intrin->instr);
+}
+
+static unsigned
+gl_primitive_from_spv_execution_mode(SpvExecutionMode mode)
+{
+ switch (mode) {
+ case SpvExecutionModeInputPoints:
+ case SpvExecutionModeOutputPoints:
+ return 0; /* GL_POINTS */
+ case SpvExecutionModeInputLines:
+ return 1; /* GL_LINES */
+ case SpvExecutionModeInputLinesAdjacency:
+      return 0x000A; /* GL_LINES_ADJACENCY_ARB */
+ case SpvExecutionModeTriangles:
+ return 4; /* GL_TRIANGLES */
+ case SpvExecutionModeInputTrianglesAdjacency:
+ return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
+ case SpvExecutionModeQuads:
+ return 7; /* GL_QUADS */
+ case SpvExecutionModeIsolines:
+ return 0x8E7A; /* GL_ISOLINES */
+ case SpvExecutionModeOutputLineStrip:
+ return 3; /* GL_LINE_STRIP */
+ case SpvExecutionModeOutputTriangleStrip:
+ return 5; /* GL_TRIANGLE_STRIP */
+ default:
+ assert(!"Invalid primitive type");
+ return 4;
+ }
+}
+
+static unsigned
+vertices_in_from_spv_execution_mode(SpvExecutionMode mode)
+{
+ switch (mode) {
+ case SpvExecutionModeInputPoints:
+ return 1;
+ case SpvExecutionModeInputLines:
+ return 2;
+ case SpvExecutionModeInputLinesAdjacency:
+ return 4;
+ case SpvExecutionModeTriangles:
+ return 3;
+ case SpvExecutionModeInputTrianglesAdjacency:
+ return 6;
+ default:
+ assert(!"Invalid GS input mode");
+ return 0;
+ }
+}
+
+static gl_shader_stage
+stage_for_execution_model(SpvExecutionModel model)
+{
+ switch (model) {
+ case SpvExecutionModelVertex:
+ return MESA_SHADER_VERTEX;
+ case SpvExecutionModelTessellationControl:
+ return MESA_SHADER_TESS_CTRL;
+ case SpvExecutionModelTessellationEvaluation:
+ return MESA_SHADER_TESS_EVAL;
+ case SpvExecutionModelGeometry:
+ return MESA_SHADER_GEOMETRY;
+ case SpvExecutionModelFragment:
+ return MESA_SHADER_FRAGMENT;
+ case SpvExecutionModelGLCompute:
+ return MESA_SHADER_COMPUTE;
+ default:
+ unreachable("Unsupported execution model");
+ }
+}
+
+static bool
+vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpSource:
+ case SpvOpSourceExtension:
+ case SpvOpSourceContinued:
+ case SpvOpExtension:
+ /* Unhandled, but these are for debug so that's ok. */
+ break;
+
+ case SpvOpCapability: {
+ SpvCapability cap = w[1];
+ switch (cap) {
+ case SpvCapabilityMatrix:
+ case SpvCapabilityShader:
+ case SpvCapabilityGeometry:
+ case SpvCapabilityTessellationPointSize:
+ case SpvCapabilityGeometryPointSize:
+ case SpvCapabilityUniformBufferArrayDynamicIndexing:
+ case SpvCapabilitySampledImageArrayDynamicIndexing:
+ case SpvCapabilityStorageBufferArrayDynamicIndexing:
+ case SpvCapabilityStorageImageArrayDynamicIndexing:
+ case SpvCapabilityImageRect:
+ case SpvCapabilitySampledRect:
+ case SpvCapabilitySampled1D:
+ case SpvCapabilityImage1D:
+ case SpvCapabilitySampledCubeArray:
+ case SpvCapabilitySampledBuffer:
+ case SpvCapabilityImageBuffer:
+ case SpvCapabilityImageQuery:
+ break;
+ case SpvCapabilityClipDistance:
+ case SpvCapabilityCullDistance:
+ case SpvCapabilityGeometryStreams:
+ fprintf(stderr, "WARNING: Unsupported SPIR-V Capability\n");
+ break;
+ default:
+ assert(!"Unsupported capability");
+ }
+ break;
+ }
+
+ case SpvOpExtInstImport:
+ vtn_handle_extension(b, opcode, w, count);
+ break;
+
+ case SpvOpMemoryModel:
+ assert(w[1] == SpvAddressingModelLogical);
+ assert(w[2] == SpvMemoryModelGLSL450);
+ break;
+
+ case SpvOpEntryPoint: {
+ struct vtn_value *entry_point = &b->values[w[2]];
+ /* Let this be a name label regardless */
+ unsigned name_words;
+ entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);
+
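+      /* Only the entry point matching the requested name and stage is
+       * recorded; any other entry points in the module are skipped.
+       */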
+ if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
+ stage_for_execution_model(w[1]) != b->entry_point_stage)
+ break;
+
+ assert(b->entry_point == NULL);
+ b->entry_point = entry_point;
+ break;
+ }
+
+ case SpvOpString:
+ vtn_push_value(b, w[1], vtn_value_type_string)->str =
+ vtn_string_literal(b, &w[2], count - 2, NULL);
+ break;
+
+ case SpvOpName:
+ b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
+ break;
+
+ case SpvOpMemberName:
+ /* TODO */
+ break;
+
+ case SpvOpExecutionMode:
+ case SpvOpDecorationGroup:
+ case SpvOpDecorate:
+ case SpvOpMemberDecorate:
+ case SpvOpGroupDecorate:
+ case SpvOpGroupMemberDecorate:
+ vtn_handle_decoration(b, opcode, w, count);
+ break;
+
+ default:
+ return false; /* End of preamble */
+ }
+
+ return true;
+}
+
+static void
+vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
+ const struct vtn_decoration *mode, void *data)
+{
+ assert(b->entry_point == entry_point);
+
+   switch (mode->exec_mode) {
+ case SpvExecutionModeOriginUpperLeft:
+ case SpvExecutionModeOriginLowerLeft:
+ b->origin_upper_left =
+ (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
+ break;
+
+ case SpvExecutionModeEarlyFragmentTests:
+ assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+ b->shader->info.fs.early_fragment_tests = true;
+ break;
+
+ case SpvExecutionModeInvocations:
+ assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+ b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
+ break;
+
+ case SpvExecutionModeDepthReplacing:
+ assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+ b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
+ break;
+ case SpvExecutionModeDepthGreater:
+ assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+ b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
+ break;
+ case SpvExecutionModeDepthLess:
+ assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+ b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
+ break;
+ case SpvExecutionModeDepthUnchanged:
+ assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+ b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
+ break;
+
+ case SpvExecutionModeLocalSize:
+ assert(b->shader->stage == MESA_SHADER_COMPUTE);
+ b->shader->info.cs.local_size[0] = mode->literals[0];
+ b->shader->info.cs.local_size[1] = mode->literals[1];
+ b->shader->info.cs.local_size[2] = mode->literals[2];
+ break;
+ case SpvExecutionModeLocalSizeHint:
+      break; /* Nothing to do with this */
+
+ case SpvExecutionModeOutputVertices:
+ assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+ b->shader->info.gs.vertices_out = mode->literals[0];
+ break;
+
+ case SpvExecutionModeInputPoints:
+ case SpvExecutionModeInputLines:
+ case SpvExecutionModeInputLinesAdjacency:
+ case SpvExecutionModeTriangles:
+ case SpvExecutionModeInputTrianglesAdjacency:
+ case SpvExecutionModeQuads:
+ case SpvExecutionModeIsolines:
+ if (b->shader->stage == MESA_SHADER_GEOMETRY) {
+ b->shader->info.gs.vertices_in =
+ vertices_in_from_spv_execution_mode(mode->exec_mode);
+ } else {
+         assert(!"Tessellation shaders not yet supported");
+ }
+ break;
+
+ case SpvExecutionModeOutputPoints:
+ case SpvExecutionModeOutputLineStrip:
+ case SpvExecutionModeOutputTriangleStrip:
+ assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+ b->shader->info.gs.output_primitive =
+ gl_primitive_from_spv_execution_mode(mode->exec_mode);
+ break;
+
+ case SpvExecutionModeSpacingEqual:
+ case SpvExecutionModeSpacingFractionalEven:
+ case SpvExecutionModeSpacingFractionalOdd:
+ case SpvExecutionModeVertexOrderCw:
+ case SpvExecutionModeVertexOrderCcw:
+ case SpvExecutionModePointMode:
+ assert(!"TODO: Add tessellation metadata");
+ break;
+
+ case SpvExecutionModePixelCenterInteger:
+ case SpvExecutionModeXfb:
+ assert(!"Unhandled execution mode");
+ break;
+
+ case SpvExecutionModeVecTypeHint:
+ case SpvExecutionModeContractionOff:
+ break; /* OpenCL */
+ }
+}
+
+static bool
+vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpSource:
+ case SpvOpSourceContinued:
+ case SpvOpSourceExtension:
+ case SpvOpExtension:
+ case SpvOpCapability:
+ case SpvOpExtInstImport:
+ case SpvOpMemoryModel:
+ case SpvOpEntryPoint:
+ case SpvOpExecutionMode:
+ case SpvOpString:
+ case SpvOpName:
+ case SpvOpMemberName:
+ case SpvOpDecorationGroup:
+ case SpvOpDecorate:
+ case SpvOpMemberDecorate:
+ case SpvOpGroupDecorate:
+ case SpvOpGroupMemberDecorate:
+      assert(!"Invalid opcode in types and variables section");
+ break;
+
+ case SpvOpTypeVoid:
+ case SpvOpTypeBool:
+ case SpvOpTypeInt:
+ case SpvOpTypeFloat:
+ case SpvOpTypeVector:
+ case SpvOpTypeMatrix:
+ case SpvOpTypeImage:
+ case SpvOpTypeSampler:
+ case SpvOpTypeSampledImage:
+ case SpvOpTypeArray:
+ case SpvOpTypeRuntimeArray:
+ case SpvOpTypeStruct:
+ case SpvOpTypeOpaque:
+ case SpvOpTypePointer:
+ case SpvOpTypeFunction:
+ case SpvOpTypeEvent:
+ case SpvOpTypeDeviceEvent:
+ case SpvOpTypeReserveId:
+ case SpvOpTypeQueue:
+ case SpvOpTypePipe:
+ vtn_handle_type(b, opcode, w, count);
+ break;
+
+ case SpvOpConstantTrue:
+ case SpvOpConstantFalse:
+ case SpvOpConstant:
+ case SpvOpConstantComposite:
+ case SpvOpConstantSampler:
+ case SpvOpConstantNull:
+ case SpvOpSpecConstantTrue:
+ case SpvOpSpecConstantFalse:
+ case SpvOpSpecConstant:
+ case SpvOpSpecConstantComposite:
+ case SpvOpSpecConstantOp:
+ vtn_handle_constant(b, opcode, w, count);
+ break;
+
+ case SpvOpVariable:
+ vtn_handle_variables(b, opcode, w, count);
+ break;
+
+ default:
+      return false; /* End of the types, variables and constants section */
+ }
+
+ return true;
+}
+
+static bool
+vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpLabel:
+ break;
+
+ case SpvOpLoopMerge:
+ case SpvOpSelectionMerge:
+ /* This is handled by cfg pre-pass and walk_blocks */
+ break;
+
+ case SpvOpUndef: {
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
+ val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
+ break;
+ }
+
+ case SpvOpExtInst:
+ vtn_handle_extension(b, opcode, w, count);
+ break;
+
+ case SpvOpVariable:
+ case SpvOpLoad:
+ case SpvOpStore:
+ case SpvOpCopyMemory:
+ case SpvOpCopyMemorySized:
+ case SpvOpAccessChain:
+ case SpvOpInBoundsAccessChain:
+ case SpvOpArrayLength:
+ vtn_handle_variables(b, opcode, w, count);
+ break;
+
+ case SpvOpFunctionCall:
+ vtn_handle_function_call(b, opcode, w, count);
+ break;
+
+ case SpvOpSampledImage:
+ case SpvOpImage:
+ case SpvOpImageSampleImplicitLod:
+ case SpvOpImageSampleExplicitLod:
+ case SpvOpImageSampleDrefImplicitLod:
+ case SpvOpImageSampleDrefExplicitLod:
+ case SpvOpImageSampleProjImplicitLod:
+ case SpvOpImageSampleProjExplicitLod:
+ case SpvOpImageSampleProjDrefImplicitLod:
+ case SpvOpImageSampleProjDrefExplicitLod:
+ case SpvOpImageFetch:
+ case SpvOpImageGather:
+ case SpvOpImageDrefGather:
+ case SpvOpImageQuerySizeLod:
+ case SpvOpImageQueryLod:
+ case SpvOpImageQueryLevels:
+ case SpvOpImageQuerySamples:
+ vtn_handle_texture(b, opcode, w, count);
+ break;
+
+ case SpvOpImageRead:
+ case SpvOpImageWrite:
+ case SpvOpImageTexelPointer:
+ vtn_handle_image(b, opcode, w, count);
+ break;
+
+ case SpvOpImageQuerySize: {
+ struct vtn_access_chain *image =
+ vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+ if (glsl_type_is_image(image->var->var->interface_type)) {
+ vtn_handle_image(b, opcode, w, count);
+ } else {
+ vtn_handle_texture(b, opcode, w, count);
+ }
+ break;
+ }
+
+ case SpvOpAtomicExchange:
+ case SpvOpAtomicCompareExchange:
+ case SpvOpAtomicCompareExchangeWeak:
+ case SpvOpAtomicIIncrement:
+ case SpvOpAtomicIDecrement:
+ case SpvOpAtomicIAdd:
+ case SpvOpAtomicISub:
+ case SpvOpAtomicSMin:
+ case SpvOpAtomicUMin:
+ case SpvOpAtomicSMax:
+ case SpvOpAtomicUMax:
+ case SpvOpAtomicAnd:
+ case SpvOpAtomicOr:
+ case SpvOpAtomicXor: {
+ struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
+ if (pointer->value_type == vtn_value_type_image_pointer) {
+ vtn_handle_image(b, opcode, w, count);
+ } else {
+ assert(pointer->value_type == vtn_value_type_access_chain);
+ vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
+ }
+ break;
+ }
+
+ case SpvOpSNegate:
+ case SpvOpFNegate:
+ case SpvOpNot:
+ case SpvOpAny:
+ case SpvOpAll:
+ case SpvOpConvertFToU:
+ case SpvOpConvertFToS:
+ case SpvOpConvertSToF:
+ case SpvOpConvertUToF:
+ case SpvOpUConvert:
+ case SpvOpSConvert:
+ case SpvOpFConvert:
+ case SpvOpQuantizeToF16:
+ case SpvOpConvertPtrToU:
+ case SpvOpConvertUToPtr:
+ case SpvOpPtrCastToGeneric:
+ case SpvOpGenericCastToPtr:
+ case SpvOpBitcast:
+ case SpvOpIsNan:
+ case SpvOpIsInf:
+ case SpvOpIsFinite:
+ case SpvOpIsNormal:
+ case SpvOpSignBitSet:
+ case SpvOpLessOrGreater:
+ case SpvOpOrdered:
+ case SpvOpUnordered:
+ case SpvOpIAdd:
+ case SpvOpFAdd:
+ case SpvOpISub:
+ case SpvOpFSub:
+ case SpvOpIMul:
+ case SpvOpFMul:
+ case SpvOpUDiv:
+ case SpvOpSDiv:
+ case SpvOpFDiv:
+ case SpvOpUMod:
+ case SpvOpSRem:
+ case SpvOpSMod:
+ case SpvOpFRem:
+ case SpvOpFMod:
+ case SpvOpVectorTimesScalar:
+ case SpvOpDot:
+ case SpvOpIAddCarry:
+ case SpvOpISubBorrow:
+ case SpvOpUMulExtended:
+ case SpvOpSMulExtended:
+ case SpvOpShiftRightLogical:
+ case SpvOpShiftRightArithmetic:
+ case SpvOpShiftLeftLogical:
+ case SpvOpLogicalEqual:
+ case SpvOpLogicalNotEqual:
+ case SpvOpLogicalOr:
+ case SpvOpLogicalAnd:
+ case SpvOpLogicalNot:
+ case SpvOpBitwiseOr:
+ case SpvOpBitwiseXor:
+ case SpvOpBitwiseAnd:
+ case SpvOpSelect:
+ case SpvOpIEqual:
+ case SpvOpFOrdEqual:
+ case SpvOpFUnordEqual:
+ case SpvOpINotEqual:
+ case SpvOpFOrdNotEqual:
+ case SpvOpFUnordNotEqual:
+ case SpvOpULessThan:
+ case SpvOpSLessThan:
+ case SpvOpFOrdLessThan:
+ case SpvOpFUnordLessThan:
+ case SpvOpUGreaterThan:
+ case SpvOpSGreaterThan:
+ case SpvOpFOrdGreaterThan:
+ case SpvOpFUnordGreaterThan:
+ case SpvOpULessThanEqual:
+ case SpvOpSLessThanEqual:
+ case SpvOpFOrdLessThanEqual:
+ case SpvOpFUnordLessThanEqual:
+ case SpvOpUGreaterThanEqual:
+ case SpvOpSGreaterThanEqual:
+ case SpvOpFOrdGreaterThanEqual:
+ case SpvOpFUnordGreaterThanEqual:
+ case SpvOpDPdx:
+ case SpvOpDPdy:
+ case SpvOpFwidth:
+ case SpvOpDPdxFine:
+ case SpvOpDPdyFine:
+ case SpvOpFwidthFine:
+ case SpvOpDPdxCoarse:
+ case SpvOpDPdyCoarse:
+ case SpvOpFwidthCoarse:
+ case SpvOpBitFieldInsert:
+ case SpvOpBitFieldSExtract:
+ case SpvOpBitFieldUExtract:
+ case SpvOpBitReverse:
+ case SpvOpBitCount:
+ case SpvOpTranspose:
+ case SpvOpOuterProduct:
+ case SpvOpMatrixTimesScalar:
+ case SpvOpVectorTimesMatrix:
+ case SpvOpMatrixTimesVector:
+ case SpvOpMatrixTimesMatrix:
+ vtn_handle_alu(b, opcode, w, count);
+ break;
+
+ case SpvOpVectorExtractDynamic:
+ case SpvOpVectorInsertDynamic:
+ case SpvOpVectorShuffle:
+ case SpvOpCompositeConstruct:
+ case SpvOpCompositeExtract:
+ case SpvOpCompositeInsert:
+ case SpvOpCopyObject:
+ vtn_handle_composite(b, opcode, w, count);
+ break;
+
+ case SpvOpEmitVertex:
+ case SpvOpEndPrimitive:
+ case SpvOpEmitStreamVertex:
+ case SpvOpEndStreamPrimitive:
+ case SpvOpControlBarrier:
+ case SpvOpMemoryBarrier:
+ vtn_handle_barrier(b, opcode, w, count);
+ break;
+
+ default:
+ unreachable("Unhandled opcode");
+ }
+
+ return true;
+}
+
+nir_function *
+spirv_to_nir(const uint32_t *words, size_t word_count,
+ struct nir_spirv_specialization *spec, unsigned num_spec,
+ gl_shader_stage stage, const char *entry_point_name,
+ const nir_shader_compiler_options *options)
+{
+ const uint32_t *word_end = words + word_count;
+
+   /* Handle the SPIR-V header (first 5 dwords): magic number, version,
+    * generator magic, value <id> bound and a reserved schema word.
+    */
+ assert(word_count > 5);
+
+ assert(words[0] == SpvMagicNumber);
+ assert(words[1] >= 0x10000);
+ /* words[2] == generator magic */
+ unsigned value_id_bound = words[3];
+ assert(words[4] == 0);
+
+   words += 5;
+
+   /* Initialize the vtn_builder object */
+ struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
+ b->value_id_bound = value_id_bound;
+ b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
+ exec_list_make_empty(&b->functions);
+ b->entry_point_stage = stage;
+ b->entry_point_name = entry_point_name;
+
+ /* Handle all the preamble instructions */
+ words = vtn_foreach_instruction(b, words, word_end,
+ vtn_handle_preamble_instruction);
+
+ if (b->entry_point == NULL) {
+ assert(!"Entry point not found");
+ ralloc_free(b);
+ return NULL;
+ }
+
+ b->shader = nir_shader_create(NULL, stage, options);
+
+ /* Set shader info defaults */
+ b->shader->info.gs.invocations = 1;
+
+ /* Parse execution modes */
+ vtn_foreach_execution_mode(b, b->entry_point,
+ vtn_handle_execution_mode, NULL);
+
+ b->specializations = spec;
+ b->num_specializations = num_spec;
+
+ /* Handle all variable, type, and constant instructions */
+ words = vtn_foreach_instruction(b, words, word_end,
+ vtn_handle_variable_or_type_instruction);
+
+ vtn_build_cfg(b, words, word_end);
+
+ foreach_list_typed(struct vtn_function, func, node, &b->functions) {
+ b->impl = func->impl;
+ b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+
+ vtn_function_emit(b, func, vtn_handle_body_instruction);
+ }
+
+ assert(b->entry_point->value_type == vtn_value_type_function);
+ nir_function *entry_point = b->entry_point->func->impl->function;
+ assert(entry_point);
+
+ ralloc_free(b);
+
+ return entry_point;
+}
diff --git a/src/compiler/spirv/vtn_alu.c b/src/compiler/spirv/vtn_alu.c
new file mode 100644
index 00000000000..8b9a63ce760
--- /dev/null
+++ b/src/compiler/spirv/vtn_alu.c
@@ -0,0 +1,464 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vtn_private.h"
+
+/*
+ * Normally, column vectors in SPIR-V correspond to a single NIR SSA
+ * definition. But for matrix multiplies, we want to do one routine for
+ * multiplying a matrix by a matrix and then pretend that vectors are matrices
+ * with one column. So we "wrap" these things, and unwrap the result before we
+ * send it off.
+ */
+
+static struct vtn_ssa_value *
+wrap_matrix(struct vtn_builder *b, struct vtn_ssa_value *val)
+{
+ if (val == NULL)
+ return NULL;
+
+ if (glsl_type_is_matrix(val->type))
+ return val;
+
+ struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
+ dest->type = val->type;
+ dest->elems = ralloc_array(b, struct vtn_ssa_value *, 1);
+ dest->elems[0] = val;
+
+ return dest;
+}
+
+static struct vtn_ssa_value *
+unwrap_matrix(struct vtn_ssa_value *val)
+{
+ if (glsl_type_is_matrix(val->type))
+ return val;
+
+ return val->elems[0];
+}
+
+static struct vtn_ssa_value *
+matrix_multiply(struct vtn_builder *b,
+ struct vtn_ssa_value *_src0, struct vtn_ssa_value *_src1)
+{
+ struct vtn_ssa_value *src0 = wrap_matrix(b, _src0);
+ struct vtn_ssa_value *src1 = wrap_matrix(b, _src1);
+ struct vtn_ssa_value *src0_transpose = wrap_matrix(b, _src0->transposed);
+ struct vtn_ssa_value *src1_transpose = wrap_matrix(b, _src1->transposed);
+
+ unsigned src0_rows = glsl_get_vector_elements(src0->type);
+ unsigned src0_columns = glsl_get_matrix_columns(src0->type);
+ unsigned src1_columns = glsl_get_matrix_columns(src1->type);
+
+ const struct glsl_type *dest_type;
+ if (src1_columns > 1) {
+ dest_type = glsl_matrix_type(glsl_get_base_type(src0->type),
+ src0_rows, src1_columns);
+ } else {
+ dest_type = glsl_vector_type(glsl_get_base_type(src0->type), src0_rows);
+ }
+ struct vtn_ssa_value *dest = vtn_create_ssa_value(b, dest_type);
+
+ dest = wrap_matrix(b, dest);
+
+ bool transpose_result = false;
+ if (src0_transpose && src1_transpose) {
+ /* transpose(A) * transpose(B) = transpose(B * A) */
+ src1 = src0_transpose;
+ src0 = src1_transpose;
+ src0_transpose = NULL;
+ src1_transpose = NULL;
+ transpose_result = true;
+ }
+
+ if (src0_transpose && !src1_transpose &&
+ glsl_get_base_type(src0->type) == GLSL_TYPE_FLOAT) {
+ /* We already have the rows of src0 and the columns of src1 available,
+ * so we can just take the dot product of each row with each column to
+ * get the result.
+ */
+
+ for (unsigned i = 0; i < src1_columns; i++) {
+ nir_ssa_def *vec_src[4];
+ for (unsigned j = 0; j < src0_rows; j++) {
+ vec_src[j] = nir_fdot(&b->nb, src0_transpose->elems[j]->def,
+ src1->elems[i]->def);
+ }
+ dest->elems[i]->def = nir_vec(&b->nb, vec_src, src0_rows);
+ }
+ } else {
+ /* We don't handle the case where src1 is transposed but not src0, since
+ * the general case only uses individual components of src1 so the
+ * optimizer should chew through the transpose we emitted for src1.
+ */
+
+ for (unsigned i = 0; i < src1_columns; i++) {
+ /* dest[i] = sum(src0[j] * src1[i][j] for all j) */
+ dest->elems[i]->def =
+ nir_fmul(&b->nb, src0->elems[0]->def,
+ nir_channel(&b->nb, src1->elems[i]->def, 0));
+ for (unsigned j = 1; j < src0_columns; j++) {
+ dest->elems[i]->def =
+ nir_fadd(&b->nb, dest->elems[i]->def,
+ nir_fmul(&b->nb, src0->elems[j]->def,
+ nir_channel(&b->nb, src1->elems[i]->def, j)));
+ }
+ }
+ }
+
+ dest = unwrap_matrix(dest);
+
+ if (transpose_result)
+ dest = vtn_ssa_transpose(b, dest);
+
+ return dest;
+}
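For reference (the notation here is added, not part of the diff): writing the columns of src0 as a_j and the columns of src1 as b_i, the two facts this routine relies on are

  \[ A^{T} B^{T} = (B A)^{T} \qquad\text{and}\qquad \mathrm{dest}_i = \sum_j a_j\,(b_i)_j . \]

The first identity justifies the transpose_result shortcut; the second is exactly the multiply/accumulate loop in the general path, while the transposed fast path computes each component directly as row_j(src0) · b_i via nir_fdot.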
+
+static struct vtn_ssa_value *
+mat_times_scalar(struct vtn_builder *b,
+ struct vtn_ssa_value *mat,
+ nir_ssa_def *scalar)
+{
+ struct vtn_ssa_value *dest = vtn_create_ssa_value(b, mat->type);
+ for (unsigned i = 0; i < glsl_get_matrix_columns(mat->type); i++) {
+ if (glsl_get_base_type(mat->type) == GLSL_TYPE_FLOAT)
+ dest->elems[i]->def = nir_fmul(&b->nb, mat->elems[i]->def, scalar);
+ else
+ dest->elems[i]->def = nir_imul(&b->nb, mat->elems[i]->def, scalar);
+ }
+
+ return dest;
+}
+
+static void
+vtn_handle_matrix_alu(struct vtn_builder *b, SpvOp opcode,
+ struct vtn_value *dest,
+ struct vtn_ssa_value *src0, struct vtn_ssa_value *src1)
+{
+ switch (opcode) {
+ case SpvOpFNegate: {
+ dest->ssa = vtn_create_ssa_value(b, src0->type);
+ unsigned cols = glsl_get_matrix_columns(src0->type);
+ for (unsigned i = 0; i < cols; i++)
+ dest->ssa->elems[i]->def = nir_fneg(&b->nb, src0->elems[i]->def);
+ break;
+ }
+
+ case SpvOpFAdd: {
+ dest->ssa = vtn_create_ssa_value(b, src0->type);
+ unsigned cols = glsl_get_matrix_columns(src0->type);
+ for (unsigned i = 0; i < cols; i++)
+ dest->ssa->elems[i]->def =
+ nir_fadd(&b->nb, src0->elems[i]->def, src1->elems[i]->def);
+ break;
+ }
+
+ case SpvOpFSub: {
+ dest->ssa = vtn_create_ssa_value(b, src0->type);
+ unsigned cols = glsl_get_matrix_columns(src0->type);
+ for (unsigned i = 0; i < cols; i++)
+ dest->ssa->elems[i]->def =
+ nir_fsub(&b->nb, src0->elems[i]->def, src1->elems[i]->def);
+ break;
+ }
+
+ case SpvOpTranspose:
+ dest->ssa = vtn_ssa_transpose(b, src0);
+ break;
+
+ case SpvOpMatrixTimesScalar:
+ if (src0->transposed) {
+ dest->ssa = vtn_ssa_transpose(b, mat_times_scalar(b, src0->transposed,
+ src1->def));
+ } else {
+ dest->ssa = mat_times_scalar(b, src0, src1->def);
+ }
+ break;
+
+ case SpvOpVectorTimesMatrix:
+ case SpvOpMatrixTimesVector:
+ case SpvOpMatrixTimesMatrix:
+ if (opcode == SpvOpVectorTimesMatrix) {
+ dest->ssa = matrix_multiply(b, vtn_ssa_transpose(b, src1), src0);
+ } else {
+ dest->ssa = matrix_multiply(b, src0, src1);
+ }
+ break;
+
+ default: unreachable("unknown matrix opcode");
+ }
+}
+
+nir_op
+vtn_nir_alu_op_for_spirv_opcode(SpvOp opcode, bool *swap)
+{
+ /* Indicates that the first two arguments should be swapped. This is
+ * used for implementing greater-than and less-than-or-equal.
+ */
+ *swap = false;
+
+ switch (opcode) {
+ case SpvOpSNegate: return nir_op_ineg;
+ case SpvOpFNegate: return nir_op_fneg;
+ case SpvOpNot: return nir_op_inot;
+ case SpvOpIAdd: return nir_op_iadd;
+ case SpvOpFAdd: return nir_op_fadd;
+ case SpvOpISub: return nir_op_isub;
+ case SpvOpFSub: return nir_op_fsub;
+ case SpvOpIMul: return nir_op_imul;
+ case SpvOpFMul: return nir_op_fmul;
+ case SpvOpUDiv: return nir_op_udiv;
+ case SpvOpSDiv: return nir_op_idiv;
+ case SpvOpFDiv: return nir_op_fdiv;
+ case SpvOpUMod: return nir_op_umod;
+ case SpvOpSMod: return nir_op_imod;
+ case SpvOpFMod: return nir_op_fmod;
+ case SpvOpSRem: return nir_op_irem;
+ case SpvOpFRem: return nir_op_frem;
+
+ case SpvOpShiftRightLogical: return nir_op_ushr;
+ case SpvOpShiftRightArithmetic: return nir_op_ishr;
+ case SpvOpShiftLeftLogical: return nir_op_ishl;
+ case SpvOpLogicalOr: return nir_op_ior;
+ case SpvOpLogicalEqual: return nir_op_ieq;
+ case SpvOpLogicalNotEqual: return nir_op_ine;
+ case SpvOpLogicalAnd: return nir_op_iand;
+ case SpvOpLogicalNot: return nir_op_inot;
+ case SpvOpBitwiseOr: return nir_op_ior;
+ case SpvOpBitwiseXor: return nir_op_ixor;
+ case SpvOpBitwiseAnd: return nir_op_iand;
+ case SpvOpSelect: return nir_op_bcsel;
+ case SpvOpIEqual: return nir_op_ieq;
+
+ case SpvOpBitFieldInsert: return nir_op_bitfield_insert;
+ case SpvOpBitFieldSExtract: return nir_op_ibitfield_extract;
+ case SpvOpBitFieldUExtract: return nir_op_ubitfield_extract;
+ case SpvOpBitReverse: return nir_op_bitfield_reverse;
+ case SpvOpBitCount: return nir_op_bit_count;
+
+   /* Comparisons: (TODO: How do we want to handle ordered/unordered?) */
+ case SpvOpFOrdEqual: return nir_op_feq;
+ case SpvOpFUnordEqual: return nir_op_feq;
+ case SpvOpINotEqual: return nir_op_ine;
+ case SpvOpFOrdNotEqual: return nir_op_fne;
+ case SpvOpFUnordNotEqual: return nir_op_fne;
+ case SpvOpULessThan: return nir_op_ult;
+ case SpvOpSLessThan: return nir_op_ilt;
+ case SpvOpFOrdLessThan: return nir_op_flt;
+ case SpvOpFUnordLessThan: return nir_op_flt;
+ case SpvOpUGreaterThan: *swap = true; return nir_op_ult;
+ case SpvOpSGreaterThan: *swap = true; return nir_op_ilt;
+ case SpvOpFOrdGreaterThan: *swap = true; return nir_op_flt;
+ case SpvOpFUnordGreaterThan: *swap = true; return nir_op_flt;
+ case SpvOpULessThanEqual: *swap = true; return nir_op_uge;
+ case SpvOpSLessThanEqual: *swap = true; return nir_op_ige;
+ case SpvOpFOrdLessThanEqual: *swap = true; return nir_op_fge;
+ case SpvOpFUnordLessThanEqual: *swap = true; return nir_op_fge;
+ case SpvOpUGreaterThanEqual: return nir_op_uge;
+ case SpvOpSGreaterThanEqual: return nir_op_ige;
+ case SpvOpFOrdGreaterThanEqual: return nir_op_fge;
+ case SpvOpFUnordGreaterThanEqual: return nir_op_fge;
+
+ /* Conversions: */
+ case SpvOpConvertFToU: return nir_op_f2u;
+ case SpvOpConvertFToS: return nir_op_f2i;
+ case SpvOpConvertSToF: return nir_op_i2f;
+ case SpvOpConvertUToF: return nir_op_u2f;
+ case SpvOpBitcast: return nir_op_imov;
+   case SpvOpQuantizeToF16:      return nir_op_fquantize2f16;
+   /* TODO: NIR is 32-bit only; these are no-ops. */
+   case SpvOpUConvert:           return nir_op_imov;
+   case SpvOpSConvert:           return nir_op_imov;
+ case SpvOpFConvert: return nir_op_fmov;
+
+ /* Derivatives: */
+ case SpvOpDPdx: return nir_op_fddx;
+ case SpvOpDPdy: return nir_op_fddy;
+ case SpvOpDPdxFine: return nir_op_fddx_fine;
+ case SpvOpDPdyFine: return nir_op_fddy_fine;
+ case SpvOpDPdxCoarse: return nir_op_fddx_coarse;
+ case SpvOpDPdyCoarse: return nir_op_fddy_coarse;
+
+ default:
+ unreachable("No NIR equivalent");
+ }
+}
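The *swap convention above is just the usual ordering duality; for reference:

  \[ a > b \iff b < a, \qquad a \le b \iff b \ge a, \]

so, for example, SpvOpUGreaterThan maps to nir_op_ult with its operands exchanged, and SpvOpULessThanEqual maps to nir_op_uge with its operands exchanged.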
+
+static void
+handle_no_contraction(struct vtn_builder *b, struct vtn_value *val, int member,
+ const struct vtn_decoration *dec, void *_void)
+{
+ assert(dec->scope == VTN_DEC_DECORATION);
+ if (dec->decoration != SpvDecorationNoContraction)
+ return;
+
+ b->nb.exact = true;
+}
+
+void
+vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ const struct glsl_type *type =
+ vtn_value(b, w[1], vtn_value_type_type)->type->type;
+
+ vtn_foreach_decoration(b, val, handle_no_contraction, NULL);
+
+ /* Collect the various SSA sources */
+ const unsigned num_inputs = count - 3;
+ struct vtn_ssa_value *vtn_src[4] = { NULL, };
+ for (unsigned i = 0; i < num_inputs; i++)
+ vtn_src[i] = vtn_ssa_value(b, w[i + 3]);
+
+ if (glsl_type_is_matrix(vtn_src[0]->type) ||
+ (num_inputs >= 2 && glsl_type_is_matrix(vtn_src[1]->type))) {
+ vtn_handle_matrix_alu(b, opcode, val, vtn_src[0], vtn_src[1]);
+ b->nb.exact = false;
+ return;
+ }
+
+ val->ssa = vtn_create_ssa_value(b, type);
+ nir_ssa_def *src[4] = { NULL, };
+ for (unsigned i = 0; i < num_inputs; i++) {
+ assert(glsl_type_is_vector_or_scalar(vtn_src[i]->type));
+ src[i] = vtn_src[i]->def;
+ }
+
+ switch (opcode) {
+ case SpvOpAny:
+ if (src[0]->num_components == 1) {
+ val->ssa->def = nir_imov(&b->nb, src[0]);
+ } else {
+ nir_op op;
+ switch (src[0]->num_components) {
+ case 2: op = nir_op_bany_inequal2; break;
+ case 3: op = nir_op_bany_inequal3; break;
+ case 4: op = nir_op_bany_inequal4; break;
+         default: unreachable("Invalid number of components");
+         }
+ val->ssa->def = nir_build_alu(&b->nb, op, src[0],
+ nir_imm_int(&b->nb, NIR_FALSE),
+ NULL, NULL);
+ }
+ break;
+
+ case SpvOpAll:
+ if (src[0]->num_components == 1) {
+ val->ssa->def = nir_imov(&b->nb, src[0]);
+ } else {
+ nir_op op;
+ switch (src[0]->num_components) {
+ case 2: op = nir_op_ball_iequal2; break;
+ case 3: op = nir_op_ball_iequal3; break;
+ case 4: op = nir_op_ball_iequal4; break;
+         default: unreachable("Invalid number of components");
+         }
+ val->ssa->def = nir_build_alu(&b->nb, op, src[0],
+ nir_imm_int(&b->nb, NIR_TRUE),
+ NULL, NULL);
+ }
+ break;
+
+ case SpvOpOuterProduct: {
+ for (unsigned i = 0; i < src[1]->num_components; i++) {
+ val->ssa->elems[i]->def =
+ nir_fmul(&b->nb, src[0], nir_channel(&b->nb, src[1], i));
+ }
+ break;
+ }
+
+ case SpvOpDot:
+ val->ssa->def = nir_fdot(&b->nb, src[0], src[1]);
+ break;
+
+ case SpvOpIAddCarry:
+ assert(glsl_type_is_struct(val->ssa->type));
+ val->ssa->elems[0]->def = nir_iadd(&b->nb, src[0], src[1]);
+ val->ssa->elems[1]->def = nir_uadd_carry(&b->nb, src[0], src[1]);
+ break;
+
+ case SpvOpISubBorrow:
+ assert(glsl_type_is_struct(val->ssa->type));
+ val->ssa->elems[0]->def = nir_isub(&b->nb, src[0], src[1]);
+ val->ssa->elems[1]->def = nir_usub_borrow(&b->nb, src[0], src[1]);
+ break;
+
+ case SpvOpUMulExtended:
+ assert(glsl_type_is_struct(val->ssa->type));
+ val->ssa->elems[0]->def = nir_imul(&b->nb, src[0], src[1]);
+ val->ssa->elems[1]->def = nir_umul_high(&b->nb, src[0], src[1]);
+ break;
+
+ case SpvOpSMulExtended:
+ assert(glsl_type_is_struct(val->ssa->type));
+ val->ssa->elems[0]->def = nir_imul(&b->nb, src[0], src[1]);
+ val->ssa->elems[1]->def = nir_imul_high(&b->nb, src[0], src[1]);
+ break;
+
+ case SpvOpFwidth:
+ val->ssa->def = nir_fadd(&b->nb,
+ nir_fabs(&b->nb, nir_fddx(&b->nb, src[0])),
+ nir_fabs(&b->nb, nir_fddy(&b->nb, src[0])));
+ break;
+ case SpvOpFwidthFine:
+ val->ssa->def = nir_fadd(&b->nb,
+ nir_fabs(&b->nb, nir_fddx_fine(&b->nb, src[0])),
+ nir_fabs(&b->nb, nir_fddy_fine(&b->nb, src[0])));
+ break;
+ case SpvOpFwidthCoarse:
+ val->ssa->def = nir_fadd(&b->nb,
+ nir_fabs(&b->nb, nir_fddx_coarse(&b->nb, src[0])),
+ nir_fabs(&b->nb, nir_fddy_coarse(&b->nb, src[0])));
+ break;
+
+ case SpvOpVectorTimesScalar:
+ /* The builder will take care of splatting for us. */
+ val->ssa->def = nir_fmul(&b->nb, src[0], src[1]);
+ break;
+
+ case SpvOpIsNan:
+ val->ssa->def = nir_fne(&b->nb, src[0], src[0]);
+ break;
+
+ case SpvOpIsInf:
+ val->ssa->def = nir_feq(&b->nb, nir_fabs(&b->nb, src[0]),
+ nir_imm_float(&b->nb, INFINITY));
+ break;
+
+ default: {
+ bool swap;
+ nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap);
+
+ if (swap) {
+ nir_ssa_def *tmp = src[0];
+ src[0] = src[1];
+ src[1] = tmp;
+ }
+
+ val->ssa->def = nir_build_alu(&b->nb, op, src[0], src[1], src[2], src[3]);
+ break;
+ } /* default */
+ }
+
+ b->nb.exact = false;
+}
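Several of the open-coded cases above come from standard identities rather than a single NIR opcode; for reference:

  \[ \mathrm{fwidth}(p) = \left|\tfrac{\partial p}{\partial x}\right| + \left|\tfrac{\partial p}{\partial y}\right|, \qquad \mathrm{IsNan}(x) \iff x \ne x, \qquad \mathrm{IsInf}(x) \iff |x| = \infty . \]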
diff --git a/src/compiler/spirv/vtn_cfg.c b/src/compiler/spirv/vtn_cfg.c
new file mode 100644
index 00000000000..6a43ef8b2dd
--- /dev/null
+++ b/src/compiler/spirv/vtn_cfg.c
@@ -0,0 +1,778 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vtn_private.h"
+#include "nir/nir_vla.h"
+
+static bool
+vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpFunction: {
+ assert(b->func == NULL);
+ b->func = rzalloc(b, struct vtn_function);
+
+ list_inithead(&b->func->body);
+ b->func->control = w[3];
+
+ const struct glsl_type *result_type =
+ vtn_value(b, w[1], vtn_value_type_type)->type->type;
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function);
+ val->func = b->func;
+
+ const struct glsl_type *func_type =
+ vtn_value(b, w[4], vtn_value_type_type)->type->type;
+
+ assert(glsl_get_function_return_type(func_type) == result_type);
+
+ nir_function *func =
+ nir_function_create(b->shader, ralloc_strdup(b->shader, val->name));
+
+ func->num_params = glsl_get_length(func_type);
+ func->params = ralloc_array(b->shader, nir_parameter, func->num_params);
+ for (unsigned i = 0; i < func->num_params; i++) {
+ const struct glsl_function_param *param =
+ glsl_get_function_param(func_type, i);
+ func->params[i].type = param->type;
+ if (param->in) {
+ if (param->out) {
+ func->params[i].param_type = nir_parameter_inout;
+ } else {
+ func->params[i].param_type = nir_parameter_in;
+ }
+ } else {
+ if (param->out) {
+ func->params[i].param_type = nir_parameter_out;
+ } else {
+ assert(!"Parameter is neither in nor out");
+ }
+ }
+ }
+
+ func->return_type = glsl_get_function_return_type(func_type);
+
+ b->func->impl = nir_function_impl_create(func);
+
+ b->func_param_idx = 0;
+ break;
+ }
+
+ case SpvOpFunctionEnd:
+ b->func->end = w;
+ b->func = NULL;
+ break;
+
+ case SpvOpFunctionParameter: {
+ struct vtn_value *val =
+ vtn_push_value(b, w[2], vtn_value_type_access_chain);
+
+ struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
+
+ assert(b->func_param_idx < b->func->impl->num_params);
+ nir_variable *param = b->func->impl->params[b->func_param_idx++];
+
+ assert(param->type == type->type);
+
+ /* Name the parameter so it shows up nicely in NIR */
+ param->name = ralloc_strdup(param, val->name);
+
+ struct vtn_variable *vtn_var = rzalloc(b, struct vtn_variable);
+ vtn_var->type = type;
+ vtn_var->var = param;
+ vtn_var->chain.var = vtn_var;
+ vtn_var->chain.length = 0;
+
+ struct vtn_type *without_array = type;
+      while (glsl_type_is_array(without_array->type))
+ without_array = without_array->array_element;
+
+ if (glsl_type_is_image(without_array->type)) {
+ vtn_var->mode = vtn_variable_mode_image;
+ param->interface_type = without_array->type;
+ } else if (glsl_type_is_sampler(without_array->type)) {
+ vtn_var->mode = vtn_variable_mode_sampler;
+ param->interface_type = without_array->type;
+ } else {
+ vtn_var->mode = vtn_variable_mode_param;
+ }
+
+ val->access_chain = &vtn_var->chain;
+ break;
+ }
+
+ case SpvOpLabel: {
+ assert(b->block == NULL);
+ b->block = rzalloc(b, struct vtn_block);
+ b->block->node.type = vtn_cf_node_type_block;
+ b->block->label = w;
+ vtn_push_value(b, w[1], vtn_value_type_block)->block = b->block;
+
+ if (b->func->start_block == NULL) {
+ /* This is the first block encountered for this function. In this
+ * case, we set the start block and add it to the list of
+ * implemented functions that we'll walk later.
+ */
+ b->func->start_block = b->block;
+ exec_list_push_tail(&b->functions, &b->func->node);
+ }
+ break;
+ }
+
+ case SpvOpSelectionMerge:
+ case SpvOpLoopMerge:
+ assert(b->block && b->block->merge == NULL);
+ b->block->merge = w;
+ break;
+
+ case SpvOpBranch:
+ case SpvOpBranchConditional:
+ case SpvOpSwitch:
+ case SpvOpKill:
+ case SpvOpReturn:
+ case SpvOpReturnValue:
+ case SpvOpUnreachable:
+ assert(b->block && b->block->branch == NULL);
+ b->block->branch = w;
+ b->block = NULL;
+ break;
+
+ default:
+ /* Continue on as per normal */
+ return true;
+ }
+
+ return true;
+}
+
+static void
+vtn_add_case(struct vtn_builder *b, struct vtn_switch *swtch,
+ struct vtn_block *break_block,
+ uint32_t block_id, uint32_t val, bool is_default)
+{
+ struct vtn_block *case_block =
+ vtn_value(b, block_id, vtn_value_type_block)->block;
+
+ /* Don't create dummy cases that just break */
+ if (case_block == break_block)
+ return;
+
+ if (case_block->switch_case == NULL) {
+ struct vtn_case *c = ralloc(b, struct vtn_case);
+
+ list_inithead(&c->body);
+ c->start_block = case_block;
+ c->fallthrough = NULL;
+ nir_array_init(&c->values, b);
+ c->is_default = false;
+ c->visited = false;
+
+ list_addtail(&c->link, &swtch->cases);
+
+ case_block->switch_case = c;
+ }
+
+ if (is_default) {
+ case_block->switch_case->is_default = true;
+ } else {
+ nir_array_add(&case_block->switch_case->values, uint32_t, val);
+ }
+}
+
+/* This function performs a depth-first search of the cases and puts them
+ * in fall-through order.
+ */
+static void
+vtn_order_case(struct vtn_switch *swtch, struct vtn_case *cse)
+{
+ if (cse->visited)
+ return;
+
+ cse->visited = true;
+
+ list_del(&cse->link);
+
+ if (cse->fallthrough) {
+ vtn_order_case(swtch, cse->fallthrough);
+
+ /* If we have a fall-through, place this case right before the case it
+ * falls through to. This ensures that fallthroughs come one after
+ * the other. These two can never get separated because that would
+ * imply something else falling through to the same case. Also, this
+ * can't break ordering because the DFS ensures that this case is
+ * visited before anything that falls through to it.
+ */
+ list_addtail(&cse->link, &cse->fallthrough->link);
+ } else {
+ list_add(&cse->link, &swtch->cases);
+ }
+}
+
+static enum vtn_branch_type
+vtn_get_branch_type(struct vtn_block *block,
+ struct vtn_case *swcase, struct vtn_block *switch_break,
+ struct vtn_block *loop_break, struct vtn_block *loop_cont)
+{
+ if (block->switch_case) {
+ /* This branch is actually a fallthrough */
+ assert(swcase->fallthrough == NULL ||
+ swcase->fallthrough == block->switch_case);
+ swcase->fallthrough = block->switch_case;
+ return vtn_branch_type_switch_fallthrough;
+ } else if (block == switch_break) {
+ return vtn_branch_type_switch_break;
+ } else if (block == loop_break) {
+ return vtn_branch_type_loop_break;
+ } else if (block == loop_cont) {
+ return vtn_branch_type_loop_continue;
+ } else {
+ return vtn_branch_type_none;
+ }
+}
+
+static void
+vtn_cfg_walk_blocks(struct vtn_builder *b, struct list_head *cf_list,
+ struct vtn_block *start, struct vtn_case *switch_case,
+ struct vtn_block *switch_break,
+ struct vtn_block *loop_break, struct vtn_block *loop_cont,
+ struct vtn_block *end)
+{
+ struct vtn_block *block = start;
+ while (block != end) {
+ if (block->merge && (*block->merge & SpvOpCodeMask) == SpvOpLoopMerge &&
+ !block->loop) {
+ struct vtn_loop *loop = ralloc(b, struct vtn_loop);
+
+ loop->node.type = vtn_cf_node_type_loop;
+ list_inithead(&loop->body);
+ list_inithead(&loop->cont_body);
+ loop->control = block->merge[3];
+
+ list_addtail(&loop->node.link, cf_list);
+ block->loop = loop;
+
+ struct vtn_block *new_loop_break =
+ vtn_value(b, block->merge[1], vtn_value_type_block)->block;
+ struct vtn_block *new_loop_cont =
+ vtn_value(b, block->merge[2], vtn_value_type_block)->block;
+
+ /* Note: This recursive call will start with the current block as
+ * its start block. If we weren't careful, we would get here
+ * again and end up in infinite recursion. This is why we set
+ * block->loop above and check for it before creating one. This
+ * way, we only create the loop once and the second call that
+ * tries to handle this loop goes to the cases below and gets
+ * handled as a regular block.
+ *
+ * Note: When we make the recursive walk calls, we pass NULL for
+ * the switch break since you have to break out of the loop first.
+ * We do, however, still pass the current switch case because it's
+ * possible that the merge block for the loop is the start of
+ * another case.
+ */
+ vtn_cfg_walk_blocks(b, &loop->body, block, switch_case, NULL,
+                          new_loop_break, new_loop_cont, NULL);
+ vtn_cfg_walk_blocks(b, &loop->cont_body, new_loop_cont, NULL, NULL,
+ new_loop_break, NULL, block);
+
+ block = new_loop_break;
+ continue;
+ }
+
+ assert(block->node.link.next == NULL);
+ list_addtail(&block->node.link, cf_list);
+
+ switch (*block->branch & SpvOpCodeMask) {
+ case SpvOpBranch: {
+ struct vtn_block *branch_block =
+ vtn_value(b, block->branch[1], vtn_value_type_block)->block;
+
+ block->branch_type = vtn_get_branch_type(branch_block,
+ switch_case, switch_break,
+ loop_break, loop_cont);
+
+ if (block->branch_type != vtn_branch_type_none)
+ return;
+
+ block = branch_block;
+ continue;
+ }
+
+ case SpvOpReturn:
+ case SpvOpReturnValue:
+ block->branch_type = vtn_branch_type_return;
+ return;
+
+ case SpvOpKill:
+ block->branch_type = vtn_branch_type_discard;
+ return;
+
+ case SpvOpBranchConditional: {
+ struct vtn_block *then_block =
+ vtn_value(b, block->branch[2], vtn_value_type_block)->block;
+ struct vtn_block *else_block =
+ vtn_value(b, block->branch[3], vtn_value_type_block)->block;
+
+ struct vtn_if *if_stmt = ralloc(b, struct vtn_if);
+
+ if_stmt->node.type = vtn_cf_node_type_if;
+ if_stmt->condition = block->branch[1];
+ list_inithead(&if_stmt->then_body);
+ list_inithead(&if_stmt->else_body);
+
+ list_addtail(&if_stmt->node.link, cf_list);
+
+ if (block->merge &&
+ (*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge) {
+ if_stmt->control = block->merge[2];
+ }
+
+ if_stmt->then_type = vtn_get_branch_type(then_block,
+ switch_case, switch_break,
+ loop_break, loop_cont);
+ if_stmt->else_type = vtn_get_branch_type(else_block,
+ switch_case, switch_break,
+ loop_break, loop_cont);
+
+ if (if_stmt->then_type == vtn_branch_type_none &&
+ if_stmt->else_type == vtn_branch_type_none) {
+ /* Neither side of the if is something we can short-circuit. */
+ assert((*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge);
+ struct vtn_block *merge_block =
+ vtn_value(b, block->merge[1], vtn_value_type_block)->block;
+
+ vtn_cfg_walk_blocks(b, &if_stmt->then_body, then_block,
+ switch_case, switch_break,
+ loop_break, loop_cont, merge_block);
+ vtn_cfg_walk_blocks(b, &if_stmt->else_body, else_block,
+ switch_case, switch_break,
+ loop_break, loop_cont, merge_block);
+
+ enum vtn_branch_type merge_type =
+ vtn_get_branch_type(merge_block, switch_case, switch_break,
+ loop_break, loop_cont);
+ if (merge_type == vtn_branch_type_none) {
+ block = merge_block;
+ continue;
+ } else {
+ return;
+ }
+ } else if (if_stmt->then_type != vtn_branch_type_none &&
+ if_stmt->else_type != vtn_branch_type_none) {
+ /* Both sides were short-circuited. We're done here. */
+ return;
+ } else {
+            /* Exactly one side of the branch could be short-circuited.
+ * We set the branch up as a predicated break/continue and we
+ * continue on with the other side as if it were what comes
+ * after the if.
+ */
+ if (if_stmt->then_type == vtn_branch_type_none) {
+ block = then_block;
+ } else {
+ block = else_block;
+ }
+ continue;
+ }
+ unreachable("Should have returned or continued");
+ }
+
+ case SpvOpSwitch: {
+ assert((*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge);
+ struct vtn_block *break_block =
+ vtn_value(b, block->merge[1], vtn_value_type_block)->block;
+
+ struct vtn_switch *swtch = ralloc(b, struct vtn_switch);
+
+ swtch->node.type = vtn_cf_node_type_switch;
+ swtch->selector = block->branch[1];
+ list_inithead(&swtch->cases);
+
+ list_addtail(&swtch->node.link, cf_list);
+
+ /* First, we go through and record all of the cases. */
+ const uint32_t *branch_end =
+ block->branch + (block->branch[0] >> SpvWordCountShift);
+
+ vtn_add_case(b, swtch, break_block, block->branch[2], 0, true);
+ for (const uint32_t *w = block->branch + 3; w < branch_end; w += 2)
+ vtn_add_case(b, swtch, break_block, w[1], w[0], false);
+
+ /* Now, we go through and walk the blocks. While we walk through
+ * the blocks, we also gather the much-needed fall-through
+ * information.
+ */
+ list_for_each_entry(struct vtn_case, cse, &swtch->cases, link) {
+ assert(cse->start_block != break_block);
+ vtn_cfg_walk_blocks(b, &cse->body, cse->start_block, cse,
+ break_block, NULL, loop_cont, NULL);
+ }
+
+ /* Finally, we walk over all of the cases one more time and put
+ * them in fall-through order.
+ */
+ for (const uint32_t *w = block->branch + 2; w < branch_end; w += 2) {
+ struct vtn_block *case_block =
+ vtn_value(b, *w, vtn_value_type_block)->block;
+
+ if (case_block == break_block)
+ continue;
+
+ assert(case_block->switch_case);
+
+ vtn_order_case(swtch, case_block->switch_case);
+ }
+
+ block = break_block;
+ continue;
+ }
+
+ case SpvOpUnreachable:
+ return;
+
+ default:
+ unreachable("Unhandled opcode");
+ }
+ }
+}
+
+void
+vtn_build_cfg(struct vtn_builder *b, const uint32_t *words, const uint32_t *end)
+{
+ vtn_foreach_instruction(b, words, end,
+ vtn_cfg_handle_prepass_instruction);
+
+ foreach_list_typed(struct vtn_function, func, node, &b->functions) {
+ vtn_cfg_walk_blocks(b, &func->body, func->start_block,
+ NULL, NULL, NULL, NULL, NULL);
+ }
+}
+
+static bool
+vtn_handle_phis_first_pass(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ if (opcode == SpvOpLabel)
+ return true; /* Nothing to do */
+
+ /* If this isn't a phi node, stop. */
+ if (opcode != SpvOpPhi)
+ return false;
+
+ /* For handling phi nodes, we do a poor-man's out-of-ssa on the spot.
+    * For each phi, we create a variable with the appropriate type and
+ * do a load from that variable. Then, in a second pass, we add
+ * stores to that variable to each of the predecessor blocks.
+ *
+ * We could do something more intelligent here. However, in order to
+ * handle loops and things properly, we really need dominance
+ * information. It would end up basically being the into-SSA
+ * algorithm all over again. It's easier if we just let
+ * lower_vars_to_ssa do that for us instead of repeating it here.
+ */
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+
+ struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
+ nir_variable *phi_var =
+ nir_local_variable_create(b->nb.impl, type->type, "phi");
+ _mesa_hash_table_insert(b->phi_table, w, phi_var);
+
+ val->ssa = vtn_local_load(b, nir_deref_var_create(b, phi_var));
+
+ return true;
+}
+
+static bool
+vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ if (opcode != SpvOpPhi)
+ return true;
+
+ struct hash_entry *phi_entry = _mesa_hash_table_search(b->phi_table, w);
+ assert(phi_entry);
+ nir_variable *phi_var = phi_entry->data;
+
+ for (unsigned i = 3; i < count; i += 2) {
+ struct vtn_ssa_value *src = vtn_ssa_value(b, w[i]);
+ struct vtn_block *pred =
+ vtn_value(b, w[i + 1], vtn_value_type_block)->block;
+
+ b->nb.cursor = nir_after_block_before_jump(pred->end_block);
+
+ vtn_local_store(b, src, nir_deref_var_create(b, phi_var));
+ }
+
+ return true;
+}
+
+static void
+vtn_emit_branch(struct vtn_builder *b, enum vtn_branch_type branch_type,
+ nir_variable *switch_fall_var, bool *has_switch_break)
+{
+ switch (branch_type) {
+ case vtn_branch_type_switch_break:
+ nir_store_var(&b->nb, switch_fall_var, nir_imm_int(&b->nb, NIR_FALSE), 1);
+ *has_switch_break = true;
+ break;
+ case vtn_branch_type_switch_fallthrough:
+ break; /* Nothing to do */
+ case vtn_branch_type_loop_break:
+ nir_jump(&b->nb, nir_jump_break);
+ break;
+ case vtn_branch_type_loop_continue:
+ nir_jump(&b->nb, nir_jump_continue);
+ break;
+ case vtn_branch_type_return:
+ nir_jump(&b->nb, nir_jump_return);
+ break;
+ case vtn_branch_type_discard: {
+ nir_intrinsic_instr *discard =
+ nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_discard);
+ nir_builder_instr_insert(&b->nb, &discard->instr);
+ break;
+ }
+ default:
+ unreachable("Invalid branch type");
+ }
+}
+
+static void
+vtn_emit_cf_list(struct vtn_builder *b, struct list_head *cf_list,
+ nir_variable *switch_fall_var, bool *has_switch_break,
+ vtn_instruction_handler handler)
+{
+ list_for_each_entry(struct vtn_cf_node, node, cf_list, link) {
+ switch (node->type) {
+ case vtn_cf_node_type_block: {
+ struct vtn_block *block = (struct vtn_block *)node;
+
+ const uint32_t *block_start = block->label;
+ const uint32_t *block_end = block->merge ? block->merge :
+ block->branch;
+
+ block_start = vtn_foreach_instruction(b, block_start, block_end,
+ vtn_handle_phis_first_pass);
+
+ vtn_foreach_instruction(b, block_start, block_end, handler);
+
+ block->end_block = nir_cursor_current_block(b->nb.cursor);
+
+ if ((*block->branch & SpvOpCodeMask) == SpvOpReturnValue) {
+ struct vtn_ssa_value *src = vtn_ssa_value(b, block->branch[1]);
+ vtn_local_store(b, src,
+ nir_deref_var_create(b, b->impl->return_var));
+ }
+
+ if (block->branch_type != vtn_branch_type_none) {
+ vtn_emit_branch(b, block->branch_type,
+ switch_fall_var, has_switch_break);
+ }
+
+ break;
+ }
+
+ case vtn_cf_node_type_if: {
+ struct vtn_if *vtn_if = (struct vtn_if *)node;
+
+ nir_if *if_stmt = nir_if_create(b->shader);
+ if_stmt->condition =
+ nir_src_for_ssa(vtn_ssa_value(b, vtn_if->condition)->def);
+ nir_cf_node_insert(b->nb.cursor, &if_stmt->cf_node);
+
+ bool sw_break = false;
+
+ b->nb.cursor = nir_after_cf_list(&if_stmt->then_list);
+ if (vtn_if->then_type == vtn_branch_type_none) {
+ vtn_emit_cf_list(b, &vtn_if->then_body,
+ switch_fall_var, &sw_break, handler);
+ } else {
+ vtn_emit_branch(b, vtn_if->then_type, switch_fall_var, &sw_break);
+ }
+
+ b->nb.cursor = nir_after_cf_list(&if_stmt->else_list);
+ if (vtn_if->else_type == vtn_branch_type_none) {
+ vtn_emit_cf_list(b, &vtn_if->else_body,
+ switch_fall_var, &sw_break, handler);
+ } else {
+ vtn_emit_branch(b, vtn_if->else_type, switch_fall_var, &sw_break);
+ }
+
+ b->nb.cursor = nir_after_cf_node(&if_stmt->cf_node);
+
+ /* If we encountered a switch break somewhere inside of the if,
+ * then it would have been handled correctly by calling
+           * emit_cf_list or emit_branch for the interior. However, we
+           * need to predicate everything following on whether or not we're
+ * still going.
+ */
+ if (sw_break) {
+ *has_switch_break = true;
+
+ nir_if *switch_if = nir_if_create(b->shader);
+ switch_if->condition =
+ nir_src_for_ssa(nir_load_var(&b->nb, switch_fall_var));
+ nir_cf_node_insert(b->nb.cursor, &switch_if->cf_node);
+
+            b->nb.cursor = nir_after_cf_list(&switch_if->then_list);
+ }
+ break;
+ }
+
+ case vtn_cf_node_type_loop: {
+ struct vtn_loop *vtn_loop = (struct vtn_loop *)node;
+
+ nir_loop *loop = nir_loop_create(b->shader);
+ nir_cf_node_insert(b->nb.cursor, &loop->cf_node);
+
+ b->nb.cursor = nir_after_cf_list(&loop->body);
+ vtn_emit_cf_list(b, &vtn_loop->body, NULL, NULL, handler);
+
+ if (!list_empty(&vtn_loop->cont_body)) {
+ /* If we have a non-trivial continue body then we need to put
+ * it at the beginning of the loop with a flag to ensure that
+ * it doesn't get executed in the first iteration.
+ */
+ nir_variable *do_cont =
+ nir_local_variable_create(b->nb.impl, glsl_bool_type(), "cont");
+
+ b->nb.cursor = nir_before_cf_node(&loop->cf_node);
+ nir_store_var(&b->nb, do_cont, nir_imm_int(&b->nb, NIR_FALSE), 1);
+
+ b->nb.cursor = nir_before_cf_list(&loop->body);
+ nir_if *cont_if = nir_if_create(b->shader);
+ cont_if->condition = nir_src_for_ssa(nir_load_var(&b->nb, do_cont));
+ nir_cf_node_insert(b->nb.cursor, &cont_if->cf_node);
+
+ b->nb.cursor = nir_after_cf_list(&cont_if->then_list);
+ vtn_emit_cf_list(b, &vtn_loop->cont_body, NULL, NULL, handler);
+
+ b->nb.cursor = nir_after_cf_node(&cont_if->cf_node);
+ nir_store_var(&b->nb, do_cont, nir_imm_int(&b->nb, NIR_TRUE), 1);
+
+ b->has_loop_continue = true;
+ }
+
+ b->nb.cursor = nir_after_cf_node(&loop->cf_node);
+ break;
+ }
+
+ case vtn_cf_node_type_switch: {
+ struct vtn_switch *vtn_switch = (struct vtn_switch *)node;
+
+ /* First, we create a variable to keep track of whether or not the
+ * switch is still going at any given point. Any switch breaks
+ * will set this variable to false.
+ */
+ nir_variable *fall_var =
+ nir_local_variable_create(b->nb.impl, glsl_bool_type(), "fall");
+ nir_store_var(&b->nb, fall_var, nir_imm_int(&b->nb, NIR_FALSE), 1);
+
+ /* Next, we gather up all of the conditions. We have to do this
+ * up-front because we also need to build an "any" condition so
+ * that we can use !any for default.
+ */
+ const int num_cases = list_length(&vtn_switch->cases);
+ NIR_VLA(nir_ssa_def *, conditions, num_cases);
+
+ nir_ssa_def *sel = vtn_ssa_value(b, vtn_switch->selector)->def;
+ /* An accumulation of all conditions. Used for the default */
+ nir_ssa_def *any = NULL;
+
+ int i = 0;
+ list_for_each_entry(struct vtn_case, cse, &vtn_switch->cases, link) {
+ if (cse->is_default) {
+ conditions[i++] = NULL;
+ continue;
+ }
+
+ nir_ssa_def *cond = NULL;
+ nir_array_foreach(&cse->values, uint32_t, val) {
+ nir_ssa_def *is_val =
+ nir_ieq(&b->nb, sel, nir_imm_int(&b->nb, *val));
+
+ cond = cond ? nir_ior(&b->nb, cond, is_val) : is_val;
+ }
+
+ any = any ? nir_ior(&b->nb, any, cond) : cond;
+ conditions[i++] = cond;
+ }
+ assert(i == num_cases);
+
+ /* Now we can walk the list of cases and actually emit code */
+ i = 0;
+ list_for_each_entry(struct vtn_case, cse, &vtn_switch->cases, link) {
+ /* Figure out the condition */
+ nir_ssa_def *cond = conditions[i++];
+ if (cse->is_default) {
+ assert(cond == NULL);
+ cond = nir_inot(&b->nb, any);
+ }
+ /* Take fallthrough into account */
+ cond = nir_ior(&b->nb, cond, nir_load_var(&b->nb, fall_var));
+
+ nir_if *case_if = nir_if_create(b->nb.shader);
+ case_if->condition = nir_src_for_ssa(cond);
+ nir_cf_node_insert(b->nb.cursor, &case_if->cf_node);
+
+ bool has_break = false;
+ b->nb.cursor = nir_after_cf_list(&case_if->then_list);
+ nir_store_var(&b->nb, fall_var, nir_imm_int(&b->nb, NIR_TRUE), 1);
+ vtn_emit_cf_list(b, &cse->body, fall_var, &has_break, handler);
+ (void)has_break; /* We don't care */
+
+ b->nb.cursor = nir_after_cf_node(&case_if->cf_node);
+ }
+ assert(i == num_cases);
+
+ break;
+ }
+
+ default:
+ unreachable("Invalid CF node type");
+ }
+ }
+}
+
+void
+vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
+ vtn_instruction_handler instruction_handler)
+{
+ nir_builder_init(&b->nb, func->impl);
+ b->nb.cursor = nir_after_cf_list(&func->impl->body);
+ b->has_loop_continue = false;
+ b->phi_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+
+ vtn_emit_cf_list(b, &func->body, NULL, NULL, instruction_handler);
+
+ vtn_foreach_instruction(b, func->start_block->label, func->end,
+ vtn_handle_phi_second_pass);
+
+ /* Continue blocks for loops get inserted before the body of the loop
+ * but instructions in the continue may use SSA defs in the loop body.
+ * Therefore, we need to repair SSA to insert the needed phi nodes.
+ */
+ if (b->has_loop_continue)
+ nir_repair_ssa_impl(func->impl);
+}
diff --git a/src/compiler/spirv/vtn_glsl450.c b/src/compiler/spirv/vtn_glsl450.c
new file mode 100644
index 00000000000..e05d28ffede
--- /dev/null
+++ b/src/compiler/spirv/vtn_glsl450.c
@@ -0,0 +1,666 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jason Ekstrand ([email protected])
+ *
+ */
+
+#include "vtn_private.h"
+#include "GLSL.std.450.h"
+
+#define M_PIf ((float) M_PI)
+#define M_PI_2f ((float) M_PI_2)
+#define M_PI_4f ((float) M_PI_4)
+
+static nir_ssa_def *
+build_mat2_det(nir_builder *b, nir_ssa_def *col[2])
+{
+ unsigned swiz[4] = {1, 0, 0, 0};
+ nir_ssa_def *p = nir_fmul(b, col[0], nir_swizzle(b, col[1], swiz, 2, true));
+ return nir_fsub(b, nir_channel(b, p, 0), nir_channel(b, p, 1));
+}
+
+static nir_ssa_def *
+build_mat3_det(nir_builder *b, nir_ssa_def *col[3])
+{
+ unsigned yzx[4] = {1, 2, 0, 0};
+ unsigned zxy[4] = {2, 0, 1, 0};
+
+ nir_ssa_def *prod0 =
+ nir_fmul(b, col[0],
+ nir_fmul(b, nir_swizzle(b, col[1], yzx, 3, true),
+ nir_swizzle(b, col[2], zxy, 3, true)));
+ nir_ssa_def *prod1 =
+ nir_fmul(b, col[0],
+ nir_fmul(b, nir_swizzle(b, col[1], zxy, 3, true),
+ nir_swizzle(b, col[2], yzx, 3, true)));
+
+ nir_ssa_def *diff = nir_fsub(b, prod0, prod1);
+
+ return nir_fadd(b, nir_channel(b, diff, 0),
+ nir_fadd(b, nir_channel(b, diff, 1),
+ nir_channel(b, diff, 2)));
+}
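The yzx/zxy swizzles above implement the scalar-triple-product form of the 3×3 determinant; writing the columns as c_0, c_1, c_2:

  \[ \det[\,c_0\; c_1\; c_2\,] = c_0 \cdot (c_1 \times c_2) = \sum_k c_{0,k}\bigl(c_{1,\mathrm{yzx}[k]}\,c_{2,\mathrm{zxy}[k]} - c_{1,\mathrm{zxy}[k]}\,c_{2,\mathrm{yzx}[k]}\bigr). \]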
+
+static nir_ssa_def *
+build_mat4_det(nir_builder *b, nir_ssa_def **col)
+{
+ nir_ssa_def *subdet[4];
+ for (unsigned i = 0; i < 4; i++) {
+ unsigned swiz[3];
+ for (unsigned j = 0; j < 3; j++)
+ swiz[j] = j + (j >= i);
+
+ nir_ssa_def *subcol[3];
+ subcol[0] = nir_swizzle(b, col[1], swiz, 3, true);
+ subcol[1] = nir_swizzle(b, col[2], swiz, 3, true);
+ subcol[2] = nir_swizzle(b, col[3], swiz, 3, true);
+
+ subdet[i] = build_mat3_det(b, subcol);
+ }
+
+ nir_ssa_def *prod = nir_fmul(b, col[0], nir_vec(b, subdet, 4));
+
+ return nir_fadd(b, nir_fsub(b, nir_channel(b, prod, 0),
+ nir_channel(b, prod, 1)),
+ nir_fsub(b, nir_channel(b, prod, 2),
+ nir_channel(b, prod, 3)));
+}
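Here subdet[i] is the 3×3 minor obtained by deleting row i and column 0, so the final add/subtract pattern is Laplace expansion along the first column:

  \[ \det A = \sum_{i=0}^{3} (-1)^i\, a_{i0}\, M_{i0} = a_{00}M_{00} - a_{10}M_{10} + a_{20}M_{20} - a_{30}M_{30}. \]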
+
+static nir_ssa_def *
+build_mat_det(struct vtn_builder *b, struct vtn_ssa_value *src)
+{
+ unsigned size = glsl_get_vector_elements(src->type);
+
+ nir_ssa_def *cols[4];
+ for (unsigned i = 0; i < size; i++)
+ cols[i] = src->elems[i]->def;
+
+   switch (size) {
+ case 2: return build_mat2_det(&b->nb, cols);
+ case 3: return build_mat3_det(&b->nb, cols);
+ case 4: return build_mat4_det(&b->nb, cols);
+ default:
+ unreachable("Invalid matrix size");
+ }
+}
+
+/* Computes the determinant of the submatrix given by taking src and
+ * removing the specified row and column.
+ */
+static nir_ssa_def *
+build_mat_subdet(struct nir_builder *b, struct vtn_ssa_value *src,
+ unsigned size, unsigned row, unsigned col)
+{
+ assert(row < size && col < size);
+ if (size == 2) {
+ return nir_channel(b, src->elems[1 - col]->def, 1 - row);
+ } else {
+ /* Swizzle to get all but the specified row */
+ unsigned swiz[3];
+ for (unsigned j = 0; j < 3; j++)
+ swiz[j] = j + (j >= row);
+
+ /* Grab all but the specified column */
+ nir_ssa_def *subcol[3];
+ for (unsigned j = 0; j < size; j++) {
+ if (j != col) {
+ subcol[j - (j > col)] = nir_swizzle(b, src->elems[j]->def,
+ swiz, size - 1, true);
+ }
+ }
+
+ if (size == 3) {
+ return build_mat2_det(b, subcol);
+ } else {
+ assert(size == 4);
+ return build_mat3_det(b, subcol);
+ }
+ }
+}
+
+static struct vtn_ssa_value *
+matrix_inverse(struct vtn_builder *b, struct vtn_ssa_value *src)
+{
+ nir_ssa_def *adj_col[4];
+ unsigned size = glsl_get_vector_elements(src->type);
+
+ /* Build up an adjugate matrix */
+ for (unsigned c = 0; c < size; c++) {
+ nir_ssa_def *elem[4];
+ for (unsigned r = 0; r < size; r++) {
+ elem[r] = build_mat_subdet(&b->nb, src, size, c, r);
+
+ if ((r + c) % 2)
+ elem[r] = nir_fneg(&b->nb, elem[r]);
+ }
+
+ adj_col[c] = nir_vec(&b->nb, elem, size);
+ }
+
+ nir_ssa_def *det_inv = nir_frcp(&b->nb, build_mat_det(b, src));
+
+ struct vtn_ssa_value *val = vtn_create_ssa_value(b, src->type);
+ for (unsigned i = 0; i < size; i++)
+ val->elems[i]->def = nir_fmul(&b->nb, adj_col[i], det_inv);
+
+ return val;
+}
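This is the classical adjugate (transposed cofactor) construction. With M_{rc} denoting the minor obtained by deleting row r and column c, the loop builds

  \[ \operatorname{adj}(A)_{rc} = (-1)^{r+c}\,M_{cr}, \qquad A^{-1} = \frac{1}{\det A}\operatorname{adj}(A), \]

one adjugate column per iteration, then scales every column by 1/det A.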
+
+static nir_ssa_def*
+build_length(nir_builder *b, nir_ssa_def *vec)
+{
+ switch (vec->num_components) {
+ case 1: return nir_fsqrt(b, nir_fmul(b, vec, vec));
+ case 2: return nir_fsqrt(b, nir_fdot2(b, vec, vec));
+ case 3: return nir_fsqrt(b, nir_fdot3(b, vec, vec));
+ case 4: return nir_fsqrt(b, nir_fdot4(b, vec, vec));
+ default:
+ unreachable("Invalid number of components");
+ }
+}
+
+static inline nir_ssa_def *
+build_fclamp(nir_builder *b,
+ nir_ssa_def *x, nir_ssa_def *min_val, nir_ssa_def *max_val)
+{
+ return nir_fmin(b, nir_fmax(b, x, min_val), max_val);
+}
+
+/**
+ * Return e^x.
+ */
+static nir_ssa_def *
+build_exp(nir_builder *b, nir_ssa_def *x)
+{
+ return nir_fexp2(b, nir_fmul(b, x, nir_imm_float(b, M_LOG2E)));
+}
+
+/**
+ * Return ln(x) - the natural logarithm of x.
+ */
+static nir_ssa_def *
+build_log(nir_builder *b, nir_ssa_def *x)
+{
+ return nir_fmul(b, nir_flog2(b, x), nir_imm_float(b, 1.0 / M_LOG2E));
+}
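Both helpers are a change of base on top of the fexp2/flog2 opcodes NIR actually provides:

  \[ e^{x} = 2^{\,x\log_2 e}, \qquad \ln x = \frac{\log_2 x}{\log_2 e} = (\ln 2)\,\log_2 x, \]

where 1.0 / M_LOG2E is ln 2, the constant build_log multiplies by.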
+
+/**
+ * Approximate asin(x) by the formula:
+ * asin~(x) = sign(x) * (pi/2 - sqrt(1 - |x|) * (pi/2 + |x|(pi/4 - 1 + |x|(p0 + |x|p1))))
+ *
+ * which is correct to first order at x=0 and x=±1 regardless of the p
+ * coefficients but can be made second-order correct at both ends by selecting
+ * the fit coefficients appropriately. Different p coefficients can be used
+ * in the asin and acos implementation to minimize some relative error metric
+ * in each case.
+ */
+static nir_ssa_def *
+build_asin(nir_builder *b, nir_ssa_def *x, float p0, float p1)
+{
+ nir_ssa_def *abs_x = nir_fabs(b, x);
+ return nir_fmul(b, nir_fsign(b, x),
+ nir_fsub(b, nir_imm_float(b, M_PI_2f),
+ nir_fmul(b, nir_fsqrt(b, nir_fsub(b, nir_imm_float(b, 1.0f), abs_x)),
+ nir_fadd(b, nir_imm_float(b, M_PI_2f),
+ nir_fmul(b, abs_x,
+ nir_fadd(b, nir_imm_float(b, M_PI_4f - 1.0f),
+ nir_fmul(b, abs_x,
+ nir_fadd(b, nir_imm_float(b, p0),
+ nir_fmul(b, abs_x,
+ nir_imm_float(b, p1))))))))));
+}
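Rendered as a single formula, the approximation from the comment is

  \[ \operatorname{asin}(x) \approx \operatorname{sign}(x)\left[\frac{\pi}{2} - \sqrt{1-|x|}\left(\frac{\pi}{2} + |x|\Bigl(\frac{\pi}{4} - 1 + |x|\,(p_0 + |x|\,p_1)\Bigr)\right)\right], \]

and the GLSLstd450Acos handler further down reuses it through acos(x) = π/2 − asin(x), with its own fitted p_0/p_1.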
+
+/**
+ * Compute xs[0] + xs[1] + xs[2] + ... using fadd.
+ */
+static nir_ssa_def *
+build_fsum(nir_builder *b, nir_ssa_def **xs, int terms)
+{
+ nir_ssa_def *accum = xs[0];
+
+ for (int i = 1; i < terms; i++)
+ accum = nir_fadd(b, accum, xs[i]);
+
+ return accum;
+}
+
+static nir_ssa_def *
+build_atan(nir_builder *b, nir_ssa_def *y_over_x)
+{
+ nir_ssa_def *abs_y_over_x = nir_fabs(b, y_over_x);
+ nir_ssa_def *one = nir_imm_float(b, 1.0f);
+
+ /*
+ * range-reduction, first step:
+ *
+ * / y_over_x if |y_over_x| <= 1.0;
+ * x = <
+ * \ 1.0 / y_over_x otherwise
+ */
+ nir_ssa_def *x = nir_fdiv(b, nir_fmin(b, abs_y_over_x, one),
+ nir_fmax(b, abs_y_over_x, one));
+
+ /*
+ * approximate atan by evaluating polynomial:
+ *
+ * x * 0.9999793128310355 - x^3 * 0.3326756418091246 +
+ * x^5 * 0.1938924977115610 - x^7 * 0.1173503194786851 +
+ * x^9 * 0.0536813784310406 - x^11 * 0.0121323213173444
+ */
+ nir_ssa_def *x_2 = nir_fmul(b, x, x);
+ nir_ssa_def *x_3 = nir_fmul(b, x_2, x);
+ nir_ssa_def *x_5 = nir_fmul(b, x_3, x_2);
+ nir_ssa_def *x_7 = nir_fmul(b, x_5, x_2);
+ nir_ssa_def *x_9 = nir_fmul(b, x_7, x_2);
+ nir_ssa_def *x_11 = nir_fmul(b, x_9, x_2);
+
+ nir_ssa_def *polynomial_terms[] = {
+ nir_fmul(b, x, nir_imm_float(b, 0.9999793128310355f)),
+ nir_fmul(b, x_3, nir_imm_float(b, -0.3326756418091246f)),
+ nir_fmul(b, x_5, nir_imm_float(b, 0.1938924977115610f)),
+ nir_fmul(b, x_7, nir_imm_float(b, -0.1173503194786851f)),
+ nir_fmul(b, x_9, nir_imm_float(b, 0.0536813784310406f)),
+ nir_fmul(b, x_11, nir_imm_float(b, -0.0121323213173444f)),
+ };
+
+ nir_ssa_def *tmp =
+ build_fsum(b, polynomial_terms, ARRAY_SIZE(polynomial_terms));
+
+ /* range-reduction fixup */
+ tmp = nir_fadd(b, tmp,
+ nir_fmul(b,
+ nir_b2f(b, nir_flt(b, one, abs_y_over_x)),
+ nir_fadd(b, nir_fmul(b, tmp,
+ nir_imm_float(b, -2.0f)),
+ nir_imm_float(b, M_PI_2f))));
+
+ /* sign fixup */
+ return nir_fmul(b, tmp, nir_fsign(b, y_over_x));
+}
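The range-reduction fixup is the usual reciprocal identity: the polynomial is only ever evaluated on [0, 1], and for |y_over_x| > 1 the true result is recovered from

  \[ \operatorname{atan}(x) = \frac{\pi}{2} - \operatorname{atan}\!\left(\frac{1}{x}\right) \quad (x > 0), \]

which is why the fixup adds π/2 − 2·tmp (turning tmp into π/2 − tmp) exactly when |y_over_x| > 1, before the sign is reapplied.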
+
+static nir_ssa_def *
+build_atan2(nir_builder *b, nir_ssa_def *y, nir_ssa_def *x)
+{
+ nir_ssa_def *zero = nir_imm_float(b, 0.0f);
+
+ /* If |x| >= 1.0e-8 * |y|: */
+ nir_ssa_def *condition =
+ nir_fge(b, nir_fabs(b, x),
+ nir_fmul(b, nir_imm_float(b, 1.0e-8f), nir_fabs(b, y)));
+
+ /* Then...call atan(y/x) and fix it up: */
+ nir_ssa_def *atan1 = build_atan(b, nir_fdiv(b, y, x));
+ nir_ssa_def *r_then =
+ nir_bcsel(b, nir_flt(b, x, zero),
+ nir_fadd(b, atan1,
+ nir_bcsel(b, nir_fge(b, y, zero),
+ nir_imm_float(b, M_PIf),
+ nir_imm_float(b, -M_PIf))),
+ atan1);
+
+ /* Else... */
+ nir_ssa_def *r_else =
+ nir_fmul(b, nir_fsign(b, y), nir_imm_float(b, M_PI_2f));
+
+ return nir_bcsel(b, condition, r_then, r_else);
+}
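For reference, this is the standard piecewise definition of atan2, with the 1.0e-8·|y| comparison standing in for the x ≈ 0 test:

  \[ \operatorname{atan2}(y,x) = \begin{cases} \operatorname{atan}(y/x) & x > 0,\\ \operatorname{atan}(y/x) + \pi & x < 0,\ y \ge 0,\\ \operatorname{atan}(y/x) - \pi & x < 0,\ y < 0,\\ \operatorname{sign}(y)\,\pi/2 & x \approx 0. \end{cases} \]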
+
+static nir_ssa_def *
+build_frexp(nir_builder *b, nir_ssa_def *x, nir_ssa_def **exponent)
+{
+ nir_ssa_def *abs_x = nir_fabs(b, x);
+ nir_ssa_def *zero = nir_imm_float(b, 0.0f);
+
+ /* Single-precision floating-point values are stored as
+ * 1 sign bit;
+ * 8 exponent bits;
+ * 23 mantissa bits.
+ *
+ * An exponent shift of 23 will shift the mantissa out, leaving only the
+    * exponent and sign bit (which itself may be zero, if the absolute value
+    * was taken before the bitcast and shift).
+ */
+ nir_ssa_def *exponent_shift = nir_imm_int(b, 23);
+ nir_ssa_def *exponent_bias = nir_imm_int(b, -126);
+
+ nir_ssa_def *sign_mantissa_mask = nir_imm_int(b, 0x807fffffu);
+
+ /* Exponent of floating-point values in the range [0.5, 1.0). */
+ nir_ssa_def *exponent_value = nir_imm_int(b, 0x3f000000u);
+
+ nir_ssa_def *is_not_zero = nir_fne(b, abs_x, zero);
+
+ *exponent =
+ nir_iadd(b, nir_ushr(b, abs_x, exponent_shift),
+ nir_bcsel(b, is_not_zero, exponent_bias, zero));
+
+ return nir_ior(b, nir_iand(b, x, sign_mantissa_mask),
+ nir_bcsel(b, is_not_zero, exponent_value, zero));
+}
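frexp() splits a float into a mantissa in [0.5, 1) and a power of two. With E the biased IEEE-754 single-precision exponent field (bias 127), the code returns

  \[ x = m \cdot 2^{e}, \qquad |m| \in [0.5, 1), \qquad e = E - 126; \]

for example, 6.0 is stored with E = 129, so it decomposes as m = 0.75, e = 3 (0.75 × 2³ = 6.0). A zero input is special-cased to m = 0, e = 0 by the two is_not_zero selects.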
+
+static nir_op
+vtn_nir_alu_op_for_spirv_glsl_opcode(enum GLSLstd450 opcode)
+{
+ switch (opcode) {
+ case GLSLstd450Round: return nir_op_fround_even;
+ case GLSLstd450RoundEven: return nir_op_fround_even;
+ case GLSLstd450Trunc: return nir_op_ftrunc;
+ case GLSLstd450FAbs: return nir_op_fabs;
+ case GLSLstd450SAbs: return nir_op_iabs;
+ case GLSLstd450FSign: return nir_op_fsign;
+ case GLSLstd450SSign: return nir_op_isign;
+ case GLSLstd450Floor: return nir_op_ffloor;
+ case GLSLstd450Ceil: return nir_op_fceil;
+ case GLSLstd450Fract: return nir_op_ffract;
+ case GLSLstd450Sin: return nir_op_fsin;
+ case GLSLstd450Cos: return nir_op_fcos;
+ case GLSLstd450Pow: return nir_op_fpow;
+ case GLSLstd450Exp2: return nir_op_fexp2;
+ case GLSLstd450Log2: return nir_op_flog2;
+ case GLSLstd450Sqrt: return nir_op_fsqrt;
+ case GLSLstd450InverseSqrt: return nir_op_frsq;
+ case GLSLstd450FMin: return nir_op_fmin;
+ case GLSLstd450UMin: return nir_op_umin;
+ case GLSLstd450SMin: return nir_op_imin;
+ case GLSLstd450FMax: return nir_op_fmax;
+ case GLSLstd450UMax: return nir_op_umax;
+ case GLSLstd450SMax: return nir_op_imax;
+ case GLSLstd450FMix: return nir_op_flrp;
+ case GLSLstd450Fma: return nir_op_ffma;
+ case GLSLstd450Ldexp: return nir_op_ldexp;
+ case GLSLstd450FindILsb: return nir_op_find_lsb;
+ case GLSLstd450FindSMsb: return nir_op_ifind_msb;
+ case GLSLstd450FindUMsb: return nir_op_ufind_msb;
+
+ /* Packing/Unpacking functions */
+ case GLSLstd450PackSnorm4x8: return nir_op_pack_snorm_4x8;
+ case GLSLstd450PackUnorm4x8: return nir_op_pack_unorm_4x8;
+ case GLSLstd450PackSnorm2x16: return nir_op_pack_snorm_2x16;
+ case GLSLstd450PackUnorm2x16: return nir_op_pack_unorm_2x16;
+ case GLSLstd450PackHalf2x16: return nir_op_pack_half_2x16;
+ case GLSLstd450UnpackSnorm4x8: return nir_op_unpack_snorm_4x8;
+ case GLSLstd450UnpackUnorm4x8: return nir_op_unpack_unorm_4x8;
+ case GLSLstd450UnpackSnorm2x16: return nir_op_unpack_snorm_2x16;
+ case GLSLstd450UnpackUnorm2x16: return nir_op_unpack_unorm_2x16;
+ case GLSLstd450UnpackHalf2x16: return nir_op_unpack_half_2x16;
+
+ default:
+ unreachable("No NIR equivalent");
+ }
+}
+
+static void
+handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
+ const uint32_t *w, unsigned count)
+{
+ struct nir_builder *nb = &b->nb;
+ const struct glsl_type *dest_type =
+ vtn_value(b, w[1], vtn_value_type_type)->type->type;
+
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ val->ssa = vtn_create_ssa_value(b, dest_type);
+
+ /* Collect the various SSA sources */
+ unsigned num_inputs = count - 5;
+ nir_ssa_def *src[3] = { NULL, };
+ for (unsigned i = 0; i < num_inputs; i++)
+ src[i] = vtn_ssa_value(b, w[i + 5])->def;
+
+ switch (entrypoint) {
+ case GLSLstd450Radians:
+ val->ssa->def = nir_fmul(nb, src[0], nir_imm_float(nb, 0.01745329251));
+ return;
+ case GLSLstd450Degrees:
+ val->ssa->def = nir_fmul(nb, src[0], nir_imm_float(nb, 57.2957795131));
+ return;
+ case GLSLstd450Tan:
+ val->ssa->def = nir_fdiv(nb, nir_fsin(nb, src[0]),
+ nir_fcos(nb, src[0]));
+ return;
+
+ case GLSLstd450Modf: {
+ nir_ssa_def *sign = nir_fsign(nb, src[0]);
+ nir_ssa_def *abs = nir_fabs(nb, src[0]);
+ val->ssa->def = nir_fmul(nb, sign, nir_ffract(nb, abs));
+ nir_store_deref_var(nb, vtn_nir_deref(b, w[6]),
+ nir_fmul(nb, sign, nir_ffloor(nb, abs)), 0xf);
+ return;
+ }
+
+ case GLSLstd450ModfStruct: {
+ nir_ssa_def *sign = nir_fsign(nb, src[0]);
+ nir_ssa_def *abs = nir_fabs(nb, src[0]);
+ assert(glsl_type_is_struct(val->ssa->type));
+ val->ssa->elems[0]->def = nir_fmul(nb, sign, nir_ffract(nb, abs));
+ val->ssa->elems[1]->def = nir_fmul(nb, sign, nir_ffloor(nb, abs));
+ return;
+ }
+
+ case GLSLstd450Step:
+ val->ssa->def = nir_sge(nb, src[1], src[0]);
+ return;
+
+ case GLSLstd450Length:
+ val->ssa->def = build_length(nb, src[0]);
+ return;
+ case GLSLstd450Distance:
+ val->ssa->def = build_length(nb, nir_fsub(nb, src[0], src[1]));
+ return;
+ case GLSLstd450Normalize:
+ val->ssa->def = nir_fdiv(nb, src[0], build_length(nb, src[0]));
+ return;
+
+ case GLSLstd450Exp:
+ val->ssa->def = build_exp(nb, src[0]);
+ return;
+
+ case GLSLstd450Log:
+ val->ssa->def = build_log(nb, src[0]);
+ return;
+
+ case GLSLstd450FClamp:
+ val->ssa->def = build_fclamp(nb, src[0], src[1], src[2]);
+ return;
+ case GLSLstd450UClamp:
+ val->ssa->def = nir_umin(nb, nir_umax(nb, src[0], src[1]), src[2]);
+ return;
+ case GLSLstd450SClamp:
+ val->ssa->def = nir_imin(nb, nir_imax(nb, src[0], src[1]), src[2]);
+ return;
+
+ case GLSLstd450Cross: {
+ unsigned yzx[4] = { 1, 2, 0, 0 };
+ unsigned zxy[4] = { 2, 0, 1, 0 };
+ val->ssa->def =
+ nir_fsub(nb, nir_fmul(nb, nir_swizzle(nb, src[0], yzx, 3, true),
+ nir_swizzle(nb, src[1], zxy, 3, true)),
+ nir_fmul(nb, nir_swizzle(nb, src[0], zxy, 3, true),
+ nir_swizzle(nb, src[1], yzx, 3, true)));
+ return;
+ }
+
+ case GLSLstd450SmoothStep: {
+ /* t = clamp((x - edge0) / (edge1 - edge0), 0, 1) */
+ nir_ssa_def *t =
+ build_fclamp(nb, nir_fdiv(nb, nir_fsub(nb, src[2], src[0]),
+ nir_fsub(nb, src[1], src[0])),
+ nir_imm_float(nb, 0.0), nir_imm_float(nb, 1.0));
+ /* result = t * t * (3 - 2 * t) */
+ val->ssa->def =
+ nir_fmul(nb, t, nir_fmul(nb, t,
+ nir_fsub(nb, nir_imm_float(nb, 3.0),
+ nir_fmul(nb, nir_imm_float(nb, 2.0), t))));
+ return;
+ }
+
+ case GLSLstd450FaceForward:
+ val->ssa->def =
+ nir_bcsel(nb, nir_flt(nb, nir_fdot(nb, src[2], src[1]),
+ nir_imm_float(nb, 0.0)),
+ src[0], nir_fneg(nb, src[0]));
+ return;
+
+ case GLSLstd450Reflect:
+ /* I - 2 * dot(N, I) * N */
+ val->ssa->def =
+ nir_fsub(nb, src[0], nir_fmul(nb, nir_imm_float(nb, 2.0),
+ nir_fmul(nb, nir_fdot(nb, src[0], src[1]),
+ src[1])));
+ return;
+
+ case GLSLstd450Refract: {
+ nir_ssa_def *I = src[0];
+ nir_ssa_def *N = src[1];
+ nir_ssa_def *eta = src[2];
+ nir_ssa_def *n_dot_i = nir_fdot(nb, N, I);
+ nir_ssa_def *one = nir_imm_float(nb, 1.0);
+ nir_ssa_def *zero = nir_imm_float(nb, 0.0);
+ /* k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I)) */
+ nir_ssa_def *k =
+ nir_fsub(nb, one, nir_fmul(nb, eta, nir_fmul(nb, eta,
+ nir_fsub(nb, one, nir_fmul(nb, n_dot_i, n_dot_i)))));
+ nir_ssa_def *result =
+ nir_fsub(nb, nir_fmul(nb, eta, I),
+ nir_fmul(nb, nir_fadd(nb, nir_fmul(nb, eta, n_dot_i),
+ nir_fsqrt(nb, k)), N));
+ /* XXX: bcsel, or if statement? */
+ val->ssa->def = nir_bcsel(nb, nir_flt(nb, k, zero), zero, result);
+ return;
+ }
+
+ case GLSLstd450Sinh:
+ /* 0.5 * (e^x - e^(-x)) */
+ val->ssa->def =
+ nir_fmul(nb, nir_imm_float(nb, 0.5f),
+ nir_fsub(nb, build_exp(nb, src[0]),
+ build_exp(nb, nir_fneg(nb, src[0]))));
+ return;
+
+ case GLSLstd450Cosh:
+ /* 0.5 * (e^x + e^(-x)) */
+ val->ssa->def =
+ nir_fmul(nb, nir_imm_float(nb, 0.5f),
+ nir_fadd(nb, build_exp(nb, src[0]),
+ build_exp(nb, nir_fneg(nb, src[0]))));
+ return;
+
+ case GLSLstd450Tanh:
+ /* (0.5 * (e^x - e^(-x))) / (0.5 * (e^x + e^(-x))) */
+ val->ssa->def =
+ nir_fdiv(nb, nir_fmul(nb, nir_imm_float(nb, 0.5f),
+ nir_fsub(nb, build_exp(nb, src[0]),
+ build_exp(nb, nir_fneg(nb, src[0])))),
+ nir_fmul(nb, nir_imm_float(nb, 0.5f),
+ nir_fadd(nb, build_exp(nb, src[0]),
+ build_exp(nb, nir_fneg(nb, src[0])))));
+ return;
+
+ case GLSLstd450Asinh:
+ val->ssa->def = nir_fmul(nb, nir_fsign(nb, src[0]),
+ build_log(nb, nir_fadd(nb, nir_fabs(nb, src[0]),
+ nir_fsqrt(nb, nir_fadd(nb, nir_fmul(nb, src[0], src[0]),
+ nir_imm_float(nb, 1.0f))))));
+ return;
+ case GLSLstd450Acosh:
+ val->ssa->def = build_log(nb, nir_fadd(nb, src[0],
+ nir_fsqrt(nb, nir_fsub(nb, nir_fmul(nb, src[0], src[0]),
+ nir_imm_float(nb, 1.0f)))));
+ return;
+ case GLSLstd450Atanh: {
+ nir_ssa_def *one = nir_imm_float(nb, 1.0);
+ val->ssa->def = nir_fmul(nb, nir_imm_float(nb, 0.5f),
+ build_log(nb, nir_fdiv(nb, nir_fadd(nb, one, src[0]),
+ nir_fsub(nb, one, src[0]))));
+ return;
+ }
+
+ case GLSLstd450Asin:
+ val->ssa->def = build_asin(nb, src[0], 0.086566724, -0.03102955);
+ return;
+
+ case GLSLstd450Acos:
+ val->ssa->def = nir_fsub(nb, nir_imm_float(nb, M_PI_2f),
+ build_asin(nb, src[0], 0.08132463, -0.02363318));
+ return;
+
+ case GLSLstd450Atan:
+ val->ssa->def = build_atan(nb, src[0]);
+ return;
+
+ case GLSLstd450Atan2:
+ val->ssa->def = build_atan2(nb, src[0], src[1]);
+ return;
+
+ case GLSLstd450Frexp: {
+ nir_ssa_def *exponent;
+ val->ssa->def = build_frexp(nb, src[0], &exponent);
+ nir_store_deref_var(nb, vtn_nir_deref(b, w[6]), exponent, 0xf);
+ return;
+ }
+
+ case GLSLstd450FrexpStruct: {
+ assert(glsl_type_is_struct(val->ssa->type));
+ val->ssa->elems[0]->def = build_frexp(nb, src[0],
+ &val->ssa->elems[1]->def);
+ return;
+ }
+
+ default:
+ val->ssa->def =
+ nir_build_alu(&b->nb, vtn_nir_alu_op_for_spirv_glsl_opcode(entrypoint),
+ src[0], src[1], src[2], NULL);
+ return;
+ }
+}
+
+bool
+vtn_handle_glsl450_instruction(struct vtn_builder *b, uint32_t ext_opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch ((enum GLSLstd450)ext_opcode) {
+ case GLSLstd450Determinant: {
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ val->ssa = rzalloc(b, struct vtn_ssa_value);
+ val->ssa->type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
+ val->ssa->def = build_mat_det(b, vtn_ssa_value(b, w[5]));
+ break;
+ }
+
+ case GLSLstd450MatrixInverse: {
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ val->ssa = matrix_inverse(b, vtn_ssa_value(b, w[5]));
+ break;
+ }
+
+ case GLSLstd450InterpolateAtCentroid:
+ case GLSLstd450InterpolateAtSample:
+ case GLSLstd450InterpolateAtOffset:
+ unreachable("Unhandled opcode");
+
+ default:
+ handle_glsl450_alu(b, (enum GLSLstd450)ext_opcode, w, count);
+ }
+
+ return true;
+}
diff --git a/src/compiler/spirv/vtn_private.h b/src/compiler/spirv/vtn_private.h
new file mode 100644
index 00000000000..3840d8c4b65
--- /dev/null
+++ b/src/compiler/spirv/vtn_private.h
@@ -0,0 +1,484 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jason Ekstrand ([email protected])
+ *
+ */
+
+#include "nir/nir.h"
+#include "nir/nir_builder.h"
+#include "nir/nir_array.h"
+#include "nir_spirv.h"
+#include "spirv.h"
+
+struct vtn_builder;
+struct vtn_decoration;
+
+enum vtn_value_type {
+ vtn_value_type_invalid = 0,
+ vtn_value_type_undef,
+ vtn_value_type_string,
+ vtn_value_type_decoration_group,
+ vtn_value_type_type,
+ vtn_value_type_constant,
+ vtn_value_type_access_chain,
+ vtn_value_type_function,
+ vtn_value_type_block,
+ vtn_value_type_ssa,
+ vtn_value_type_extension,
+ vtn_value_type_image_pointer,
+ vtn_value_type_sampled_image,
+};
+
+enum vtn_branch_type {
+ vtn_branch_type_none,
+ vtn_branch_type_switch_break,
+ vtn_branch_type_switch_fallthrough,
+ vtn_branch_type_loop_break,
+ vtn_branch_type_loop_continue,
+ vtn_branch_type_discard,
+ vtn_branch_type_return,
+};
+
+enum vtn_cf_node_type {
+ vtn_cf_node_type_block,
+ vtn_cf_node_type_if,
+ vtn_cf_node_type_loop,
+ vtn_cf_node_type_switch,
+};
+
+struct vtn_cf_node {
+ struct list_head link;
+ enum vtn_cf_node_type type;
+};
+
+struct vtn_loop {
+ struct vtn_cf_node node;
+
+ /* The main body of the loop */
+ struct list_head body;
+
+ /* The "continue" part of the loop. This gets executed after the body
+ * and is where you go when you hit a continue.
+ */
+ struct list_head cont_body;
+
+ SpvLoopControlMask control;
+};
+
+struct vtn_if {
+ struct vtn_cf_node node;
+
+ uint32_t condition;
+
+ enum vtn_branch_type then_type;
+ struct list_head then_body;
+
+ enum vtn_branch_type else_type;
+ struct list_head else_body;
+
+ SpvSelectionControlMask control;
+};
+
+struct vtn_case {
+ struct list_head link;
+
+ struct list_head body;
+
+ /* The block that starts this case */
+ struct vtn_block *start_block;
+
+ /* The fallthrough case, if any */
+ struct vtn_case *fallthrough;
+
+ /* The uint32_t values that map to this case */
+ nir_array values;
+
+ /* True if this is the default case */
+ bool is_default;
+
+ /* Initialized to false; used when sorting the list of cases */
+ bool visited;
+};
+
+struct vtn_switch {
+ struct vtn_cf_node node;
+
+ uint32_t selector;
+
+ struct list_head cases;
+};
+
+struct vtn_block {
+ struct vtn_cf_node node;
+
+ /** A pointer to the label instruction */
+ const uint32_t *label;
+
+   /** A pointer to the merge instruction (or NULL if none exists) */
+ const uint32_t *merge;
+
+ /** A pointer to the branch instruction that ends this block */
+ const uint32_t *branch;
+
+ enum vtn_branch_type branch_type;
+
+ /** Points to the loop that this block starts (if it starts a loop) */
+ struct vtn_loop *loop;
+
+ /** Points to the switch case started by this block (if any) */
+ struct vtn_case *switch_case;
+
+   /** The last NIR block emitted for this SPIR-V block. */
+ nir_block *end_block;
+};
+
+struct vtn_function {
+ struct exec_node node;
+
+ nir_function_impl *impl;
+ struct vtn_block *start_block;
+
+ struct list_head body;
+
+ const uint32_t *end;
+
+ SpvFunctionControlMask control;
+};
+
+typedef bool (*vtn_instruction_handler)(struct vtn_builder *, uint32_t,
+ const uint32_t *, unsigned);
+
+void vtn_build_cfg(struct vtn_builder *b, const uint32_t *words,
+ const uint32_t *end);
+void vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
+ vtn_instruction_handler instruction_handler);
+
+const uint32_t *
+vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
+ const uint32_t *end, vtn_instruction_handler handler);
+
+struct vtn_ssa_value {
+ union {
+ nir_ssa_def *def;
+ struct vtn_ssa_value **elems;
+ };
+
+ /* For matrices, if this is non-NULL, then this value is actually the
+ * transpose of some other value. The value that `transposed` points to
+ * always dominates this value.
+ */
+ struct vtn_ssa_value *transposed;
+
+ const struct glsl_type *type;
+};
+
+struct vtn_type {
+ const struct glsl_type *type;
+
+ /* The value that declares this type. Used for finding decorations */
+ struct vtn_value *val;
+
+ /* for matrices, whether the matrix is stored row-major */
+ bool row_major;
+
+ /* for structs, the offset of each member */
+ unsigned *offsets;
+
+ /* for structs, whether it was decorated as a "non-SSBO-like" block */
+ bool block;
+
+ /* for structs, whether it was decorated as an "SSBO-like" block */
+ bool buffer_block;
+
+ /* for structs with block == true, whether this is a builtin block (i.e. a
+ * block that contains only builtins).
+ */
+ bool builtin_block;
+
+ /* Image format for image_load_store type images */
+ unsigned image_format;
+
+ /* Access qualifier for storage images */
+ SpvAccessQualifier access_qualifier;
+
+ /* for arrays and matrices, the array stride */
+ unsigned stride;
+
+ /* for arrays, the vtn_type for the elements of the array */
+ struct vtn_type *array_element;
+
+ /* for structures, the vtn_type for each member */
+ struct vtn_type **members;
+
+ /* Whether this type, or a parent type, has been decorated as a builtin */
+ bool is_builtin;
+
+ SpvBuiltIn builtin;
+};
+
+struct vtn_variable;
+
+enum vtn_access_mode {
+ vtn_access_mode_id,
+ vtn_access_mode_literal,
+};
+
+struct vtn_access_link {
+ enum vtn_access_mode mode;
+ uint32_t id;
+};
+
+struct vtn_access_chain {
+ struct vtn_variable *var;
+
+ uint32_t length;
+
+ /* Struct elements and array offsets */
+ struct vtn_access_link link[0];
+};
+
+enum vtn_variable_mode {
+ vtn_variable_mode_local,
+ vtn_variable_mode_global,
+ vtn_variable_mode_param,
+ vtn_variable_mode_ubo,
+ vtn_variable_mode_ssbo,
+ vtn_variable_mode_push_constant,
+ vtn_variable_mode_image,
+ vtn_variable_mode_sampler,
+ vtn_variable_mode_workgroup,
+ vtn_variable_mode_input,
+ vtn_variable_mode_output,
+};
+
+struct vtn_variable {
+ enum vtn_variable_mode mode;
+
+ struct vtn_type *type;
+
+ unsigned descriptor_set;
+ unsigned binding;
+
+ nir_variable *var;
+ nir_variable **members;
+
+ struct vtn_access_chain chain;
+};
+
+struct vtn_image_pointer {
+ struct vtn_access_chain *image;
+ nir_ssa_def *coord;
+ nir_ssa_def *sample;
+};
+
+struct vtn_sampled_image {
+ struct vtn_access_chain *image; /* Image or array of images */
+ struct vtn_access_chain *sampler; /* Sampler */
+};
+
+struct vtn_value {
+ enum vtn_value_type value_type;
+ const char *name;
+ struct vtn_decoration *decoration;
+ union {
+ void *ptr;
+ char *str;
+ struct vtn_type *type;
+ struct {
+ nir_constant *constant;
+ const struct glsl_type *const_type;
+ };
+ struct vtn_access_chain *access_chain;
+ struct vtn_image_pointer *image;
+ struct vtn_sampled_image *sampled_image;
+ struct vtn_function *func;
+ struct vtn_block *block;
+ struct vtn_ssa_value *ssa;
+ vtn_instruction_handler ext_handler;
+ };
+};
+
+#define VTN_DEC_DECORATION -1
+#define VTN_DEC_EXECUTION_MODE -2
+#define VTN_DEC_STRUCT_MEMBER0 0
+
+struct vtn_decoration {
+ struct vtn_decoration *next;
+
+ /* Specifies how to apply this decoration. Negative values represent a
+ * decoration or execution mode. (See the VTN_DEC_ #defines above.)
+ * Non-negative values specify that it applies to a structure member.
+ */
+ int scope;
+
+ const uint32_t *literals;
+ struct vtn_value *group;
+
+ union {
+ SpvDecoration decoration;
+ SpvExecutionMode exec_mode;
+ };
+};
+
+struct vtn_builder {
+ nir_builder nb;
+
+ nir_shader *shader;
+ nir_function_impl *impl;
+ struct vtn_block *block;
+
+ /* Current file, line, and column. Useful for debugging. Set
+ * automatically by vtn_foreach_instruction.
+ */
+ char *file;
+ int line, col;
+
+ /*
+ * In SPIR-V, constants are global, whereas in NIR, the load_const
+ * instruction we use is per-function. So while we parse each function, we
+    * keep a hash table of constants we've resolved to SSA values so
+ * far, and we lazily resolve them when we see them used in a function.
+ */
+ struct hash_table *const_table;
+
+ /*
+ * Map from phi instructions (pointer to the start of the instruction)
+ * to the variable corresponding to it.
+ */
+ struct hash_table *phi_table;
+
+ unsigned num_specializations;
+ struct nir_spirv_specialization *specializations;
+
+ unsigned value_id_bound;
+ struct vtn_value *values;
+
+ gl_shader_stage entry_point_stage;
+ const char *entry_point_name;
+ struct vtn_value *entry_point;
+ bool origin_upper_left;
+
+ struct vtn_function *func;
+ struct exec_list functions;
+
+ /* Current function parameter index */
+ unsigned func_param_idx;
+
+ bool has_loop_continue;
+};
+
+static inline struct vtn_value *
+vtn_push_value(struct vtn_builder *b, uint32_t value_id,
+ enum vtn_value_type value_type)
+{
+ assert(value_id < b->value_id_bound);
+ assert(b->values[value_id].value_type == vtn_value_type_invalid);
+
+ b->values[value_id].value_type = value_type;
+
+ return &b->values[value_id];
+}
+
+static inline struct vtn_value *
+vtn_untyped_value(struct vtn_builder *b, uint32_t value_id)
+{
+ assert(value_id < b->value_id_bound);
+ return &b->values[value_id];
+}
+
+static inline struct vtn_value *
+vtn_value(struct vtn_builder *b, uint32_t value_id,
+ enum vtn_value_type value_type)
+{
+ struct vtn_value *val = vtn_untyped_value(b, value_id);
+ assert(val->value_type == value_type);
+ return val;
+}
+
+struct vtn_ssa_value *vtn_ssa_value(struct vtn_builder *b, uint32_t value_id);
+
+struct vtn_ssa_value *vtn_create_ssa_value(struct vtn_builder *b,
+ const struct glsl_type *type);
+
+struct vtn_ssa_value *vtn_ssa_transpose(struct vtn_builder *b,
+ struct vtn_ssa_value *src);
+
+nir_ssa_def *vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src,
+ unsigned index);
+nir_ssa_def *vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
+ nir_ssa_def *index);
+nir_ssa_def *vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src,
+ nir_ssa_def *insert, unsigned index);
+nir_ssa_def *vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
+ nir_ssa_def *insert, nir_ssa_def *index);
+
+nir_deref_var *vtn_nir_deref(struct vtn_builder *b, uint32_t id);
+
+nir_deref_var *vtn_access_chain_to_deref(struct vtn_builder *b,
+ struct vtn_access_chain *chain);
+nir_ssa_def *
+vtn_access_chain_to_offset(struct vtn_builder *b,
+ struct vtn_access_chain *chain,
+ nir_ssa_def **index_out, struct vtn_type **type_out,
+ unsigned *end_idx_out, bool stop_at_matrix);
+
+struct vtn_ssa_value *vtn_local_load(struct vtn_builder *b, nir_deref_var *src);
+
+void vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
+ nir_deref_var *dest);
+
+struct vtn_ssa_value *
+vtn_variable_load(struct vtn_builder *b, struct vtn_access_chain *src);
+
+void vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
+ struct vtn_access_chain *dest);
+
+void vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count);
+
+
+typedef void (*vtn_decoration_foreach_cb)(struct vtn_builder *,
+ struct vtn_value *,
+ int member,
+ const struct vtn_decoration *,
+ void *);
+
+void vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
+ vtn_decoration_foreach_cb cb, void *data);
+
+typedef void (*vtn_execution_mode_foreach_cb)(struct vtn_builder *,
+ struct vtn_value *,
+ const struct vtn_decoration *,
+ void *);
+
+void vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
+ vtn_execution_mode_foreach_cb cb, void *data);
+
+nir_op vtn_nir_alu_op_for_spirv_opcode(SpvOp opcode, bool *swap);
+
+void vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count);
+
+bool vtn_handle_glsl450_instruction(struct vtn_builder *b, uint32_t ext_opcode,
+ const uint32_t *words, unsigned count);
diff --git a/src/compiler/spirv/vtn_variables.c b/src/compiler/spirv/vtn_variables.c
new file mode 100644
index 00000000000..3cbac1e5da8
--- /dev/null
+++ b/src/compiler/spirv/vtn_variables.c
@@ -0,0 +1,1415 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jason Ekstrand ([email protected])
+ *
+ */
+
+#include "vtn_private.h"
+
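+/* Returns a copy of the given access chain with room for new_ids additional
+ * links.  The existing links are copied over; the new ones are left for the
+ * caller to fill in.
+ */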
+static struct vtn_access_chain *
+vtn_access_chain_extend(struct vtn_builder *b, struct vtn_access_chain *old,
+ unsigned new_ids)
+{
+ struct vtn_access_chain *chain;
+
+ unsigned new_len = old->length + new_ids;
+ chain = ralloc_size(b, sizeof(*chain) + new_len * sizeof(chain->link[0]));
+
+ chain->var = old->var;
+ chain->length = new_len;
+
+ for (unsigned i = 0; i < old->length; i++)
+ chain->link[i] = old->link[i];
+
+ return chain;
+}
+
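+/* Converts a single access-chain link into a NIR SSA index scaled by the
+ * given stride: an immediate for literal links, or an imul (skipped when
+ * the stride is 1) for id links.
+ */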
+static nir_ssa_def *
+vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
+ unsigned stride)
+{
+ assert(stride > 0);
+ if (link.mode == vtn_access_mode_literal) {
+ return nir_imm_int(&b->nb, link.id * stride);
+ } else if (stride == 1) {
+ return vtn_ssa_value(b, link.id)->def;
+ } else {
+ return nir_imul(&b->nb, vtn_ssa_value(b, link.id)->def,
+ nir_imm_int(&b->nb, stride));
+ }
+}
+
+static struct vtn_type *
+vtn_access_chain_tail_type(struct vtn_builder *b,
+ struct vtn_access_chain *chain)
+{
+ struct vtn_type *type = chain->var->type;
+ for (unsigned i = 0; i < chain->length; i++) {
+ if (glsl_type_is_struct(type->type)) {
+ assert(chain->link[i].mode == vtn_access_mode_literal);
+ type = type->members[chain->link[i].id];
+ } else {
+ type = type->array_element;
+ }
+ }
+ return type;
+}
+
+/* Crawls a chain of array derefs and rewrites the types so that the
+ * lengths stay the same but the terminal type is the one given by
+ * tail_type. This is useful for split structures.
+ */
+static void
+rewrite_deref_types(nir_deref *deref, const struct glsl_type *type)
+{
+ deref->type = type;
+ if (deref->child) {
+ assert(deref->child->deref_type == nir_deref_type_array);
+ assert(glsl_type_is_array(deref->type));
+ rewrite_deref_types(deref->child, glsl_get_array_element(type));
+ }
+}
+
+nir_deref_var *
+vtn_access_chain_to_deref(struct vtn_builder *b, struct vtn_access_chain *chain)
+{
+ nir_deref_var *deref_var;
+ if (chain->var->var) {
+ deref_var = nir_deref_var_create(b, chain->var->var);
+ } else {
+ assert(chain->var->members);
+ /* Create the deref_var manually. It will get filled out later. */
+ deref_var = rzalloc(b, nir_deref_var);
+ deref_var->deref.deref_type = nir_deref_type_var;
+ }
+
+ struct vtn_type *deref_type = chain->var->type;
+ nir_deref *tail = &deref_var->deref;
+ nir_variable **members = chain->var->members;
+
+ for (unsigned i = 0; i < chain->length; i++) {
+ enum glsl_base_type base_type = glsl_get_base_type(deref_type->type);
+ switch (base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_ARRAY: {
+ deref_type = deref_type->array_element;
+
+ nir_deref_array *deref_arr = nir_deref_array_create(b);
+ deref_arr->deref.type = deref_type->type;
+
+ if (chain->link[i].mode == vtn_access_mode_literal) {
+ deref_arr->deref_array_type = nir_deref_array_type_direct;
+ deref_arr->base_offset = chain->link[i].id;
+ } else {
+ assert(chain->link[i].mode == vtn_access_mode_id);
+ deref_arr->deref_array_type = nir_deref_array_type_indirect;
+ deref_arr->base_offset = 0;
+ deref_arr->indirect =
+ nir_src_for_ssa(vtn_ssa_value(b, chain->link[i].id)->def);
+ }
+ tail->child = &deref_arr->deref;
+ tail = tail->child;
+ break;
+ }
+
+ case GLSL_TYPE_STRUCT: {
+ assert(chain->link[i].mode == vtn_access_mode_literal);
+ unsigned idx = chain->link[i].id;
+ deref_type = deref_type->members[idx];
+ if (members) {
+ /* This is a pre-split structure. */
+ deref_var->var = members[idx];
+ rewrite_deref_types(&deref_var->deref, members[idx]->type);
+ assert(tail->type == deref_type->type);
+ members = NULL;
+ } else {
+ nir_deref_struct *deref_struct = nir_deref_struct_create(b, idx);
+ deref_struct->deref.type = deref_type->type;
+ tail->child = &deref_struct->deref;
+ tail = tail->child;
+ }
+ break;
+ }
+ default:
+ unreachable("Invalid type for deref");
+ }
+ }
+
+ assert(members == NULL);
+ return deref_var;
+}
+
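+/* Recursively loads or stores through a local deref, splitting structs,
+ * arrays, and matrices until it can emit per-vector load_var/store_var
+ * intrinsics.
+ */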
+static void
+_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_var *deref,
+ nir_deref *tail, struct vtn_ssa_value *inout)
+{
+ /* The deref tail may contain a deref to select a component of a vector (in
+ * other words, it might not be an actual tail) so we have to save it away
+ * here since we overwrite it later.
+ */
+ nir_deref *old_child = tail->child;
+
+ if (glsl_type_is_vector_or_scalar(tail->type)) {
+ /* Terminate the deref chain in case there is one more link to pick
+ * off a component of the vector.
+ */
+ tail->child = NULL;
+
+ nir_intrinsic_op op = load ? nir_intrinsic_load_var :
+ nir_intrinsic_store_var;
+
+ nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
+ intrin->variables[0] =
+ nir_deref_as_var(nir_copy_deref(intrin, &deref->deref));
+ intrin->num_components = glsl_get_vector_elements(tail->type);
+
+ if (load) {
+ nir_ssa_dest_init(&intrin->instr, &intrin->dest,
+ intrin->num_components,
+ glsl_get_bit_size(glsl_get_base_type(tail->type)),
+ NULL);
+ inout->def = &intrin->dest.ssa;
+ } else {
+ nir_intrinsic_set_write_mask(intrin, (1 << intrin->num_components) - 1);
+ intrin->src[0] = nir_src_for_ssa(inout->def);
+ }
+
+ nir_builder_instr_insert(&b->nb, &intrin->instr);
+ } else if (glsl_get_base_type(tail->type) == GLSL_TYPE_ARRAY ||
+ glsl_type_is_matrix(tail->type)) {
+ unsigned elems = glsl_get_length(tail->type);
+ nir_deref_array *deref_arr = nir_deref_array_create(b);
+ deref_arr->deref_array_type = nir_deref_array_type_direct;
+ deref_arr->deref.type = glsl_get_array_element(tail->type);
+ tail->child = &deref_arr->deref;
+ for (unsigned i = 0; i < elems; i++) {
+ deref_arr->base_offset = i;
+ _vtn_local_load_store(b, load, deref, tail->child, inout->elems[i]);
+ }
+ } else {
+ assert(glsl_get_base_type(tail->type) == GLSL_TYPE_STRUCT);
+ unsigned elems = glsl_get_length(tail->type);
+ nir_deref_struct *deref_struct = nir_deref_struct_create(b, 0);
+ tail->child = &deref_struct->deref;
+ for (unsigned i = 0; i < elems; i++) {
+ deref_struct->index = i;
+ deref_struct->deref.type = glsl_get_struct_field(tail->type, i);
+ _vtn_local_load_store(b, load, deref, tail->child, inout->elems[i]);
+ }
+ }
+
+ tail->child = old_child;
+}
+
+nir_deref_var *
+vtn_nir_deref(struct vtn_builder *b, uint32_t id)
+{
+ struct vtn_access_chain *chain =
+ vtn_value(b, id, vtn_value_type_access_chain)->access_chain;
+
+ return vtn_access_chain_to_deref(b, chain);
+}
+
+/*
+ * Gets the NIR-level deref tail, which may have an array deref child that
+ * selects a single vector component, since OpAccessChain supports
+ * per-component indexing in SPIR-V.
+ */
+static nir_deref *
+get_deref_tail(nir_deref_var *deref)
+{
+ nir_deref *cur = &deref->deref;
+ while (!glsl_type_is_vector_or_scalar(cur->type) && cur->child)
+ cur = cur->child;
+
+ return cur;
+}
+
+struct vtn_ssa_value *
+vtn_local_load(struct vtn_builder *b, nir_deref_var *src)
+{
+ nir_deref *src_tail = get_deref_tail(src);
+ struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
+ _vtn_local_load_store(b, true, src, src_tail, val);
+
+ if (src_tail->child) {
+ nir_deref_array *vec_deref = nir_deref_as_array(src_tail->child);
+ assert(vec_deref->deref.child == NULL);
+ val->type = vec_deref->deref.type;
+ if (vec_deref->deref_array_type == nir_deref_array_type_direct)
+ val->def = vtn_vector_extract(b, val->def, vec_deref->base_offset);
+ else
+ val->def = vtn_vector_extract_dynamic(b, val->def,
+ vec_deref->indirect.ssa);
+ }
+
+ return val;
+}
+
+void
+vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
+ nir_deref_var *dest)
+{
+ nir_deref *dest_tail = get_deref_tail(dest);
+
+ if (dest_tail->child) {
+ struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
+ _vtn_local_load_store(b, true, dest, dest_tail, val);
+ nir_deref_array *deref = nir_deref_as_array(dest_tail->child);
+ assert(deref->deref.child == NULL);
+ if (deref->deref_array_type == nir_deref_array_type_direct)
+ val->def = vtn_vector_insert(b, val->def, src->def,
+ deref->base_offset);
+ else
+ val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
+ deref->indirect.ssa);
+ _vtn_local_load_store(b, false, dest, dest_tail, val);
+ } else {
+ _vtn_local_load_store(b, false, dest, dest_tail, src);
+ }
+}
+
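+/* Emits a vulkan_resource_index intrinsic for the variable's descriptor set
+ * and binding, consuming the first chain link as the array index when the
+ * variable is an array of blocks.  Push constants have no binding, so NULL
+ * is returned for them instead.
+ */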
+static nir_ssa_def *
+get_vulkan_resource_index(struct vtn_builder *b, struct vtn_access_chain *chain,
+ struct vtn_type **type, unsigned *chain_idx)
+{
+ /* Push constants have no explicit binding */
+ if (chain->var->mode == vtn_variable_mode_push_constant) {
+ *chain_idx = 0;
+ *type = chain->var->type;
+ return NULL;
+ }
+
+ nir_ssa_def *array_index;
+ if (glsl_type_is_array(chain->var->type->type)) {
+ assert(chain->length > 0);
+ array_index = vtn_access_link_as_ssa(b, chain->link[0], 1);
+ *chain_idx = 1;
+ *type = chain->var->type->array_element;
+ } else {
+ array_index = nir_imm_int(&b->nb, 0);
+ *chain_idx = 0;
+ *type = chain->var->type;
+ }
+
+ nir_intrinsic_instr *instr =
+ nir_intrinsic_instr_create(b->nb.shader,
+ nir_intrinsic_vulkan_resource_index);
+ instr->src[0] = nir_src_for_ssa(array_index);
+ nir_intrinsic_set_desc_set(instr, chain->var->descriptor_set);
+ nir_intrinsic_set_binding(instr, chain->var->binding);
+
+ nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
+ nir_builder_instr_insert(&b->nb, &instr->instr);
+
+ return &instr->dest.ssa;
+}
+
+nir_ssa_def *
+vtn_access_chain_to_offset(struct vtn_builder *b,
+ struct vtn_access_chain *chain,
+ nir_ssa_def **index_out, struct vtn_type **type_out,
+ unsigned *end_idx_out, bool stop_at_matrix)
+{
+ unsigned idx = 0;
+ struct vtn_type *type;
+ *index_out = get_vulkan_resource_index(b, chain, &type, &idx);
+
+ nir_ssa_def *offset = nir_imm_int(&b->nb, 0);
+ for (; idx < chain->length; idx++) {
+ enum glsl_base_type base_type = glsl_get_base_type(type->type);
+ switch (base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_BOOL:
+ /* Some users may not want matrix or vector derefs */
+ if (stop_at_matrix)
+ goto end;
+ /* Fall through */
+
+ case GLSL_TYPE_ARRAY:
+ offset = nir_iadd(&b->nb, offset,
+ vtn_access_link_as_ssa(b, chain->link[idx],
+ type->stride));
+
+ type = type->array_element;
+ break;
+
+ case GLSL_TYPE_STRUCT: {
+ assert(chain->link[idx].mode == vtn_access_mode_literal);
+ unsigned member = chain->link[idx].id;
+ offset = nir_iadd(&b->nb, offset,
+ nir_imm_int(&b->nb, type->offsets[member]));
+ type = type->members[member];
+ break;
+ }
+
+ default:
+ unreachable("Invalid type for deref");
+ }
+ }
+
+end:
+ *type_out = type;
+ if (end_idx_out)
+ *end_idx_out = idx;
+
+ return offset;
+}
+
+static void
+_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
+ nir_ssa_def *index, nir_ssa_def *offset,
+ struct vtn_ssa_value **inout, const struct glsl_type *type)
+{
+ nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
+ instr->num_components = glsl_get_vector_elements(type);
+
+ int src = 0;
+ if (!load) {
+ nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
+ instr->src[src++] = nir_src_for_ssa((*inout)->def);
+ }
+
+ /* We set the base and size for push constant load to the entire push
+ * constant block for now.
+ */
+ if (op == nir_intrinsic_load_push_constant) {
+ nir_intrinsic_set_base(instr, 0);
+ nir_intrinsic_set_range(instr, 128);
+ }
+
+ if (index)
+ instr->src[src++] = nir_src_for_ssa(index);
+
+ instr->src[src++] = nir_src_for_ssa(offset);
+
+ if (load) {
+ nir_ssa_dest_init(&instr->instr, &instr->dest,
+ instr->num_components,
+ glsl_get_bit_size(glsl_get_base_type(type)), NULL);
+ (*inout)->def = &instr->dest.ssa;
+ }
+
+ nir_builder_instr_insert(&b->nb, &instr->instr);
+
+ if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
+ (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
+}
+
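+/* Recursively loads or stores a UBO/SSBO/push-constant value at the given
+ * index and byte offset, walking structs and arrays via explicit offsets
+ * and handling row-major matrices by transposing them or picking off
+ * individual columns and components as needed.
+ */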
+static void
+_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
+ nir_ssa_def *index, nir_ssa_def *offset,
+ struct vtn_access_chain *chain, unsigned chain_idx,
+ struct vtn_type *type, struct vtn_ssa_value **inout)
+{
+ if (chain && chain_idx >= chain->length)
+ chain = NULL;
+
+ if (load && chain == NULL && *inout == NULL)
+ *inout = vtn_create_ssa_value(b, type->type);
+
+ enum glsl_base_type base_type = glsl_get_base_type(type->type);
+ switch (base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_BOOL:
+ /* This is where things get interesting. At this point, we've hit
+ * a vector, a scalar, or a matrix.
+ */
+ if (glsl_type_is_matrix(type->type)) {
+ if (chain == NULL) {
+ /* Loading the whole matrix */
+ struct vtn_ssa_value *transpose;
+ unsigned num_ops, vec_width;
+ if (type->row_major) {
+ num_ops = glsl_get_vector_elements(type->type);
+ vec_width = glsl_get_matrix_columns(type->type);
+ if (load) {
+ const struct glsl_type *transpose_type =
+ glsl_matrix_type(base_type, vec_width, num_ops);
+ *inout = vtn_create_ssa_value(b, transpose_type);
+ } else {
+ transpose = vtn_ssa_transpose(b, *inout);
+ inout = &transpose;
+ }
+ } else {
+ num_ops = glsl_get_matrix_columns(type->type);
+ vec_width = glsl_get_vector_elements(type->type);
+ }
+
+ for (unsigned i = 0; i < num_ops; i++) {
+ nir_ssa_def *elem_offset =
+ nir_iadd(&b->nb, offset,
+ nir_imm_int(&b->nb, i * type->stride));
+ _vtn_load_store_tail(b, op, load, index, elem_offset,
+ &(*inout)->elems[i],
+ glsl_vector_type(base_type, vec_width));
+ }
+
+ if (load && type->row_major)
+ *inout = vtn_ssa_transpose(b, *inout);
+ } else if (type->row_major) {
+            /* Row-major but with an access chain. */
+ nir_ssa_def *col_offset =
+ vtn_access_link_as_ssa(b, chain->link[chain_idx],
+ type->array_element->stride);
+ offset = nir_iadd(&b->nb, offset, col_offset);
+
+ if (chain_idx + 1 < chain->length) {
+ /* Picking off a single element */
+ nir_ssa_def *row_offset =
+ vtn_access_link_as_ssa(b, chain->link[chain_idx + 1],
+ type->stride);
+ offset = nir_iadd(&b->nb, offset, row_offset);
+ if (load)
+ *inout = vtn_create_ssa_value(b, glsl_scalar_type(base_type));
+ _vtn_load_store_tail(b, op, load, index, offset, inout,
+ glsl_scalar_type(base_type));
+ } else {
+ /* Grabbing a column; picking one element off each row */
+ unsigned num_comps = glsl_get_vector_elements(type->type);
+ const struct glsl_type *column_type =
+ glsl_get_column_type(type->type);
+
+ nir_ssa_def *comps[4];
+ for (unsigned i = 0; i < num_comps; i++) {
+ nir_ssa_def *elem_offset =
+ nir_iadd(&b->nb, offset,
+ nir_imm_int(&b->nb, i * type->stride));
+
+ struct vtn_ssa_value *comp, temp_val;
+ if (!load) {
+ temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
+ temp_val.type = glsl_scalar_type(base_type);
+ }
+ comp = &temp_val;
+ _vtn_load_store_tail(b, op, load, index, elem_offset,
+ &comp, glsl_scalar_type(base_type));
+ comps[i] = comp->def;
+ }
+
+ if (load) {
+ if (*inout == NULL)
+ *inout = vtn_create_ssa_value(b, column_type);
+
+ (*inout)->def = nir_vec(&b->nb, comps, num_comps);
+ }
+ }
+ } else {
+ /* Column-major with a deref. Fall through to array case. */
+ nir_ssa_def *col_offset =
+ vtn_access_link_as_ssa(b, chain->link[chain_idx], type->stride);
+ offset = nir_iadd(&b->nb, offset, col_offset);
+
+ _vtn_block_load_store(b, op, load, index, offset,
+ chain, chain_idx + 1,
+ type->array_element, inout);
+ }
+ } else if (chain == NULL) {
+ /* Single whole vector */
+ assert(glsl_type_is_vector_or_scalar(type->type));
+ _vtn_load_store_tail(b, op, load, index, offset, inout, type->type);
+ } else {
+ /* Single component of a vector. Fall through to array case. */
+ nir_ssa_def *elem_offset =
+ vtn_access_link_as_ssa(b, chain->link[chain_idx], type->stride);
+ offset = nir_iadd(&b->nb, offset, elem_offset);
+
+ _vtn_block_load_store(b, op, load, index, offset, NULL, 0,
+ type->array_element, inout);
+ }
+ return;
+
+ case GLSL_TYPE_ARRAY: {
+ unsigned elems = glsl_get_length(type->type);
+ for (unsigned i = 0; i < elems; i++) {
+ nir_ssa_def *elem_off =
+ nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
+ _vtn_block_load_store(b, op, load, index, elem_off, NULL, 0,
+ type->array_element, &(*inout)->elems[i]);
+ }
+ return;
+ }
+
+ case GLSL_TYPE_STRUCT: {
+ unsigned elems = glsl_get_length(type->type);
+ for (unsigned i = 0; i < elems; i++) {
+ nir_ssa_def *elem_off =
+ nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, type->offsets[i]));
+ _vtn_block_load_store(b, op, load, index, elem_off, NULL, 0,
+ type->members[i], &(*inout)->elems[i]);
+ }
+ return;
+ }
+
+ default:
+ unreachable("Invalid block member type");
+ }
+}
+
+static struct vtn_ssa_value *
+vtn_block_load(struct vtn_builder *b, struct vtn_access_chain *src)
+{
+ nir_intrinsic_op op;
+ switch (src->var->mode) {
+ case vtn_variable_mode_ubo:
+ op = nir_intrinsic_load_ubo;
+ break;
+ case vtn_variable_mode_ssbo:
+ op = nir_intrinsic_load_ssbo;
+ break;
+ case vtn_variable_mode_push_constant:
+ op = nir_intrinsic_load_push_constant;
+ break;
+ default:
+ assert(!"Invalid block variable mode");
+ }
+
+ nir_ssa_def *offset, *index = NULL;
+ struct vtn_type *type;
+ unsigned chain_idx;
+ offset = vtn_access_chain_to_offset(b, src, &index, &type, &chain_idx, true);
+
+ struct vtn_ssa_value *value = NULL;
+ _vtn_block_load_store(b, op, true, index, offset,
+ src, chain_idx, type, &value);
+ return value;
+}
+
+static void
+vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
+ struct vtn_access_chain *dst)
+{
+ nir_ssa_def *offset, *index = NULL;
+ struct vtn_type *type;
+ unsigned chain_idx;
+ offset = vtn_access_chain_to_offset(b, dst, &index, &type, &chain_idx, true);
+
+ _vtn_block_load_store(b, nir_intrinsic_store_ssbo, false, index, offset,
+ dst, chain_idx, type, &src);
+}
+
+static bool
+vtn_variable_is_external_block(struct vtn_variable *var)
+{
+ return var->mode == vtn_variable_mode_ssbo ||
+ var->mode == vtn_variable_mode_ubo ||
+ var->mode == vtn_variable_mode_push_constant;
+}
+
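+/* Loads or stores a variable that is backed by real NIR variables (i.e. not
+ * a UBO/SSBO/push-constant block) by recursing through structs and arrays
+ * and performing the actual access at the matrix/vector level via
+ * vtn_local_load()/vtn_local_store().
+ */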
+static void
+_vtn_variable_load_store(struct vtn_builder *b, bool load,
+ struct vtn_access_chain *chain,
+ struct vtn_type *tail_type,
+ struct vtn_ssa_value **inout)
+{
+ enum glsl_base_type base_type = glsl_get_base_type(tail_type->type);
+ switch (base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_BOOL:
+ /* At this point, we have a scalar, vector, or matrix so we know that
+ * there cannot be any structure splitting still in the way. By
+ * stopping at the matrix level rather than the vector level, we
+ * ensure that matrices get loaded in the optimal way even if they
+       * are stored row-major in a UBO.
+ */
+ if (load) {
+ *inout = vtn_local_load(b, vtn_access_chain_to_deref(b, chain));
+ } else {
+ vtn_local_store(b, *inout, vtn_access_chain_to_deref(b, chain));
+ }
+ return;
+
+ case GLSL_TYPE_ARRAY:
+ case GLSL_TYPE_STRUCT: {
+ struct vtn_access_chain *new_chain =
+ vtn_access_chain_extend(b, chain, 1);
+ new_chain->link[chain->length].mode = vtn_access_mode_literal;
+ unsigned elems = glsl_get_length(tail_type->type);
+ if (load) {
+ assert(*inout == NULL);
+ *inout = rzalloc(b, struct vtn_ssa_value);
+ (*inout)->type = tail_type->type;
+ (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
+ }
+ for (unsigned i = 0; i < elems; i++) {
+ new_chain->link[chain->length].id = i;
+ struct vtn_type *elem_type = base_type == GLSL_TYPE_ARRAY ?
+ tail_type->array_element : tail_type->members[i];
+ _vtn_variable_load_store(b, load, new_chain, elem_type,
+ &(*inout)->elems[i]);
+ }
+ return;
+ }
+
+ default:
+ unreachable("Invalid access chain type");
+ }
+}
+
+struct vtn_ssa_value *
+vtn_variable_load(struct vtn_builder *b, struct vtn_access_chain *src)
+{
+ if (vtn_variable_is_external_block(src->var)) {
+ return vtn_block_load(b, src);
+ } else {
+ struct vtn_type *tail_type = vtn_access_chain_tail_type(b, src);
+ struct vtn_ssa_value *val = NULL;
+ _vtn_variable_load_store(b, true, src, tail_type, &val);
+ return val;
+ }
+}
+
+void
+vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
+ struct vtn_access_chain *dest)
+{
+ if (vtn_variable_is_external_block(dest->var)) {
+ assert(dest->var->mode == vtn_variable_mode_ssbo);
+ vtn_block_store(b, src, dest);
+ } else {
+ struct vtn_type *tail_type = vtn_access_chain_tail_type(b, dest);
+ _vtn_variable_load_store(b, false, dest, tail_type, &src);
+ }
+}
+
+static void
+_vtn_variable_copy(struct vtn_builder *b, struct vtn_access_chain *dest,
+ struct vtn_access_chain *src, struct vtn_type *tail_type)
+{
+ enum glsl_base_type base_type = glsl_get_base_type(tail_type->type);
+ switch (base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_BOOL:
+ /* At this point, we have a scalar, vector, or matrix so we know that
+ * there cannot be any structure splitting still in the way. By
+ * stopping at the matrix level rather than the vector level, we
+ * ensure that matrices get loaded in the optimal way even if they
+       * are stored row-major in a UBO.
+ */
+ vtn_variable_store(b, vtn_variable_load(b, src), dest);
+ return;
+
+ case GLSL_TYPE_ARRAY:
+ case GLSL_TYPE_STRUCT: {
+ struct vtn_access_chain *new_src, *new_dest;
+ new_src = vtn_access_chain_extend(b, src, 1);
+ new_dest = vtn_access_chain_extend(b, dest, 1);
+ new_src->link[src->length].mode = vtn_access_mode_literal;
+ new_dest->link[dest->length].mode = vtn_access_mode_literal;
+ unsigned elems = glsl_get_length(tail_type->type);
+ for (unsigned i = 0; i < elems; i++) {
+ new_src->link[src->length].id = i;
+ new_dest->link[dest->length].id = i;
+ struct vtn_type *elem_type = base_type == GLSL_TYPE_ARRAY ?
+ tail_type->array_element : tail_type->members[i];
+ _vtn_variable_copy(b, new_dest, new_src, elem_type);
+ }
+ return;
+ }
+
+ default:
+ unreachable("Invalid access chain type");
+ }
+}
+
+static void
+vtn_variable_copy(struct vtn_builder *b, struct vtn_access_chain *dest,
+ struct vtn_access_chain *src)
+{
+ struct vtn_type *tail_type = vtn_access_chain_tail_type(b, src);
+ assert(vtn_access_chain_tail_type(b, dest)->type == tail_type->type);
+
+ /* TODO: At some point, we should add a special-case for when we can
+ * just emit a copy_var intrinsic.
+ */
+ _vtn_variable_copy(b, dest, src, tail_type);
+}
+
+static void
+set_mode_system_value(nir_variable_mode *mode)
+{
+ assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
+ *mode = nir_var_system_value;
+}
+
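+/* Translates a SPIR-V builtin into a NIR/Mesa location (VARYING_SLOT_*,
+ * SYSTEM_VALUE_*, or FRAG_RESULT_*) and, where required, adjusts the
+ * variable mode (e.g. turning shader inputs into system values).
+ */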
+static void
+vtn_get_builtin_location(struct vtn_builder *b,
+ SpvBuiltIn builtin, int *location,
+ nir_variable_mode *mode)
+{
+ switch (builtin) {
+ case SpvBuiltInPosition:
+ *location = VARYING_SLOT_POS;
+ break;
+ case SpvBuiltInPointSize:
+ *location = VARYING_SLOT_PSIZ;
+ break;
+ case SpvBuiltInClipDistance:
+ *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
+ break;
+ case SpvBuiltInCullDistance:
+ /* XXX figure this out */
+ break;
+ case SpvBuiltInVertexIndex:
+ *location = SYSTEM_VALUE_VERTEX_ID;
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInVertexId:
+ /* Vulkan defines VertexID to be zero-based and reserves the new
+ * builtin keyword VertexIndex to indicate the non-zero-based value.
+ */
+ *location = SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInInstanceIndex:
+ *location = SYSTEM_VALUE_INSTANCE_INDEX;
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInInstanceId:
+ *location = SYSTEM_VALUE_INSTANCE_ID;
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInPrimitiveId:
+ *location = VARYING_SLOT_PRIMITIVE_ID;
+ *mode = nir_var_shader_out;
+ break;
+ case SpvBuiltInInvocationId:
+ *location = SYSTEM_VALUE_INVOCATION_ID;
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInLayer:
+ *location = VARYING_SLOT_LAYER;
+ *mode = nir_var_shader_out;
+ break;
+ case SpvBuiltInViewportIndex:
+ *location = VARYING_SLOT_VIEWPORT;
+ if (b->shader->stage == MESA_SHADER_GEOMETRY)
+ *mode = nir_var_shader_out;
+ else if (b->shader->stage == MESA_SHADER_FRAGMENT)
+ *mode = nir_var_shader_in;
+ else
+ unreachable("invalid stage for SpvBuiltInViewportIndex");
+ break;
+ case SpvBuiltInTessLevelOuter:
+ case SpvBuiltInTessLevelInner:
+ case SpvBuiltInTessCoord:
+ case SpvBuiltInPatchVertices:
+ unreachable("no tessellation support");
+ case SpvBuiltInFragCoord:
+ *location = VARYING_SLOT_POS;
+ assert(*mode == nir_var_shader_in);
+ break;
+ case SpvBuiltInPointCoord:
+ *location = VARYING_SLOT_PNTC;
+ assert(*mode == nir_var_shader_in);
+ break;
+ case SpvBuiltInFrontFacing:
+ *location = VARYING_SLOT_FACE;
+ assert(*mode == nir_var_shader_in);
+ break;
+ case SpvBuiltInSampleId:
+ *location = SYSTEM_VALUE_SAMPLE_ID;
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInSamplePosition:
+ *location = SYSTEM_VALUE_SAMPLE_POS;
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInSampleMask:
+ *location = SYSTEM_VALUE_SAMPLE_MASK_IN; /* XXX out? */
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInFragDepth:
+ *location = FRAG_RESULT_DEPTH;
+ assert(*mode == nir_var_shader_out);
+ break;
+ case SpvBuiltInNumWorkgroups:
+ *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInWorkgroupSize:
+ /* This should already be handled */
+ unreachable("unsupported builtin");
+ break;
+ case SpvBuiltInWorkgroupId:
+ *location = SYSTEM_VALUE_WORK_GROUP_ID;
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInLocalInvocationId:
+ *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInLocalInvocationIndex:
+ *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInGlobalInvocationId:
+ *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInHelperInvocation:
+ default:
+ unreachable("unsupported builtin");
+ }
+}
+
+static void
+var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
+ const struct vtn_decoration *dec, void *void_var)
+{
+ struct vtn_variable *vtn_var = void_var;
+
+ /* Handle decorations that apply to a vtn_variable as a whole */
+ switch (dec->decoration) {
+ case SpvDecorationBinding:
+ vtn_var->binding = dec->literals[0];
+ return;
+ case SpvDecorationDescriptorSet:
+ vtn_var->descriptor_set = dec->literals[0];
+ return;
+
+ case SpvDecorationLocation: {
+ unsigned location = dec->literals[0];
+ bool is_vertex_input;
+ if (b->shader->stage == MESA_SHADER_FRAGMENT &&
+ vtn_var->mode == vtn_variable_mode_output) {
+ is_vertex_input = false;
+ location += FRAG_RESULT_DATA0;
+ } else if (b->shader->stage == MESA_SHADER_VERTEX &&
+ vtn_var->mode == vtn_variable_mode_input) {
+ is_vertex_input = true;
+ location += VERT_ATTRIB_GENERIC0;
+ } else if (vtn_var->mode == vtn_variable_mode_input ||
+ vtn_var->mode == vtn_variable_mode_output) {
+ is_vertex_input = false;
+ location += VARYING_SLOT_VAR0;
+ } else {
+ assert(!"Location must be on input or output variable");
+ }
+
+ if (vtn_var->var) {
+ vtn_var->var->data.location = location;
+ vtn_var->var->data.explicit_location = true;
+ } else {
+ assert(vtn_var->members);
+ unsigned length = glsl_get_length(vtn_var->type->type);
+ for (unsigned i = 0; i < length; i++) {
+ vtn_var->members[i]->data.location = location;
+ vtn_var->members[i]->data.explicit_location = true;
+ location +=
+ glsl_count_attribute_slots(vtn_var->members[i]->interface_type,
+ is_vertex_input);
+ }
+ }
+ return;
+ }
+
+ default:
+ break;
+ }
+
+ /* Now we handle decorations that apply to a particular nir_variable */
+ nir_variable *nir_var = vtn_var->var;
+ if (val->value_type == vtn_value_type_access_chain) {
+ assert(val->access_chain->length == 0);
+ assert(val->access_chain->var == void_var);
+ assert(member == -1);
+ } else {
+ assert(val->value_type == vtn_value_type_type);
+ if (member != -1)
+ nir_var = vtn_var->members[member];
+ }
+
+ if (nir_var == NULL)
+ return;
+
+ switch (dec->decoration) {
+ case SpvDecorationRelaxedPrecision:
+ break; /* FIXME: Do nothing with this for now. */
+ case SpvDecorationNoPerspective:
+ nir_var->data.interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
+ break;
+ case SpvDecorationFlat:
+ nir_var->data.interpolation = INTERP_QUALIFIER_FLAT;
+ break;
+ case SpvDecorationCentroid:
+ nir_var->data.centroid = true;
+ break;
+ case SpvDecorationSample:
+ nir_var->data.sample = true;
+ break;
+ case SpvDecorationInvariant:
+ nir_var->data.invariant = true;
+ break;
+ case SpvDecorationConstant:
+ assert(nir_var->constant_initializer != NULL);
+ nir_var->data.read_only = true;
+ break;
+ case SpvDecorationNonWritable:
+ nir_var->data.read_only = true;
+ break;
+ case SpvDecorationComponent:
+ nir_var->data.location_frac = dec->literals[0];
+ break;
+ case SpvDecorationIndex:
+ nir_var->data.explicit_index = true;
+ nir_var->data.index = dec->literals[0];
+ break;
+ case SpvDecorationBuiltIn: {
+ SpvBuiltIn builtin = dec->literals[0];
+
+ if (builtin == SpvBuiltInWorkgroupSize) {
+ /* This shouldn't be a builtin. It's actually a constant. */
+ nir_var->data.mode = nir_var_global;
+ nir_var->data.read_only = true;
+
+ nir_constant *c = rzalloc(nir_var, nir_constant);
+ c->value.u[0] = b->shader->info.cs.local_size[0];
+ c->value.u[1] = b->shader->info.cs.local_size[1];
+ c->value.u[2] = b->shader->info.cs.local_size[2];
+ nir_var->constant_initializer = c;
+ break;
+ }
+
+ nir_variable_mode mode = nir_var->data.mode;
+ vtn_get_builtin_location(b, builtin, &nir_var->data.location, &mode);
+ nir_var->data.explicit_location = true;
+ nir_var->data.mode = mode;
+
+ if (builtin == SpvBuiltInFragCoord || builtin == SpvBuiltInSamplePosition)
+ nir_var->data.origin_upper_left = b->origin_upper_left;
+ break;
+ }
+ case SpvDecorationRowMajor:
+ case SpvDecorationColMajor:
+ case SpvDecorationGLSLShared:
+ case SpvDecorationPatch:
+ case SpvDecorationRestrict:
+ case SpvDecorationAliased:
+ case SpvDecorationVolatile:
+ case SpvDecorationCoherent:
+ case SpvDecorationNonReadable:
+ case SpvDecorationUniform:
+ /* This is really nice but we have no use for it right now. */
+ case SpvDecorationCPacked:
+ case SpvDecorationSaturatedConversion:
+ case SpvDecorationStream:
+ case SpvDecorationOffset:
+ case SpvDecorationXfbBuffer:
+ case SpvDecorationFuncParamAttr:
+ case SpvDecorationFPRoundingMode:
+ case SpvDecorationFPFastMathMode:
+ case SpvDecorationLinkageAttributes:
+ case SpvDecorationSpecId:
+ break;
+ default:
+ unreachable("Unhandled variable decoration");
+ }
+}
+
+/* Tries to compute the size of an interface block based on the strides and
+ * offsets that are provided to us in the SPIR-V source.
+ */
+static unsigned
+vtn_type_block_size(struct vtn_type *type)
+{
+ enum glsl_base_type base_type = glsl_get_base_type(type->type);
+ switch (base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_DOUBLE: {
+ unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
+ glsl_get_matrix_columns(type->type);
+ if (cols > 1) {
+ assert(type->stride > 0);
+ return type->stride * cols;
+ } else if (base_type == GLSL_TYPE_DOUBLE) {
+ return glsl_get_vector_elements(type->type) * 8;
+ } else {
+ return glsl_get_vector_elements(type->type) * 4;
+ }
+ }
+
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_INTERFACE: {
+ unsigned size = 0;
+ unsigned num_fields = glsl_get_length(type->type);
+ for (unsigned f = 0; f < num_fields; f++) {
+ unsigned field_end = type->offsets[f] +
+ vtn_type_block_size(type->members[f]);
+ size = MAX2(size, field_end);
+ }
+ return size;
+ }
+
+ case GLSL_TYPE_ARRAY:
+ assert(type->stride > 0);
+ assert(glsl_get_length(type->type) > 0);
+ return type->stride * glsl_get_length(type->type);
+
+ default:
+ assert(!"Invalid block type");
+ return 0;
+ }
+}
+
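+/* Handles the variable- and memory-related opcodes: OpVariable,
+ * OpAccessChain/OpInBoundsAccessChain, OpCopyMemory, OpLoad, OpStore, and
+ * OpArrayLength.
+ */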
+void
+vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpVariable: {
+ struct vtn_variable *var = rzalloc(b, struct vtn_variable);
+ var->type = vtn_value(b, w[1], vtn_value_type_type)->type;
+
+ var->chain.var = var;
+ var->chain.length = 0;
+
+ struct vtn_value *val =
+ vtn_push_value(b, w[2], vtn_value_type_access_chain);
+ val->access_chain = &var->chain;
+
+ struct vtn_type *without_array = var->type;
+      while (glsl_type_is_array(without_array->type))
+ without_array = without_array->array_element;
+
+ nir_variable_mode nir_mode;
+ switch ((SpvStorageClass)w[3]) {
+ case SpvStorageClassUniform:
+ case SpvStorageClassUniformConstant:
+ if (without_array->block) {
+ var->mode = vtn_variable_mode_ubo;
+ b->shader->info.num_ubos++;
+ } else if (without_array->buffer_block) {
+ var->mode = vtn_variable_mode_ssbo;
+ b->shader->info.num_ssbos++;
+ } else if (glsl_type_is_image(without_array->type)) {
+ var->mode = vtn_variable_mode_image;
+ nir_mode = nir_var_uniform;
+ b->shader->info.num_images++;
+ } else if (glsl_type_is_sampler(without_array->type)) {
+ var->mode = vtn_variable_mode_sampler;
+ nir_mode = nir_var_uniform;
+ b->shader->info.num_textures++;
+ } else {
+ assert(!"Invalid uniform variable type");
+ }
+ break;
+ case SpvStorageClassPushConstant:
+ var->mode = vtn_variable_mode_push_constant;
+ assert(b->shader->num_uniforms == 0);
+ b->shader->num_uniforms = vtn_type_block_size(var->type) * 4;
+ break;
+ case SpvStorageClassInput:
+ var->mode = vtn_variable_mode_input;
+ nir_mode = nir_var_shader_in;
+ break;
+ case SpvStorageClassOutput:
+ var->mode = vtn_variable_mode_output;
+ nir_mode = nir_var_shader_out;
+ break;
+ case SpvStorageClassPrivate:
+ var->mode = vtn_variable_mode_global;
+ nir_mode = nir_var_global;
+ break;
+ case SpvStorageClassFunction:
+ var->mode = vtn_variable_mode_local;
+ nir_mode = nir_var_local;
+ break;
+ case SpvStorageClassWorkgroup:
+ var->mode = vtn_variable_mode_workgroup;
+ nir_mode = nir_var_shared;
+ break;
+ case SpvStorageClassCrossWorkgroup:
+ case SpvStorageClassGeneric:
+ case SpvStorageClassAtomicCounter:
+ default:
+ unreachable("Unhandled variable storage class");
+ }
+
+ switch (var->mode) {
+ case vtn_variable_mode_local:
+ case vtn_variable_mode_global:
+ case vtn_variable_mode_image:
+ case vtn_variable_mode_sampler:
+ case vtn_variable_mode_workgroup:
+ /* For these, we create the variable normally */
+ var->var = rzalloc(b->shader, nir_variable);
+ var->var->name = ralloc_strdup(var->var, val->name);
+ var->var->type = var->type->type;
+ var->var->data.mode = nir_mode;
+
+ switch (var->mode) {
+ case vtn_variable_mode_image:
+ case vtn_variable_mode_sampler:
+ var->var->interface_type = without_array->type;
+ break;
+ default:
+ var->var->interface_type = NULL;
+ break;
+ }
+ break;
+
+ case vtn_variable_mode_input:
+ case vtn_variable_mode_output: {
+ /* For inputs and outputs, we immediately split structures. This
+ * is for a couple of reasons. For one, builtins may all come in
+ * a struct and we really want those split out into separate
+ * variables. For another, interpolation qualifiers can be
+          * applied to members of the top-level struct and we need to be
+ * able to preserve that information.
+ */
+
+ int array_length = -1;
+ struct vtn_type *interface_type = var->type;
+ if (b->shader->stage == MESA_SHADER_GEOMETRY &&
+ glsl_type_is_array(var->type->type)) {
+ /* In Geometry shaders (and some tessellation), inputs come
+ * in per-vertex arrays. However, some builtins come in
+ * non-per-vertex, hence the need for the is_array check. In
+ * any case, there are no non-builtin arrays allowed so this
+ * check should be sufficient.
+ */
+ interface_type = var->type->array_element;
+ array_length = glsl_get_length(var->type->type);
+ }
+
+ if (glsl_type_is_struct(interface_type->type)) {
+ /* It's a struct. Split it. */
+ unsigned num_members = glsl_get_length(interface_type->type);
+ var->members = ralloc_array(b, nir_variable *, num_members);
+
+ for (unsigned i = 0; i < num_members; i++) {
+ const struct glsl_type *mtype = interface_type->members[i]->type;
+ if (array_length >= 0)
+ mtype = glsl_array_type(mtype, array_length);
+
+ var->members[i] = rzalloc(b->shader, nir_variable);
+ var->members[i]->name =
+ ralloc_asprintf(var->members[i], "%s.%d", val->name, i);
+ var->members[i]->type = mtype;
+ var->members[i]->interface_type =
+ interface_type->members[i]->type;
+ var->members[i]->data.mode = nir_mode;
+ }
+ } else {
+ var->var = rzalloc(b->shader, nir_variable);
+ var->var->name = ralloc_strdup(var->var, val->name);
+ var->var->type = var->type->type;
+ var->var->interface_type = interface_type->type;
+ var->var->data.mode = nir_mode;
+ }
+
+ /* For inputs and outputs, we need to grab locations and builtin
+ * information from the interface type.
+ */
+ vtn_foreach_decoration(b, interface_type->val, var_decoration_cb, var);
+ break;
+
+ case vtn_variable_mode_param:
+ unreachable("Not created through OpVariable");
+ }
+
+ case vtn_variable_mode_ubo:
+ case vtn_variable_mode_ssbo:
+ case vtn_variable_mode_push_constant:
+ /* These don't need actual variables. */
+ break;
+ }
+
+ if (count > 4) {
+ assert(count == 5);
+ nir_constant *constant =
+ vtn_value(b, w[4], vtn_value_type_constant)->constant;
+ var->var->constant_initializer =
+ nir_constant_clone(constant, var->var);
+ }
+
+ vtn_foreach_decoration(b, val, var_decoration_cb, var);
+
+ if (var->mode == vtn_variable_mode_image ||
+ var->mode == vtn_variable_mode_sampler) {
+ /* XXX: We still need the binding information in the nir_variable
+ * for these. We should fix that.
+ */
+ var->var->data.binding = var->binding;
+ var->var->data.descriptor_set = var->descriptor_set;
+
+ if (var->mode == vtn_variable_mode_image)
+ var->var->data.image.format = without_array->image_format;
+ }
+
+ if (var->mode == vtn_variable_mode_local) {
+ assert(var->members == NULL && var->var != NULL);
+ nir_function_impl_add_variable(b->impl, var->var);
+ } else if (var->var) {
+ nir_shader_add_variable(b->shader, var->var);
+ } else if (var->members) {
+ unsigned count = glsl_get_length(without_array->type);
+ for (unsigned i = 0; i < count; i++) {
+ assert(var->members[i]->data.mode != nir_var_local);
+ nir_shader_add_variable(b->shader, var->members[i]);
+ }
+ } else {
+ assert(var->mode == vtn_variable_mode_ubo ||
+ var->mode == vtn_variable_mode_ssbo ||
+ var->mode == vtn_variable_mode_push_constant);
+ }
+ break;
+ }
+
+ case SpvOpAccessChain:
+ case SpvOpInBoundsAccessChain: {
+ struct vtn_access_chain *base, *chain;
+ struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
+ if (base_val->value_type == vtn_value_type_sampled_image) {
+ /* This is rather insane. SPIR-V allows you to use OpSampledImage
+ * to combine an array of images with a single sampler to get an
+ * array of sampled images that all share the same sampler.
+ * Fortunately, this means that we can more-or-less ignore the
+ * sampler when crawling the access chain, but it does leave us
+ * with this rather awkward little special-case.
+ */
+ base = base_val->sampled_image->image;
+ } else {
+ assert(base_val->value_type == vtn_value_type_access_chain);
+ base = base_val->access_chain;
+ }
+
+ chain = vtn_access_chain_extend(b, base, count - 4);
+
+ unsigned idx = base->length;
+ for (int i = 4; i < count; i++) {
+ struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
+ if (link_val->value_type == vtn_value_type_constant) {
+ chain->link[idx].mode = vtn_access_mode_literal;
+ chain->link[idx].id = link_val->constant->value.u[0];
+ } else {
+ chain->link[idx].mode = vtn_access_mode_id;
+ chain->link[idx].id = w[i];
+ }
+ idx++;
+ }
+
+ if (base_val->value_type == vtn_value_type_sampled_image) {
+ struct vtn_value *val =
+ vtn_push_value(b, w[2], vtn_value_type_sampled_image);
+ val->sampled_image = ralloc(b, struct vtn_sampled_image);
+ val->sampled_image->image = chain;
+ val->sampled_image->sampler = base_val->sampled_image->sampler;
+ } else {
+ struct vtn_value *val =
+ vtn_push_value(b, w[2], vtn_value_type_access_chain);
+ val->access_chain = chain;
+ }
+ break;
+ }
+
+ case SpvOpCopyMemory: {
+ struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_access_chain);
+ struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_access_chain);
+
+ vtn_variable_copy(b, dest->access_chain, src->access_chain);
+ break;
+ }
+
+ case SpvOpLoad: {
+ struct vtn_access_chain *src =
+ vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+
+ if (src->var->mode == vtn_variable_mode_image ||
+ src->var->mode == vtn_variable_mode_sampler) {
+ vtn_push_value(b, w[2], vtn_value_type_access_chain)->access_chain = src;
+ return;
+ }
+
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ val->ssa = vtn_variable_load(b, src);
+ break;
+ }
+
+ case SpvOpStore: {
+ struct vtn_access_chain *dest =
+ vtn_value(b, w[1], vtn_value_type_access_chain)->access_chain;
+ struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
+ vtn_variable_store(b, src, dest);
+ break;
+ }
+
+ case SpvOpArrayLength: {
+ struct vtn_access_chain *chain =
+ vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+
+ const uint32_t offset = chain->var->type->offsets[w[4]];
+ const uint32_t stride = chain->var->type->members[w[4]]->stride;
+
+ unsigned chain_idx;
+ struct vtn_type *type;
+ nir_ssa_def *index =
+ get_vulkan_resource_index(b, chain, &type, &chain_idx);
+
+ nir_intrinsic_instr *instr =
+ nir_intrinsic_instr_create(b->nb.shader,
+ nir_intrinsic_get_buffer_size);
+ instr->src[0] = nir_src_for_ssa(index);
+ nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
+ nir_builder_instr_insert(&b->nb, &instr->instr);
+ nir_ssa_def *buf_size = &instr->dest.ssa;
+
+ /* array_length = max(buffer_size - offset, 0) / stride */
+ nir_ssa_def *array_length =
+ nir_idiv(&b->nb,
+ nir_imax(&b->nb,
+ nir_isub(&b->nb,
+ buf_size,
+ nir_imm_int(&b->nb, offset)),
+ nir_imm_int(&b->nb, 0u)),
+ nir_imm_int(&b->nb, stride));
+
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
+ val->ssa->def = array_length;
+ break;
+ }
+
+ case SpvOpCopyMemorySized:
+ default:
+ unreachable("Unhandled opcode");
+ }
+}