Diffstat (limited to 'src')
-rw-r--r--  src/glsl/Makefile.am                                12
-rw-r--r--  src/glsl/Makefile.sources                            3
-rw-r--r--  src/glsl/ast_to_hir.cpp                              1
-rw-r--r--  src/glsl/glsl_types.cpp                            102
-rw-r--r--  src/glsl/glsl_types.h                               32
-rw-r--r--  src/glsl/ir_clone.cpp                                1
-rw-r--r--  src/glsl/link_uniform_initializers.cpp               1
-rw-r--r--  src/glsl/main.cpp                                    6
-rw-r--r--  src/glsl/nir/nir.h                                   9
-rw-r--r--  src/glsl/nir/nir_lower_io.c                          1
-rw-r--r--  src/glsl/nir/nir_lower_var_copies.c                 15
-rw-r--r--  src/glsl/nir/nir_spirv.h                            38
-rw-r--r--  src/glsl/nir/nir_split_var_copies.c                 12
-rw-r--r--  src/glsl/nir/nir_types.cpp                         111
-rw-r--r--  src/glsl/nir/nir_types.h                            29
-rw-r--r--  src/glsl/nir/spirv.h                              1304
-rw-r--r--  src/glsl/nir/spirv2nir.c                            54
-rw-r--r--  src/glsl/nir/spirv_glsl450_to_nir.c                284
-rw-r--r--  src/glsl/nir/spirv_to_nir.c                       1572
-rw-r--r--  src/glsl/nir/spirv_to_nir_private.h                148
-rw-r--r--  src/glsl/standalone_scaffolding.cpp                  6
-rw-r--r--  src/mesa/drivers/dri/i965/brw_fs.cpp                 1
-rw-r--r--  src/mesa/drivers/dri/i965/brw_fs_visitor.cpp         1
-rw-r--r--  src/mesa/drivers/dri/i965/brw_shader.cpp             1
-rw-r--r--  src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp       1
-rw-r--r--  src/mesa/program/ir_to_mesa.cpp                      2
26 files changed, 3712 insertions, 35 deletions
diff --git a/src/glsl/Makefile.am b/src/glsl/Makefile.am
index 23c6fe8bb6c..7af9a709d5a 100644
--- a/src/glsl/Makefile.am
+++ b/src/glsl/Makefile.am
@@ -77,7 +77,7 @@ check_PROGRAMS = \
tests/sampler-types-test \
tests/uniform-initializer-test
-noinst_PROGRAMS = glsl_compiler
+noinst_PROGRAMS = glsl_compiler spirv2nir
tests_blob_test_SOURCES = \
tests/blob_test.c
@@ -162,6 +162,16 @@ glsl_compiler_LDADD = \
$(top_builddir)/src/libglsl_util.la \
$(PTHREAD_LIBS)
+spirv2nir_SOURCES = \
+ standalone_scaffolding.cpp \
+ standalone_scaffolding.h \
+ nir/spirv2nir.c
+
+spirv2nir_LDADD = \
+ libglsl.la \
+ $(top_builddir)/src/libglsl_util.la \
+ $(PTHREAD_LIBS)
+
glsl_test_SOURCES = \
standalone_scaffolding.cpp \
tests/common.c \
diff --git a/src/glsl/Makefile.sources b/src/glsl/Makefile.sources
index d784a810723..a234ac6f8e2 100644
--- a/src/glsl/Makefile.sources
+++ b/src/glsl/Makefile.sources
@@ -59,6 +59,7 @@ NIR_FILES = \
nir/nir_remove_dead_variables.c \
nir/nir_search.c \
nir/nir_search.h \
+ nir/nir_spirv.h \
nir/nir_split_var_copies.c \
nir/nir_sweep.c \
nir/nir_to_ssa.c \
@@ -68,6 +69,8 @@ NIR_FILES = \
nir/nir_worklist.c \
nir/nir_worklist.h \
nir/nir_types.cpp \
+ nir/spirv_to_nir.c \
+ nir/spirv_glsl450_to_nir.c \
$(NIR_GENERATED_FILES)
# libglsl
diff --git a/src/glsl/ast_to_hir.cpp b/src/glsl/ast_to_hir.cpp
index f1daee38d7a..8713cd85f0a 100644
--- a/src/glsl/ast_to_hir.cpp
+++ b/src/glsl/ast_to_hir.cpp
@@ -970,6 +970,7 @@ do_comparison(void *mem_ctx, int operation, ir_rvalue *op0, ir_rvalue *op1)
case GLSL_TYPE_SAMPLER:
case GLSL_TYPE_IMAGE:
case GLSL_TYPE_INTERFACE:
+ case GLSL_TYPE_FUNCTION:
case GLSL_TYPE_ATOMIC_UINT:
/* I assume a comparison of a struct containing a sampler just
* ignores the sampler present in the type.
diff --git a/src/glsl/glsl_types.cpp b/src/glsl/glsl_types.cpp
index 9c9b7efcbc7..0d83ee68e42 100644
--- a/src/glsl/glsl_types.cpp
+++ b/src/glsl/glsl_types.cpp
@@ -32,6 +32,7 @@ mtx_t glsl_type::mutex = _MTX_INITIALIZER_NP;
hash_table *glsl_type::array_types = NULL;
hash_table *glsl_type::record_types = NULL;
hash_table *glsl_type::interface_types = NULL;
+hash_table *glsl_type::function_types = NULL;
void *glsl_type::mem_ctx = NULL;
void
@@ -159,6 +160,39 @@ glsl_type::glsl_type(const glsl_struct_field *fields, unsigned num_fields,
mtx_unlock(&glsl_type::mutex);
}
+glsl_type::glsl_type(const glsl_type *return_type,
+ const glsl_function_param *params, unsigned num_params) :
+ gl_type(0),
+ base_type(GLSL_TYPE_FUNCTION),
+ sampler_dimensionality(0), sampler_shadow(0), sampler_array(0),
+ sampler_type(0), interface_packing(0),
+ vector_elements(0), matrix_columns(0),
+ length(num_params)
+{
+ unsigned int i;
+
+ mtx_lock(&glsl_type::mutex);
+
+ init_ralloc_type_ctx();
+
+ this->fields.parameters = rzalloc_array(this->mem_ctx,
+ glsl_function_param, num_params + 1);
+
+ /* We store the return type as the first parameter */
+ this->fields.parameters[0].type = return_type;
+ this->fields.parameters[0].in = false;
+ this->fields.parameters[0].out = true;
+
+ /* We store the i'th parameter in slot i+1 */
+ for (i = 0; i < length; i++) {
+ this->fields.parameters[i + 1].type = params[i].type;
+ this->fields.parameters[i + 1].in = params[i].in;
+ this->fields.parameters[i + 1].out = params[i].out;
+ }
+
+ mtx_unlock(&glsl_type::mutex);
+}
+
bool
glsl_type::contains_sampler() const
@@ -827,6 +861,72 @@ glsl_type::get_interface_instance(const glsl_struct_field *fields,
}
+static int
+function_key_compare(const void *a, const void *b)
+{
+ const glsl_type *const key1 = (glsl_type *) a;
+ const glsl_type *const key2 = (glsl_type *) b;
+
+ if (key1->length != key2->length)
+ return 1;
+
+ return memcmp(key1->fields.parameters, key2->fields.parameters,
+ (key1->length + 1) * sizeof(*key1->fields.parameters));
+}
+
+
+static unsigned
+function_key_hash(const void *a)
+{
+ const glsl_type *const key = (glsl_type *) a;
+ char hash_key[128];
+ unsigned size = 0;
+
+ size = snprintf(hash_key, sizeof(hash_key), "%08x", key->length);
+
+ for (unsigned i = 0; i < key->length; i++) {
+ if (size >= sizeof(hash_key))
+ break;
+
+ size += snprintf(& hash_key[size], sizeof(hash_key) - size,
+ "%p", (void *) key->fields.structure[i].type);
+ }
+
+ return hash_table_string_hash(& hash_key);
+}
+
+const glsl_type *
+glsl_type::get_function_instance(const glsl_type *return_type,
+ const glsl_function_param *params,
+ unsigned num_params)
+{
+ const glsl_type key(return_type, params, num_params);
+
+ mtx_lock(&glsl_type::mutex);
+
+ if (function_types == NULL) {
+ function_types = hash_table_ctor(64, function_key_hash,
+ function_key_compare);
+ }
+
+ const glsl_type *t = (glsl_type *) hash_table_find(function_types, &key);
+ if (t == NULL) {
+ mtx_unlock(&glsl_type::mutex);
+ t = new glsl_type(return_type, params, num_params);
+ mtx_lock(&glsl_type::mutex);
+
+ hash_table_insert(function_types, (void *) t, t);
+ }
+
+ assert(t->base_type == GLSL_TYPE_FUNCTION);
+ assert(t->length == num_params);
+
+ mtx_unlock(&glsl_type::mutex);
+
+ return t;
+}
+
+
const glsl_type *
glsl_type::get_mul_type(const glsl_type *type_a, const glsl_type *type_b)
{
@@ -955,6 +1055,7 @@ glsl_type::component_slots() const
case GLSL_TYPE_IMAGE:
return 1;
+ case GLSL_TYPE_FUNCTION:
case GLSL_TYPE_SAMPLER:
case GLSL_TYPE_ATOMIC_UINT:
case GLSL_TYPE_VOID:
@@ -1326,6 +1427,7 @@ glsl_type::count_attribute_slots() const
case GLSL_TYPE_ARRAY:
return this->length * this->fields.array->count_attribute_slots();
+ case GLSL_TYPE_FUNCTION:
case GLSL_TYPE_SAMPLER:
case GLSL_TYPE_IMAGE:
case GLSL_TYPE_ATOMIC_UINT:
diff --git a/src/glsl/glsl_types.h b/src/glsl/glsl_types.h
index 5645dcd5011..2d4718572af 100644
--- a/src/glsl/glsl_types.h
+++ b/src/glsl/glsl_types.h
@@ -56,6 +56,7 @@ enum glsl_base_type {
GLSL_TYPE_IMAGE,
GLSL_TYPE_ATOMIC_UINT,
GLSL_TYPE_STRUCT,
+ GLSL_TYPE_FUNCTION,
GLSL_TYPE_INTERFACE,
GLSL_TYPE_ARRAY,
GLSL_TYPE_VOID,
@@ -178,7 +179,7 @@ struct glsl_type {
*/
union {
const struct glsl_type *array; /**< Type of array elements. */
- const struct glsl_type *parameters; /**< Parameters to function. */
+ struct glsl_function_param *parameters; /**< Parameters to function. */
struct glsl_struct_field *structure; /**< List of struct fields. */
} fields;
@@ -276,6 +277,13 @@ struct glsl_type {
const char *block_name);
/**
+ * Get the instance of a function type
+ */
+ static const glsl_type *get_function_instance(const struct glsl_type *return_type,
+ const glsl_function_param *parameters,
+ unsigned num_params);
+
+ /**
* Get the type resulting from a multiplication of \p type_a * \p type_b
*/
static const glsl_type *get_mul_type(const glsl_type *type_a,
@@ -688,6 +696,10 @@ private:
glsl_type(const glsl_struct_field *fields, unsigned num_fields,
enum glsl_interface_packing packing, const char *name);
+ /** Constructor for function types */
+ glsl_type(const glsl_type *return_type,
+ const glsl_function_param *params, unsigned num_params);
+
/** Constructor for array types */
glsl_type(const glsl_type *array, unsigned length);
@@ -700,6 +712,9 @@ private:
/** Hash table containing the known interface types. */
static struct hash_table *interface_types;
+ /** Hash table containing the known function types. */
+ static struct hash_table *function_types;
+
static int record_key_compare(const void *a, const void *b);
static unsigned record_key_hash(const void *key);
@@ -727,6 +742,10 @@ private:
/*@}*/
};
+#undef DECL_TYPE
+#undef STRUCT_TYPE
+#endif /* __cplusplus */
+
struct glsl_struct_field {
const struct glsl_type *type;
const char *name;
@@ -770,14 +789,17 @@ struct glsl_struct_field {
int stream;
};
+struct glsl_function_param {
+ const struct glsl_type *type;
+
+ bool in;
+ bool out;
+};
+
static inline unsigned int
glsl_align(unsigned int a, unsigned int align)
{
return (a + align - 1) / align * align;
}
-#undef DECL_TYPE
-#undef STRUCT_TYPE
-#endif /* __cplusplus */
-
#endif /* GLSL_TYPES_H */
diff --git a/src/glsl/ir_clone.cpp b/src/glsl/ir_clone.cpp
index 914e0e4d540..636c143ddc2 100644
--- a/src/glsl/ir_clone.cpp
+++ b/src/glsl/ir_clone.cpp
@@ -357,6 +357,7 @@ ir_constant::clone(void *mem_ctx, struct hash_table *ht) const
return c;
}
+ case GLSL_TYPE_FUNCTION:
case GLSL_TYPE_SAMPLER:
case GLSL_TYPE_IMAGE:
case GLSL_TYPE_ATOMIC_UINT:
diff --git a/src/glsl/link_uniform_initializers.cpp b/src/glsl/link_uniform_initializers.cpp
index 69073841ea4..60bfc9c15c9 100644
--- a/src/glsl/link_uniform_initializers.cpp
+++ b/src/glsl/link_uniform_initializers.cpp
@@ -88,6 +88,7 @@ copy_constant_to_storage(union gl_constant_value *storage,
case GLSL_TYPE_IMAGE:
case GLSL_TYPE_ATOMIC_UINT:
case GLSL_TYPE_INTERFACE:
+ case GLSL_TYPE_FUNCTION:
case GLSL_TYPE_VOID:
case GLSL_TYPE_ERROR:
/* All other types should have already been filtered by other
diff --git a/src/glsl/main.cpp b/src/glsl/main.cpp
index ccac8399646..fc54ddd7eb1 100644
--- a/src/glsl/main.cpp
+++ b/src/glsl/main.cpp
@@ -41,12 +41,6 @@
static int glsl_version = 330;
-extern "C" void
-_mesa_error_no_memory(const char *caller)
-{
- fprintf(stderr, "Mesa error: out of memory in %s", caller);
-}
-
static void
initialize_context(struct gl_context *ctx, gl_api api)
{
diff --git a/src/glsl/nir/nir.h b/src/glsl/nir/nir.h
index 697d37e95ac..61306e9b7e0 100644
--- a/src/glsl/nir/nir.h
+++ b/src/glsl/nir/nir.h
@@ -782,6 +782,15 @@ NIR_DEFINE_CAST(nir_deref_as_var, nir_deref, nir_deref_var, deref)
NIR_DEFINE_CAST(nir_deref_as_array, nir_deref, nir_deref_array, deref)
NIR_DEFINE_CAST(nir_deref_as_struct, nir_deref, nir_deref_struct, deref)
+/** Returns the tail of a deref chain */
+static inline nir_deref *
+nir_deref_tail(nir_deref *deref)
+{
+ while (deref->child)
+ deref = deref->child;
+ return deref;
+}
+
typedef struct {
nir_instr instr;
diff --git a/src/glsl/nir/nir_lower_io.c b/src/glsl/nir/nir_lower_io.c
index 03eed04e1e9..561bebd3a9c 100644
--- a/src/glsl/nir/nir_lower_io.c
+++ b/src/glsl/nir/nir_lower_io.c
@@ -67,6 +67,7 @@ type_size(const struct glsl_type *type)
return 0;
case GLSL_TYPE_IMAGE:
return 0;
+ case GLSL_TYPE_FUNCTION:
case GLSL_TYPE_VOID:
case GLSL_TYPE_ERROR:
case GLSL_TYPE_DOUBLE:
diff --git a/src/glsl/nir/nir_lower_var_copies.c b/src/glsl/nir/nir_lower_var_copies.c
index 21672901f04..98c107aa50e 100644
--- a/src/glsl/nir/nir_lower_var_copies.c
+++ b/src/glsl/nir/nir_lower_var_copies.c
@@ -53,17 +53,6 @@ deref_next_wildcard_parent(nir_deref *deref)
return NULL;
}
-/* Returns the last deref in the chain.
- */
-static nir_deref *
-get_deref_tail(nir_deref *deref)
-{
- while (deref->child)
- deref = deref->child;
-
- return deref;
-}
-
/* This function recursively walks the given deref chain and replaces the
* given copy instruction with an equivalent sequence load/store
* operations.
@@ -121,8 +110,8 @@ emit_copy_load_store(nir_intrinsic_instr *copy_instr,
} else {
/* In this case, we have no wildcards anymore, so all we have to do
* is just emit the load and store operations. */
- src_tail = get_deref_tail(src_tail);
- dest_tail = get_deref_tail(dest_tail);
+ src_tail = nir_deref_tail(src_tail);
+ dest_tail = nir_deref_tail(dest_tail);
assert(src_tail->type == dest_tail->type);
diff --git a/src/glsl/nir/nir_spirv.h b/src/glsl/nir/nir_spirv.h
new file mode 100644
index 00000000000..789d30cd672
--- /dev/null
+++ b/src/glsl/nir/nir_spirv.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jason Ekstrand ([email protected])
+ *
+ */
+
+#pragma once
+
+#ifndef _NIR_SPIRV_H_
+#define _NIR_SPIRV_H_
+
+#include "nir.h"
+
+nir_shader *spirv_to_nir(const uint32_t *words, size_t word_count,
+ const nir_shader_compiler_options *options);
+
+#endif /* _NIR_SPIRV_H_ */
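
The header above is the entire public interface to the new SPIR-V front end. As a rough caller sketch (the SPIR-V words are assumed to already be in memory; compile_spirv_blob is an illustrative name, and the NULL options pointer mirrors what the standalone spirv2nir tool added below passes):

    #include <stdio.h>
    #include "nir_spirv.h"

    /* Illustrative wrapper: 'words'/'word_count' hold a SPIR-V binary that
     * has already been checked to be a whole number of 32-bit words. */
    static nir_shader *
    compile_spirv_blob(const uint32_t *words, size_t word_count)
    {
       nir_shader *shader = spirv_to_nir(words, word_count, NULL);
       nir_print_shader(shader, stderr);   /* dump the generated NIR */
       return shader;
    }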
diff --git a/src/glsl/nir/nir_split_var_copies.c b/src/glsl/nir/nir_split_var_copies.c
index fc72c078c77..5c163b59819 100644
--- a/src/glsl/nir/nir_split_var_copies.c
+++ b/src/glsl/nir/nir_split_var_copies.c
@@ -66,14 +66,6 @@ struct split_var_copies_state {
void *dead_ctx;
};
-static nir_deref *
-get_deref_tail(nir_deref *deref)
-{
- while (deref->child != NULL)
- deref = deref->child;
- return deref;
-}
-
/* Recursively constructs deref chains to split a copy instruction into
* multiple (if needed) copy instructions with full-length deref chains.
* External callers of this function should pass the tail and head of the
@@ -225,8 +217,8 @@ split_var_copies_block(nir_block *block, void *void_state)
nir_deref *dest_head = &intrinsic->variables[0]->deref;
nir_deref *src_head = &intrinsic->variables[1]->deref;
- nir_deref *dest_tail = get_deref_tail(dest_head);
- nir_deref *src_tail = get_deref_tail(src_head);
+ nir_deref *dest_tail = nir_deref_tail(dest_head);
+ nir_deref *src_tail = nir_deref_tail(src_head);
switch (glsl_get_base_type(src_tail->type)) {
case GLSL_TYPE_ARRAY:
diff --git a/src/glsl/nir/nir_types.cpp b/src/glsl/nir/nir_types.cpp
index 62176f508a1..35421506545 100644
--- a/src/glsl/nir/nir_types.cpp
+++ b/src/glsl/nir/nir_types.cpp
@@ -70,6 +70,18 @@ glsl_get_struct_field(const glsl_type *type, unsigned index)
return type->fields.structure[index].type;
}
+const glsl_type *
+glsl_get_function_return_type(const glsl_type *type)
+{
+ return type->fields.parameters[0].type;
+}
+
+const glsl_function_param *
+glsl_get_function_param(const glsl_type *type, unsigned index)
+{
+ return &type->fields.parameters[index + 1];
+}
+
const struct glsl_type *
glsl_get_column_type(const struct glsl_type *type)
{
@@ -112,6 +124,20 @@ glsl_get_struct_elem_name(const struct glsl_type *type, unsigned index)
return type->fields.structure[index].name;
}
+glsl_sampler_dim
+glsl_get_sampler_dim(const struct glsl_type *type)
+{
+ assert(glsl_type_is_sampler(type));
+ return (glsl_sampler_dim)type->sampler_dimensionality;
+}
+
+glsl_base_type
+glsl_get_sampler_result_type(const struct glsl_type *type)
+{
+ assert(glsl_type_is_sampler(type));
+ return (glsl_base_type)type->sampler_type;
+}
+
bool
glsl_type_is_void(const glsl_type *type)
{
@@ -131,11 +157,37 @@ glsl_type_is_scalar(const struct glsl_type *type)
}
bool
+glsl_type_is_vector_or_scalar(const struct glsl_type *type)
+{
+ return type->is_vector() || type->is_scalar();
+}
+
+bool
glsl_type_is_matrix(const struct glsl_type *type)
{
return type->is_matrix();
}
+bool
+glsl_type_is_sampler(const struct glsl_type *type)
+{
+ return type->is_sampler();
+}
+
+bool
+glsl_sampler_type_is_shadow(const struct glsl_type *type)
+{
+ assert(glsl_type_is_sampler(type));
+ return type->sampler_shadow;
+}
+
+bool
+glsl_sampler_type_is_array(const struct glsl_type *type)
+{
+ assert(glsl_type_is_sampler(type));
+ return type->sampler_array;
+}
+
const glsl_type *
glsl_void_type(void)
{
@@ -149,13 +201,72 @@ glsl_float_type(void)
}
const glsl_type *
+glsl_int_type(void)
+{
+ return glsl_type::int_type;
+}
+
+const glsl_type *
+glsl_uint_type(void)
+{
+ return glsl_type::uint_type;
+}
+
+const glsl_type *
+glsl_bool_type(void)
+{
+ return glsl_type::bool_type;
+}
+
+const glsl_type *
glsl_vec4_type(void)
{
return glsl_type::vec4_type;
}
const glsl_type *
+glsl_scalar_type(enum glsl_base_type base_type)
+{
+ return glsl_type::get_instance(base_type, 1, 1);
+}
+
+const glsl_type *
+glsl_vector_type(enum glsl_base_type base_type, unsigned components)
+{
+ assert(components > 1 && components <= 4);
+ return glsl_type::get_instance(base_type, components, 1);
+}
+
+const glsl_type *
+glsl_matrix_type(enum glsl_base_type base_type, unsigned rows, unsigned columns)
+{
+ assert(rows > 1 && rows <= 4 && columns > 1 && columns <= 4);
+ return glsl_type::get_instance(base_type, rows, columns);
+}
+
+const glsl_type *
glsl_array_type(const glsl_type *base, unsigned elements)
{
return glsl_type::get_array_instance(base, elements);
}
+
+const glsl_type *
+glsl_struct_type(const glsl_struct_field *fields,
+ unsigned num_fields, const char *name)
+{
+ return glsl_type::get_record_instance(fields, num_fields, name);
+}
+
+const struct glsl_type *
+glsl_sampler_type(enum glsl_sampler_dim dim, bool is_shadow, bool is_array,
+ enum glsl_base_type base_type)
+{
+ return glsl_type::get_sampler_instance(dim, is_shadow, is_array, base_type);
+}
+
+const glsl_type *
+glsl_function_type(const glsl_type *return_type,
+ const glsl_function_param *params, unsigned num_params)
+{
+ return glsl_type::get_function_instance(return_type, params, num_params);
+}
diff --git a/src/glsl/nir/nir_types.h b/src/glsl/nir/nir_types.h
index 276d4ad6234..ceb131c9f47 100644
--- a/src/glsl/nir/nir_types.h
+++ b/src/glsl/nir/nir_types.h
@@ -49,6 +49,12 @@ const struct glsl_type *glsl_get_array_element(const struct glsl_type *type);
const struct glsl_type *glsl_get_column_type(const struct glsl_type *type);
+const struct glsl_type *
+glsl_get_function_return_type(const struct glsl_type *type);
+
+const struct glsl_function_param *
+glsl_get_function_param(const struct glsl_type *type, unsigned index);
+
enum glsl_base_type glsl_get_base_type(const struct glsl_type *type);
unsigned glsl_get_vector_elements(const struct glsl_type *type);
@@ -62,17 +68,40 @@ unsigned glsl_get_length(const struct glsl_type *type);
const char *glsl_get_struct_elem_name(const struct glsl_type *type,
unsigned index);
+enum glsl_sampler_dim glsl_get_sampler_dim(const struct glsl_type *type);
+enum glsl_base_type glsl_get_sampler_result_type(const struct glsl_type *type);
bool glsl_type_is_void(const struct glsl_type *type);
bool glsl_type_is_vector(const struct glsl_type *type);
bool glsl_type_is_scalar(const struct glsl_type *type);
+bool glsl_type_is_vector_or_scalar(const struct glsl_type *type);
bool glsl_type_is_matrix(const struct glsl_type *type);
+bool glsl_type_is_sampler(const struct glsl_type *type);
+bool glsl_sampler_type_is_shadow(const struct glsl_type *type);
+bool glsl_sampler_type_is_array(const struct glsl_type *type);
const struct glsl_type *glsl_void_type(void);
const struct glsl_type *glsl_float_type(void);
+const struct glsl_type *glsl_int_type(void);
+const struct glsl_type *glsl_uint_type(void);
+const struct glsl_type *glsl_bool_type(void);
+
const struct glsl_type *glsl_vec4_type(void);
+const struct glsl_type *glsl_scalar_type(enum glsl_base_type base_type);
+const struct glsl_type *glsl_vector_type(enum glsl_base_type base_type,
+ unsigned components);
+const struct glsl_type *glsl_matrix_type(enum glsl_base_type base_type,
+ unsigned rows, unsigned columns);
const struct glsl_type *glsl_array_type(const struct glsl_type *base,
unsigned elements);
+const struct glsl_type *glsl_struct_type(const struct glsl_struct_field *fields,
+ unsigned num_fields, const char *name);
+const struct glsl_type *glsl_sampler_type(enum glsl_sampler_dim dim,
+ bool is_shadow, bool is_array,
+ enum glsl_base_type base_type);
+const struct glsl_type * glsl_function_type(const struct glsl_type *return_type,
+ const struct glsl_function_param *params,
+ unsigned num_params);
#ifdef __cplusplus
}
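
Taken together, the new C wrappers let NIR code build and inspect function types without touching the C++ glsl_type class directly. A hedged sketch (build_example_function_type is illustrative only; the full struct glsl_function_param definition comes from glsl_types.h, which nir_types.h pulls in, and slot 0 of the stored parameter array is the return type, so glsl_get_function_param(t, 0) is the first real parameter):

    #include <assert.h>
    #include <stdbool.h>
    #include "nir_types.h"

    /* Illustrative only: the type of a function taking one "in" float and
     * one "out" float and returning a vec4. */
    static const struct glsl_type *
    build_example_function_type(void)
    {
       struct glsl_function_param params[2] = {
          { .type = glsl_float_type(), .in = true,  .out = false },
          { .type = glsl_float_type(), .in = false, .out = true  },
       };
       const struct glsl_type *t =
          glsl_function_type(glsl_vec4_type(), params, 2);

       assert(glsl_get_function_return_type(t) == glsl_vec4_type());
       assert(glsl_get_function_param(t, 0)->type == glsl_float_type());
       return t;
    }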
diff --git a/src/glsl/nir/spirv.h b/src/glsl/nir/spirv.h
new file mode 100644
index 00000000000..da717ecd342
--- /dev/null
+++ b/src/glsl/nir/spirv.h
@@ -0,0 +1,1304 @@
+/*
+** Copyright (c) 2015 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+/*
+** This header is automatically generated by the same tool that creates
+** the Binary Section of the SPIR-V specification.
+*/
+
+/*
+** Specification revision 30.
+** Enumeration tokens for SPIR-V, in three styles: C, C++, generic.
+** - C++ will have the tokens in the "spv" name space, with no prefix.
+** - C will have tokens with a "Spv" prefix.
+**
+** Some tokens act like mask values, which can be OR'd together,
+** while others are mutually exclusive. The mask-like ones have
+** "Mask" in their name, and a parallel enum that has the shift
+** amount (1 << x) for each corresponding enumerant.
+*/
+
+#ifndef spirv_H
+#define spirv_H
+
+#ifdef __cplusplus
+
+namespace spv {
+
+static const int MagicNumber = 0x07230203;
+static const int Version = 99;
+
+typedef unsigned int Id;
+
+static const unsigned int OpCodeMask = 0xFFFF;
+static const unsigned int WordCountShift = 16;
+
+enum SourceLanguage {
+ SourceLanguageUnknown = 0,
+ SourceLanguageESSL = 1,
+ SourceLanguageGLSL = 2,
+ SourceLanguageOpenCL = 3,
+};
+
+enum ExecutionModel {
+ ExecutionModelVertex = 0,
+ ExecutionModelTessellationControl = 1,
+ ExecutionModelTessellationEvaluation = 2,
+ ExecutionModelGeometry = 3,
+ ExecutionModelFragment = 4,
+ ExecutionModelGLCompute = 5,
+ ExecutionModelKernel = 6,
+};
+
+enum AddressingModel {
+ AddressingModelLogical = 0,
+ AddressingModelPhysical32 = 1,
+ AddressingModelPhysical64 = 2,
+};
+
+enum MemoryModel {
+ MemoryModelSimple = 0,
+ MemoryModelGLSL450 = 1,
+ MemoryModelOpenCL12 = 2,
+ MemoryModelOpenCL20 = 3,
+ MemoryModelOpenCL21 = 4,
+};
+
+enum ExecutionMode {
+ ExecutionModeInvocations = 0,
+ ExecutionModeSpacingEqual = 1,
+ ExecutionModeSpacingFractionalEven = 2,
+ ExecutionModeSpacingFractionalOdd = 3,
+ ExecutionModeVertexOrderCw = 4,
+ ExecutionModeVertexOrderCcw = 5,
+ ExecutionModePixelCenterInteger = 6,
+ ExecutionModeOriginUpperLeft = 7,
+ ExecutionModeEarlyFragmentTests = 8,
+ ExecutionModePointMode = 9,
+ ExecutionModeXfb = 10,
+ ExecutionModeDepthReplacing = 11,
+ ExecutionModeDepthAny = 12,
+ ExecutionModeDepthGreater = 13,
+ ExecutionModeDepthLess = 14,
+ ExecutionModeDepthUnchanged = 15,
+ ExecutionModeLocalSize = 16,
+ ExecutionModeLocalSizeHint = 17,
+ ExecutionModeInputPoints = 18,
+ ExecutionModeInputLines = 19,
+ ExecutionModeInputLinesAdjacency = 20,
+ ExecutionModeInputTriangles = 21,
+ ExecutionModeInputTrianglesAdjacency = 22,
+ ExecutionModeInputQuads = 23,
+ ExecutionModeInputIsolines = 24,
+ ExecutionModeOutputVertices = 25,
+ ExecutionModeOutputPoints = 26,
+ ExecutionModeOutputLineStrip = 27,
+ ExecutionModeOutputTriangleStrip = 28,
+ ExecutionModeVecTypeHint = 29,
+ ExecutionModeContractionOff = 30,
+};
+
+enum StorageClass {
+ StorageClassUniformConstant = 0,
+ StorageClassInput = 1,
+ StorageClassUniform = 2,
+ StorageClassOutput = 3,
+ StorageClassWorkgroupLocal = 4,
+ StorageClassWorkgroupGlobal = 5,
+ StorageClassPrivateGlobal = 6,
+ StorageClassFunction = 7,
+ StorageClassGeneric = 8,
+ StorageClassPrivate = 9,
+ StorageClassAtomicCounter = 10,
+};
+
+enum Dim {
+ Dim1D = 0,
+ Dim2D = 1,
+ Dim3D = 2,
+ DimCube = 3,
+ DimRect = 4,
+ DimBuffer = 5,
+};
+
+enum SamplerAddressingMode {
+ SamplerAddressingModeNone = 0,
+ SamplerAddressingModeClampToEdge = 1,
+ SamplerAddressingModeClamp = 2,
+ SamplerAddressingModeRepeat = 3,
+ SamplerAddressingModeRepeatMirrored = 4,
+};
+
+enum SamplerFilterMode {
+ SamplerFilterModeNearest = 0,
+ SamplerFilterModeLinear = 1,
+};
+
+enum FPFastMathModeShift {
+ FPFastMathModeNotNaNShift = 0,
+ FPFastMathModeNotInfShift = 1,
+ FPFastMathModeNSZShift = 2,
+ FPFastMathModeAllowRecipShift = 3,
+ FPFastMathModeFastShift = 4,
+};
+
+enum FPFastMathModeMask {
+ FPFastMathModeMaskNone = 0,
+ FPFastMathModeNotNaNMask = 0x00000001,
+ FPFastMathModeNotInfMask = 0x00000002,
+ FPFastMathModeNSZMask = 0x00000004,
+ FPFastMathModeAllowRecipMask = 0x00000008,
+ FPFastMathModeFastMask = 0x00000010,
+};
+
+enum FPRoundingMode {
+ FPRoundingModeRTE = 0,
+ FPRoundingModeRTZ = 1,
+ FPRoundingModeRTP = 2,
+ FPRoundingModeRTN = 3,
+};
+
+enum LinkageType {
+ LinkageTypeExport = 0,
+ LinkageTypeImport = 1,
+};
+
+enum AccessQualifier {
+ AccessQualifierReadOnly = 0,
+ AccessQualifierWriteOnly = 1,
+ AccessQualifierReadWrite = 2,
+};
+
+enum FunctionParameterAttribute {
+ FunctionParameterAttributeZext = 0,
+ FunctionParameterAttributeSext = 1,
+ FunctionParameterAttributeByVal = 2,
+ FunctionParameterAttributeSret = 3,
+ FunctionParameterAttributeNoAlias = 4,
+ FunctionParameterAttributeNoCapture = 5,
+ FunctionParameterAttributeSVM = 6,
+ FunctionParameterAttributeNoWrite = 7,
+ FunctionParameterAttributeNoReadWrite = 8,
+};
+
+enum Decoration {
+ DecorationPrecisionLow = 0,
+ DecorationPrecisionMedium = 1,
+ DecorationPrecisionHigh = 2,
+ DecorationBlock = 3,
+ DecorationBufferBlock = 4,
+ DecorationRowMajor = 5,
+ DecorationColMajor = 6,
+ DecorationGLSLShared = 7,
+ DecorationGLSLStd140 = 8,
+ DecorationGLSLStd430 = 9,
+ DecorationGLSLPacked = 10,
+ DecorationSmooth = 11,
+ DecorationNoperspective = 12,
+ DecorationFlat = 13,
+ DecorationPatch = 14,
+ DecorationCentroid = 15,
+ DecorationSample = 16,
+ DecorationInvariant = 17,
+ DecorationRestrict = 18,
+ DecorationAliased = 19,
+ DecorationVolatile = 20,
+ DecorationConstant = 21,
+ DecorationCoherent = 22,
+ DecorationNonwritable = 23,
+ DecorationNonreadable = 24,
+ DecorationUniform = 25,
+ DecorationNoStaticUse = 26,
+ DecorationCPacked = 27,
+ DecorationSaturatedConversion = 28,
+ DecorationStream = 29,
+ DecorationLocation = 30,
+ DecorationComponent = 31,
+ DecorationIndex = 32,
+ DecorationBinding = 33,
+ DecorationDescriptorSet = 34,
+ DecorationOffset = 35,
+ DecorationAlignment = 36,
+ DecorationXfbBuffer = 37,
+ DecorationStride = 38,
+ DecorationBuiltIn = 39,
+ DecorationFuncParamAttr = 40,
+ DecorationFPRoundingMode = 41,
+ DecorationFPFastMathMode = 42,
+ DecorationLinkageAttributes = 43,
+ DecorationSpecId = 44,
+};
+
+enum BuiltIn {
+ BuiltInPosition = 0,
+ BuiltInPointSize = 1,
+ BuiltInClipVertex = 2,
+ BuiltInClipDistance = 3,
+ BuiltInCullDistance = 4,
+ BuiltInVertexId = 5,
+ BuiltInInstanceId = 6,
+ BuiltInPrimitiveId = 7,
+ BuiltInInvocationId = 8,
+ BuiltInLayer = 9,
+ BuiltInViewportIndex = 10,
+ BuiltInTessLevelOuter = 11,
+ BuiltInTessLevelInner = 12,
+ BuiltInTessCoord = 13,
+ BuiltInPatchVertices = 14,
+ BuiltInFragCoord = 15,
+ BuiltInPointCoord = 16,
+ BuiltInFrontFacing = 17,
+ BuiltInSampleId = 18,
+ BuiltInSamplePosition = 19,
+ BuiltInSampleMask = 20,
+ BuiltInFragColor = 21,
+ BuiltInFragDepth = 22,
+ BuiltInHelperInvocation = 23,
+ BuiltInNumWorkgroups = 24,
+ BuiltInWorkgroupSize = 25,
+ BuiltInWorkgroupId = 26,
+ BuiltInLocalInvocationId = 27,
+ BuiltInGlobalInvocationId = 28,
+ BuiltInLocalInvocationIndex = 29,
+ BuiltInWorkDim = 30,
+ BuiltInGlobalSize = 31,
+ BuiltInEnqueuedWorkgroupSize = 32,
+ BuiltInGlobalOffset = 33,
+ BuiltInGlobalLinearId = 34,
+ BuiltInWorkgroupLinearId = 35,
+ BuiltInSubgroupSize = 36,
+ BuiltInSubgroupMaxSize = 37,
+ BuiltInNumSubgroups = 38,
+ BuiltInNumEnqueuedSubgroups = 39,
+ BuiltInSubgroupId = 40,
+ BuiltInSubgroupLocalInvocationId = 41,
+};
+
+enum SelectionControlShift {
+ SelectionControlFlattenShift = 0,
+ SelectionControlDontFlattenShift = 1,
+};
+
+enum SelectionControlMask {
+ SelectionControlMaskNone = 0,
+ SelectionControlFlattenMask = 0x00000001,
+ SelectionControlDontFlattenMask = 0x00000002,
+};
+
+enum LoopControlShift {
+ LoopControlUnrollShift = 0,
+ LoopControlDontUnrollShift = 1,
+};
+
+enum LoopControlMask {
+ LoopControlMaskNone = 0,
+ LoopControlUnrollMask = 0x00000001,
+ LoopControlDontUnrollMask = 0x00000002,
+};
+
+enum FunctionControlShift {
+ FunctionControlInlineShift = 0,
+ FunctionControlDontInlineShift = 1,
+ FunctionControlPureShift = 2,
+ FunctionControlConstShift = 3,
+};
+
+enum FunctionControlMask {
+ FunctionControlMaskNone = 0,
+ FunctionControlInlineMask = 0x00000001,
+ FunctionControlDontInlineMask = 0x00000002,
+ FunctionControlPureMask = 0x00000004,
+ FunctionControlConstMask = 0x00000008,
+};
+
+enum MemorySemanticsShift {
+ MemorySemanticsRelaxedShift = 0,
+ MemorySemanticsSequentiallyConsistentShift = 1,
+ MemorySemanticsAcquireShift = 2,
+ MemorySemanticsReleaseShift = 3,
+ MemorySemanticsUniformMemoryShift = 4,
+ MemorySemanticsSubgroupMemoryShift = 5,
+ MemorySemanticsWorkgroupLocalMemoryShift = 6,
+ MemorySemanticsWorkgroupGlobalMemoryShift = 7,
+ MemorySemanticsAtomicCounterMemoryShift = 8,
+ MemorySemanticsImageMemoryShift = 9,
+};
+
+enum MemorySemanticsMask {
+ MemorySemanticsMaskNone = 0,
+ MemorySemanticsRelaxedMask = 0x00000001,
+ MemorySemanticsSequentiallyConsistentMask = 0x00000002,
+ MemorySemanticsAcquireMask = 0x00000004,
+ MemorySemanticsReleaseMask = 0x00000008,
+ MemorySemanticsUniformMemoryMask = 0x00000010,
+ MemorySemanticsSubgroupMemoryMask = 0x00000020,
+ MemorySemanticsWorkgroupLocalMemoryMask = 0x00000040,
+ MemorySemanticsWorkgroupGlobalMemoryMask = 0x00000080,
+ MemorySemanticsAtomicCounterMemoryMask = 0x00000100,
+ MemorySemanticsImageMemoryMask = 0x00000200,
+};
+
+enum MemoryAccessShift {
+ MemoryAccessVolatileShift = 0,
+ MemoryAccessAlignedShift = 1,
+};
+
+enum MemoryAccessMask {
+ MemoryAccessMaskNone = 0,
+ MemoryAccessVolatileMask = 0x00000001,
+ MemoryAccessAlignedMask = 0x00000002,
+};
+
+enum ExecutionScope {
+ ExecutionScopeCrossDevice = 0,
+ ExecutionScopeDevice = 1,
+ ExecutionScopeWorkgroup = 2,
+ ExecutionScopeSubgroup = 3,
+};
+
+enum GroupOperation {
+ GroupOperationReduce = 0,
+ GroupOperationInclusiveScan = 1,
+ GroupOperationExclusiveScan = 2,
+};
+
+enum KernelEnqueueFlags {
+ KernelEnqueueFlagsNoWait = 0,
+ KernelEnqueueFlagsWaitKernel = 1,
+ KernelEnqueueFlagsWaitWorkGroup = 2,
+};
+
+enum KernelProfilingInfoShift {
+ KernelProfilingInfoCmdExecTimeShift = 0,
+};
+
+enum KernelProfilingInfoMask {
+ KernelProfilingInfoMaskNone = 0,
+ KernelProfilingInfoCmdExecTimeMask = 0x00000001,
+};
+
+enum Op {
+ OpNop = 0,
+ OpSource = 1,
+ OpSourceExtension = 2,
+ OpExtension = 3,
+ OpExtInstImport = 4,
+ OpMemoryModel = 5,
+ OpEntryPoint = 6,
+ OpExecutionMode = 7,
+ OpTypeVoid = 8,
+ OpTypeBool = 9,
+ OpTypeInt = 10,
+ OpTypeFloat = 11,
+ OpTypeVector = 12,
+ OpTypeMatrix = 13,
+ OpTypeSampler = 14,
+ OpTypeFilter = 15,
+ OpTypeArray = 16,
+ OpTypeRuntimeArray = 17,
+ OpTypeStruct = 18,
+ OpTypeOpaque = 19,
+ OpTypePointer = 20,
+ OpTypeFunction = 21,
+ OpTypeEvent = 22,
+ OpTypeDeviceEvent = 23,
+ OpTypeReserveId = 24,
+ OpTypeQueue = 25,
+ OpTypePipe = 26,
+ OpConstantTrue = 27,
+ OpConstantFalse = 28,
+ OpConstant = 29,
+ OpConstantComposite = 30,
+ OpConstantSampler = 31,
+ OpConstantNullPointer = 32,
+ OpConstantNullObject = 33,
+ OpSpecConstantTrue = 34,
+ OpSpecConstantFalse = 35,
+ OpSpecConstant = 36,
+ OpSpecConstantComposite = 37,
+ OpVariable = 38,
+ OpVariableArray = 39,
+ OpFunction = 40,
+ OpFunctionParameter = 41,
+ OpFunctionEnd = 42,
+ OpFunctionCall = 43,
+ OpExtInst = 44,
+ OpUndef = 45,
+ OpLoad = 46,
+ OpStore = 47,
+ OpPhi = 48,
+ OpDecorationGroup = 49,
+ OpDecorate = 50,
+ OpMemberDecorate = 51,
+ OpGroupDecorate = 52,
+ OpGroupMemberDecorate = 53,
+ OpName = 54,
+ OpMemberName = 55,
+ OpString = 56,
+ OpLine = 57,
+ OpVectorExtractDynamic = 58,
+ OpVectorInsertDynamic = 59,
+ OpVectorShuffle = 60,
+ OpCompositeConstruct = 61,
+ OpCompositeExtract = 62,
+ OpCompositeInsert = 63,
+ OpCopyObject = 64,
+ OpCopyMemory = 65,
+ OpCopyMemorySized = 66,
+ OpSampler = 67,
+ OpTextureSample = 68,
+ OpTextureSampleDref = 69,
+ OpTextureSampleLod = 70,
+ OpTextureSampleProj = 71,
+ OpTextureSampleGrad = 72,
+ OpTextureSampleOffset = 73,
+ OpTextureSampleProjLod = 74,
+ OpTextureSampleProjGrad = 75,
+ OpTextureSampleLodOffset = 76,
+ OpTextureSampleProjOffset = 77,
+ OpTextureSampleGradOffset = 78,
+ OpTextureSampleProjLodOffset = 79,
+ OpTextureSampleProjGradOffset = 80,
+ OpTextureFetchTexelLod = 81,
+ OpTextureFetchTexelOffset = 82,
+ OpTextureFetchSample = 83,
+ OpTextureFetchTexel = 84,
+ OpTextureGather = 85,
+ OpTextureGatherOffset = 86,
+ OpTextureGatherOffsets = 87,
+ OpTextureQuerySizeLod = 88,
+ OpTextureQuerySize = 89,
+ OpTextureQueryLod = 90,
+ OpTextureQueryLevels = 91,
+ OpTextureQuerySamples = 92,
+ OpAccessChain = 93,
+ OpInBoundsAccessChain = 94,
+ OpSNegate = 95,
+ OpFNegate = 96,
+ OpNot = 97,
+ OpAny = 98,
+ OpAll = 99,
+ OpConvertFToU = 100,
+ OpConvertFToS = 101,
+ OpConvertSToF = 102,
+ OpConvertUToF = 103,
+ OpUConvert = 104,
+ OpSConvert = 105,
+ OpFConvert = 106,
+ OpConvertPtrToU = 107,
+ OpConvertUToPtr = 108,
+ OpPtrCastToGeneric = 109,
+ OpGenericCastToPtr = 110,
+ OpBitcast = 111,
+ OpTranspose = 112,
+ OpIsNan = 113,
+ OpIsInf = 114,
+ OpIsFinite = 115,
+ OpIsNormal = 116,
+ OpSignBitSet = 117,
+ OpLessOrGreater = 118,
+ OpOrdered = 119,
+ OpUnordered = 120,
+ OpArrayLength = 121,
+ OpIAdd = 122,
+ OpFAdd = 123,
+ OpISub = 124,
+ OpFSub = 125,
+ OpIMul = 126,
+ OpFMul = 127,
+ OpUDiv = 128,
+ OpSDiv = 129,
+ OpFDiv = 130,
+ OpUMod = 131,
+ OpSRem = 132,
+ OpSMod = 133,
+ OpFRem = 134,
+ OpFMod = 135,
+ OpVectorTimesScalar = 136,
+ OpMatrixTimesScalar = 137,
+ OpVectorTimesMatrix = 138,
+ OpMatrixTimesVector = 139,
+ OpMatrixTimesMatrix = 140,
+ OpOuterProduct = 141,
+ OpDot = 142,
+ OpShiftRightLogical = 143,
+ OpShiftRightArithmetic = 144,
+ OpShiftLeftLogical = 145,
+ OpLogicalOr = 146,
+ OpLogicalXor = 147,
+ OpLogicalAnd = 148,
+ OpBitwiseOr = 149,
+ OpBitwiseXor = 150,
+ OpBitwiseAnd = 151,
+ OpSelect = 152,
+ OpIEqual = 153,
+ OpFOrdEqual = 154,
+ OpFUnordEqual = 155,
+ OpINotEqual = 156,
+ OpFOrdNotEqual = 157,
+ OpFUnordNotEqual = 158,
+ OpULessThan = 159,
+ OpSLessThan = 160,
+ OpFOrdLessThan = 161,
+ OpFUnordLessThan = 162,
+ OpUGreaterThan = 163,
+ OpSGreaterThan = 164,
+ OpFOrdGreaterThan = 165,
+ OpFUnordGreaterThan = 166,
+ OpULessThanEqual = 167,
+ OpSLessThanEqual = 168,
+ OpFOrdLessThanEqual = 169,
+ OpFUnordLessThanEqual = 170,
+ OpUGreaterThanEqual = 171,
+ OpSGreaterThanEqual = 172,
+ OpFOrdGreaterThanEqual = 173,
+ OpFUnordGreaterThanEqual = 174,
+ OpDPdx = 175,
+ OpDPdy = 176,
+ OpFwidth = 177,
+ OpDPdxFine = 178,
+ OpDPdyFine = 179,
+ OpFwidthFine = 180,
+ OpDPdxCoarse = 181,
+ OpDPdyCoarse = 182,
+ OpFwidthCoarse = 183,
+ OpEmitVertex = 184,
+ OpEndPrimitive = 185,
+ OpEmitStreamVertex = 186,
+ OpEndStreamPrimitive = 187,
+ OpControlBarrier = 188,
+ OpMemoryBarrier = 189,
+ OpImagePointer = 190,
+ OpAtomicInit = 191,
+ OpAtomicLoad = 192,
+ OpAtomicStore = 193,
+ OpAtomicExchange = 194,
+ OpAtomicCompareExchange = 195,
+ OpAtomicCompareExchangeWeak = 196,
+ OpAtomicIIncrement = 197,
+ OpAtomicIDecrement = 198,
+ OpAtomicIAdd = 199,
+ OpAtomicISub = 200,
+ OpAtomicUMin = 201,
+ OpAtomicUMax = 202,
+ OpAtomicAnd = 203,
+ OpAtomicOr = 204,
+ OpAtomicXor = 205,
+ OpLoopMerge = 206,
+ OpSelectionMerge = 207,
+ OpLabel = 208,
+ OpBranch = 209,
+ OpBranchConditional = 210,
+ OpSwitch = 211,
+ OpKill = 212,
+ OpReturn = 213,
+ OpReturnValue = 214,
+ OpUnreachable = 215,
+ OpLifetimeStart = 216,
+ OpLifetimeStop = 217,
+ OpCompileFlag = 218,
+ OpAsyncGroupCopy = 219,
+ OpWaitGroupEvents = 220,
+ OpGroupAll = 221,
+ OpGroupAny = 222,
+ OpGroupBroadcast = 223,
+ OpGroupIAdd = 224,
+ OpGroupFAdd = 225,
+ OpGroupFMin = 226,
+ OpGroupUMin = 227,
+ OpGroupSMin = 228,
+ OpGroupFMax = 229,
+ OpGroupUMax = 230,
+ OpGroupSMax = 231,
+ OpGenericCastToPtrExplicit = 232,
+ OpGenericPtrMemSemantics = 233,
+ OpReadPipe = 234,
+ OpWritePipe = 235,
+ OpReservedReadPipe = 236,
+ OpReservedWritePipe = 237,
+ OpReserveReadPipePackets = 238,
+ OpReserveWritePipePackets = 239,
+ OpCommitReadPipe = 240,
+ OpCommitWritePipe = 241,
+ OpIsValidReserveId = 242,
+ OpGetNumPipePackets = 243,
+ OpGetMaxPipePackets = 244,
+ OpGroupReserveReadPipePackets = 245,
+ OpGroupReserveWritePipePackets = 246,
+ OpGroupCommitReadPipe = 247,
+ OpGroupCommitWritePipe = 248,
+ OpEnqueueMarker = 249,
+ OpEnqueueKernel = 250,
+ OpGetKernelNDrangeSubGroupCount = 251,
+ OpGetKernelNDrangeMaxSubGroupSize = 252,
+ OpGetKernelWorkGroupSize = 253,
+ OpGetKernelPreferredWorkGroupSizeMultiple = 254,
+ OpRetainEvent = 255,
+ OpReleaseEvent = 256,
+ OpCreateUserEvent = 257,
+ OpIsValidEvent = 258,
+ OpSetUserEventStatus = 259,
+ OpCaptureEventProfilingInfo = 260,
+ OpGetDefaultQueue = 261,
+ OpBuildNDRange = 262,
+ OpSatConvertSToU = 263,
+ OpSatConvertUToS = 264,
+ OpAtomicIMin = 265,
+ OpAtomicIMax = 266,
+};
+
+}; // end namespace spv
+
+#endif // #ifdef __cplusplus
+
+
+#ifndef __cplusplus
+
+static const int SpvMagicNumber = 0x07230203;
+static const int SpvVersion = 99;
+
+typedef unsigned int SpvId;
+
+static const unsigned int SpvOpCodeMask = 0xFFFF;
+static const unsigned int SpvWordCountShift = 16;
+
+typedef enum SpvSourceLanguage_ {
+ SpvSourceLanguageUnknown = 0,
+ SpvSourceLanguageESSL = 1,
+ SpvSourceLanguageGLSL = 2,
+ SpvSourceLanguageOpenCL = 3,
+} SpvSourceLanguage;
+
+typedef enum SpvExecutionModel_ {
+ SpvExecutionModelVertex = 0,
+ SpvExecutionModelTessellationControl = 1,
+ SpvExecutionModelTessellationEvaluation = 2,
+ SpvExecutionModelGeometry = 3,
+ SpvExecutionModelFragment = 4,
+ SpvExecutionModelGLCompute = 5,
+ SpvExecutionModelKernel = 6,
+} SpvExecutionModel;
+
+typedef enum SpvAddressingModel_ {
+ SpvAddressingModelLogical = 0,
+ SpvAddressingModelPhysical32 = 1,
+ SpvAddressingModelPhysical64 = 2,
+} SpvAddressingModel;
+
+typedef enum SpvMemoryModel_ {
+ SpvMemoryModelSimple = 0,
+ SpvMemoryModelGLSL450 = 1,
+ SpvMemoryModelOpenCL12 = 2,
+ SpvMemoryModelOpenCL20 = 3,
+ SpvMemoryModelOpenCL21 = 4,
+} SpvMemoryModel;
+
+typedef enum SpvExecutionMode_ {
+ SpvExecutionModeInvocations = 0,
+ SpvExecutionModeSpacingEqual = 1,
+ SpvExecutionModeSpacingFractionalEven = 2,
+ SpvExecutionModeSpacingFractionalOdd = 3,
+ SpvExecutionModeVertexOrderCw = 4,
+ SpvExecutionModeVertexOrderCcw = 5,
+ SpvExecutionModePixelCenterInteger = 6,
+ SpvExecutionModeOriginUpperLeft = 7,
+ SpvExecutionModeEarlyFragmentTests = 8,
+ SpvExecutionModePointMode = 9,
+ SpvExecutionModeXfb = 10,
+ SpvExecutionModeDepthReplacing = 11,
+ SpvExecutionModeDepthAny = 12,
+ SpvExecutionModeDepthGreater = 13,
+ SpvExecutionModeDepthLess = 14,
+ SpvExecutionModeDepthUnchanged = 15,
+ SpvExecutionModeLocalSize = 16,
+ SpvExecutionModeLocalSizeHint = 17,
+ SpvExecutionModeInputPoints = 18,
+ SpvExecutionModeInputLines = 19,
+ SpvExecutionModeInputLinesAdjacency = 20,
+ SpvExecutionModeInputTriangles = 21,
+ SpvExecutionModeInputTrianglesAdjacency = 22,
+ SpvExecutionModeInputQuads = 23,
+ SpvExecutionModeInputIsolines = 24,
+ SpvExecutionModeOutputVertices = 25,
+ SpvExecutionModeOutputPoints = 26,
+ SpvExecutionModeOutputLineStrip = 27,
+ SpvExecutionModeOutputTriangleStrip = 28,
+ SpvExecutionModeVecTypeHint = 29,
+ SpvExecutionModeContractionOff = 30,
+} SpvExecutionMode;
+
+typedef enum SpvStorageClass_ {
+ SpvStorageClassUniformConstant = 0,
+ SpvStorageClassInput = 1,
+ SpvStorageClassUniform = 2,
+ SpvStorageClassOutput = 3,
+ SpvStorageClassWorkgroupLocal = 4,
+ SpvStorageClassWorkgroupGlobal = 5,
+ SpvStorageClassPrivateGlobal = 6,
+ SpvStorageClassFunction = 7,
+ SpvStorageClassGeneric = 8,
+ SpvStorageClassPrivate = 9,
+ SpvStorageClassAtomicCounter = 10,
+} SpvStorageClass;
+
+typedef enum SpvDim_ {
+ SpvDim1D = 0,
+ SpvDim2D = 1,
+ SpvDim3D = 2,
+ SpvDimCube = 3,
+ SpvDimRect = 4,
+ SpvDimBuffer = 5,
+} SpvDim;
+
+typedef enum SpvSamplerAddressingMode_ {
+ SpvSamplerAddressingModeNone = 0,
+ SpvSamplerAddressingModeClampToEdge = 1,
+ SpvSamplerAddressingModeClamp = 2,
+ SpvSamplerAddressingModeRepeat = 3,
+ SpvSamplerAddressingModeRepeatMirrored = 4,
+} SpvSamplerAddressingMode;
+
+typedef enum SpvSamplerFilterMode_ {
+ SpvSamplerFilterModeNearest = 0,
+ SpvSamplerFilterModeLinear = 1,
+} SpvSamplerFilterMode;
+
+typedef enum SpvFPFastMathModeShift_ {
+ SpvFPFastMathModeNotNaNShift = 0,
+ SpvFPFastMathModeNotInfShift = 1,
+ SpvFPFastMathModeNSZShift = 2,
+ SpvFPFastMathModeAllowRecipShift = 3,
+ SpvFPFastMathModeFastShift = 4,
+} SpvFPFastMathModeShift;
+
+typedef enum SpvFPFastMathModeMask_ {
+ SpvFPFastMathModeMaskNone = 0,
+ SpvFPFastMathModeNotNaNMask = 0x00000001,
+ SpvFPFastMathModeNotInfMask = 0x00000002,
+ SpvFPFastMathModeNSZMask = 0x00000004,
+ SpvFPFastMathModeAllowRecipMask = 0x00000008,
+ SpvFPFastMathModeFastMask = 0x00000010,
+} SpvFPFastMathModeMask;
+
+typedef enum SpvFPRoundingMode_ {
+ SpvFPRoundingModeRTE = 0,
+ SpvFPRoundingModeRTZ = 1,
+ SpvFPRoundingModeRTP = 2,
+ SpvFPRoundingModeRTN = 3,
+} SpvFPRoundingMode;
+
+typedef enum SpvLinkageType_ {
+ SpvLinkageTypeExport = 0,
+ SpvLinkageTypeImport = 1,
+} SpvLinkageType;
+
+typedef enum SpvAccessQualifier_ {
+ SpvAccessQualifierReadOnly = 0,
+ SpvAccessQualifierWriteOnly = 1,
+ SpvAccessQualifierReadWrite = 2,
+} SpvAccessQualifier;
+
+typedef enum SpvFunctionParameterAttribute_ {
+ SpvFunctionParameterAttributeZext = 0,
+ SpvFunctionParameterAttributeSext = 1,
+ SpvFunctionParameterAttributeByVal = 2,
+ SpvFunctionParameterAttributeSret = 3,
+ SpvFunctionParameterAttributeNoAlias = 4,
+ SpvFunctionParameterAttributeNoCapture = 5,
+ SpvFunctionParameterAttributeSVM = 6,
+ SpvFunctionParameterAttributeNoWrite = 7,
+ SpvFunctionParameterAttributeNoReadWrite = 8,
+} SpvFunctionParameterAttribute;
+
+typedef enum SpvDecoration_ {
+ SpvDecorationPrecisionLow = 0,
+ SpvDecorationPrecisionMedium = 1,
+ SpvDecorationPrecisionHigh = 2,
+ SpvDecorationBlock = 3,
+ SpvDecorationBufferBlock = 4,
+ SpvDecorationRowMajor = 5,
+ SpvDecorationColMajor = 6,
+ SpvDecorationGLSLShared = 7,
+ SpvDecorationGLSLStd140 = 8,
+ SpvDecorationGLSLStd430 = 9,
+ SpvDecorationGLSLPacked = 10,
+ SpvDecorationSmooth = 11,
+ SpvDecorationNoperspective = 12,
+ SpvDecorationFlat = 13,
+ SpvDecorationPatch = 14,
+ SpvDecorationCentroid = 15,
+ SpvDecorationSample = 16,
+ SpvDecorationInvariant = 17,
+ SpvDecorationRestrict = 18,
+ SpvDecorationAliased = 19,
+ SpvDecorationVolatile = 20,
+ SpvDecorationConstant = 21,
+ SpvDecorationCoherent = 22,
+ SpvDecorationNonwritable = 23,
+ SpvDecorationNonreadable = 24,
+ SpvDecorationUniform = 25,
+ SpvDecorationNoStaticUse = 26,
+ SpvDecorationCPacked = 27,
+ SpvDecorationSaturatedConversion = 28,
+ SpvDecorationStream = 29,
+ SpvDecorationLocation = 30,
+ SpvDecorationComponent = 31,
+ SpvDecorationIndex = 32,
+ SpvDecorationBinding = 33,
+ SpvDecorationDescriptorSet = 34,
+ SpvDecorationOffset = 35,
+ SpvDecorationAlignment = 36,
+ SpvDecorationXfbBuffer = 37,
+ SpvDecorationStride = 38,
+ SpvDecorationBuiltIn = 39,
+ SpvDecorationFuncParamAttr = 40,
+ SpvDecorationFPRoundingMode = 41,
+ SpvDecorationFPFastMathMode = 42,
+ SpvDecorationLinkageAttributes = 43,
+ SpvDecorationSpecId = 44,
+} SpvDecoration;
+
+typedef enum SpvBuiltIn_ {
+ SpvBuiltInPosition = 0,
+ SpvBuiltInPointSize = 1,
+ SpvBuiltInClipVertex = 2,
+ SpvBuiltInClipDistance = 3,
+ SpvBuiltInCullDistance = 4,
+ SpvBuiltInVertexId = 5,
+ SpvBuiltInInstanceId = 6,
+ SpvBuiltInPrimitiveId = 7,
+ SpvBuiltInInvocationId = 8,
+ SpvBuiltInLayer = 9,
+ SpvBuiltInViewportIndex = 10,
+ SpvBuiltInTessLevelOuter = 11,
+ SpvBuiltInTessLevelInner = 12,
+ SpvBuiltInTessCoord = 13,
+ SpvBuiltInPatchVertices = 14,
+ SpvBuiltInFragCoord = 15,
+ SpvBuiltInPointCoord = 16,
+ SpvBuiltInFrontFacing = 17,
+ SpvBuiltInSampleId = 18,
+ SpvBuiltInSamplePosition = 19,
+ SpvBuiltInSampleMask = 20,
+ SpvBuiltInFragColor = 21,
+ SpvBuiltInFragDepth = 22,
+ SpvBuiltInHelperInvocation = 23,
+ SpvBuiltInNumWorkgroups = 24,
+ SpvBuiltInWorkgroupSize = 25,
+ SpvBuiltInWorkgroupId = 26,
+ SpvBuiltInLocalInvocationId = 27,
+ SpvBuiltInGlobalInvocationId = 28,
+ SpvBuiltInLocalInvocationIndex = 29,
+ SpvBuiltInWorkDim = 30,
+ SpvBuiltInGlobalSize = 31,
+ SpvBuiltInEnqueuedWorkgroupSize = 32,
+ SpvBuiltInGlobalOffset = 33,
+ SpvBuiltInGlobalLinearId = 34,
+ SpvBuiltInWorkgroupLinearId = 35,
+ SpvBuiltInSubgroupSize = 36,
+ SpvBuiltInSubgroupMaxSize = 37,
+ SpvBuiltInNumSubgroups = 38,
+ SpvBuiltInNumEnqueuedSubgroups = 39,
+ SpvBuiltInSubgroupId = 40,
+ SpvBuiltInSubgroupLocalInvocationId = 41,
+} SpvBuiltIn;
+
+typedef enum SpvSelectionControlShift_ {
+ SpvSelectionControlFlattenShift = 0,
+ SpvSelectionControlDontFlattenShift = 1,
+} SpvSelectionControlShift;
+
+typedef enum SpvSelectionControlMask_ {
+ SpvSelectionControlMaskNone = 0,
+ SpvSelectionControlFlattenMask = 0x00000001,
+ SpvSelectionControlDontFlattenMask = 0x00000002,
+} SpvSelectionControlMask;
+
+typedef enum SpvLoopControlShift_ {
+ SpvLoopControlUnrollShift = 0,
+ SpvLoopControlDontUnrollShift = 1,
+} SpvLoopControlShift;
+
+typedef enum SpvLoopControlMask_ {
+ SpvLoopControlMaskNone = 0,
+ SpvLoopControlUnrollMask = 0x00000001,
+ SpvLoopControlDontUnrollMask = 0x00000002,
+} SpvLoopControlMask;
+
+typedef enum SpvFunctionControlShift_ {
+ SpvFunctionControlInlineShift = 0,
+ SpvFunctionControlDontInlineShift = 1,
+ SpvFunctionControlPureShift = 2,
+ SpvFunctionControlConstShift = 3,
+} SpvFunctionControlShift;
+
+typedef enum SpvFunctionControlMask_ {
+ SpvFunctionControlMaskNone = 0,
+ SpvFunctionControlInlineMask = 0x00000001,
+ SpvFunctionControlDontInlineMask = 0x00000002,
+ SpvFunctionControlPureMask = 0x00000004,
+ SpvFunctionControlConstMask = 0x00000008,
+} SpvFunctionControlMask;
+
+typedef enum SpvMemorySemanticsShift_ {
+ SpvMemorySemanticsRelaxedShift = 0,
+ SpvMemorySemanticsSequentiallyConsistentShift = 1,
+ SpvMemorySemanticsAcquireShift = 2,
+ SpvMemorySemanticsReleaseShift = 3,
+ SpvMemorySemanticsUniformMemoryShift = 4,
+ SpvMemorySemanticsSubgroupMemoryShift = 5,
+ SpvMemorySemanticsWorkgroupLocalMemoryShift = 6,
+ SpvMemorySemanticsWorkgroupGlobalMemoryShift = 7,
+ SpvMemorySemanticsAtomicCounterMemoryShift = 8,
+ SpvMemorySemanticsImageMemoryShift = 9,
+} SpvMemorySemanticsShift;
+
+typedef enum SpvMemorySemanticsMask_ {
+ SpvMemorySemanticsMaskNone = 0,
+ SpvMemorySemanticsRelaxedMask = 0x00000001,
+ SpvMemorySemanticsSequentiallyConsistentMask = 0x00000002,
+ SpvMemorySemanticsAcquireMask = 0x00000004,
+ SpvMemorySemanticsReleaseMask = 0x00000008,
+ SpvMemorySemanticsUniformMemoryMask = 0x00000010,
+ SpvMemorySemanticsSubgroupMemoryMask = 0x00000020,
+ SpvMemorySemanticsWorkgroupLocalMemoryMask = 0x00000040,
+ SpvMemorySemanticsWorkgroupGlobalMemoryMask = 0x00000080,
+ SpvMemorySemanticsAtomicCounterMemoryMask = 0x00000100,
+ SpvMemorySemanticsImageMemoryMask = 0x00000200,
+} SpvMemorySemanticsMask;
+
+typedef enum SpvMemoryAccessShift_ {
+ SpvMemoryAccessVolatileShift = 0,
+ SpvMemoryAccessAlignedShift = 1,
+} SpvMemoryAccessShift;
+
+typedef enum SpvMemoryAccessMask_ {
+ SpvMemoryAccessMaskNone = 0,
+ SpvMemoryAccessVolatileMask = 0x00000001,
+ SpvMemoryAccessAlignedMask = 0x00000002,
+} SpvMemoryAccessMask;
+
+typedef enum SpvExecutionScope_ {
+ SpvExecutionScopeCrossDevice = 0,
+ SpvExecutionScopeDevice = 1,
+ SpvExecutionScopeWorkgroup = 2,
+ SpvExecutionScopeSubgroup = 3,
+} SpvExecutionScope;
+
+typedef enum SpvGroupOperation_ {
+ SpvGroupOperationReduce = 0,
+ SpvGroupOperationInclusiveScan = 1,
+ SpvGroupOperationExclusiveScan = 2,
+} SpvGroupOperation;
+
+typedef enum SpvKernelEnqueueFlags_ {
+ SpvKernelEnqueueFlagsNoWait = 0,
+ SpvKernelEnqueueFlagsWaitKernel = 1,
+ SpvKernelEnqueueFlagsWaitWorkGroup = 2,
+} SpvKernelEnqueueFlags;
+
+typedef enum SpvKernelProfilingInfoShift_ {
+ SpvKernelProfilingInfoCmdExecTimeShift = 0,
+} SpvKernelProfilingInfoShift;
+
+typedef enum SpvKernelProfilingInfoMask_ {
+ SpvKernelProfilingInfoMaskNone = 0,
+ SpvKernelProfilingInfoCmdExecTimeMask = 0x00000001,
+} SpvKernelProfilingInfoMask;
+
+typedef enum SpvOp_ {
+ SpvOpNop = 0,
+ SpvOpSource = 1,
+ SpvOpSourceExtension = 2,
+ SpvOpExtension = 3,
+ SpvOpExtInstImport = 4,
+ SpvOpMemoryModel = 5,
+ SpvOpEntryPoint = 6,
+ SpvOpExecutionMode = 7,
+ SpvOpTypeVoid = 8,
+ SpvOpTypeBool = 9,
+ SpvOpTypeInt = 10,
+ SpvOpTypeFloat = 11,
+ SpvOpTypeVector = 12,
+ SpvOpTypeMatrix = 13,
+ SpvOpTypeSampler = 14,
+ SpvOpTypeFilter = 15,
+ SpvOpTypeArray = 16,
+ SpvOpTypeRuntimeArray = 17,
+ SpvOpTypeStruct = 18,
+ SpvOpTypeOpaque = 19,
+ SpvOpTypePointer = 20,
+ SpvOpTypeFunction = 21,
+ SpvOpTypeEvent = 22,
+ SpvOpTypeDeviceEvent = 23,
+ SpvOpTypeReserveId = 24,
+ SpvOpTypeQueue = 25,
+ SpvOpTypePipe = 26,
+ SpvOpConstantTrue = 27,
+ SpvOpConstantFalse = 28,
+ SpvOpConstant = 29,
+ SpvOpConstantComposite = 30,
+ SpvOpConstantSampler = 31,
+ SpvOpConstantNullPointer = 32,
+ SpvOpConstantNullObject = 33,
+ SpvOpSpecConstantTrue = 34,
+ SpvOpSpecConstantFalse = 35,
+ SpvOpSpecConstant = 36,
+ SpvOpSpecConstantComposite = 37,
+ SpvOpVariable = 38,
+ SpvOpVariableArray = 39,
+ SpvOpFunction = 40,
+ SpvOpFunctionParameter = 41,
+ SpvOpFunctionEnd = 42,
+ SpvOpFunctionCall = 43,
+ SpvOpExtInst = 44,
+ SpvOpUndef = 45,
+ SpvOpLoad = 46,
+ SpvOpStore = 47,
+ SpvOpPhi = 48,
+ SpvOpDecorationGroup = 49,
+ SpvOpDecorate = 50,
+ SpvOpMemberDecorate = 51,
+ SpvOpGroupDecorate = 52,
+ SpvOpGroupMemberDecorate = 53,
+ SpvOpName = 54,
+ SpvOpMemberName = 55,
+ SpvOpString = 56,
+ SpvOpLine = 57,
+ SpvOpVectorExtractDynamic = 58,
+ SpvOpVectorInsertDynamic = 59,
+ SpvOpVectorShuffle = 60,
+ SpvOpCompositeConstruct = 61,
+ SpvOpCompositeExtract = 62,
+ SpvOpCompositeInsert = 63,
+ SpvOpCopyObject = 64,
+ SpvOpCopyMemory = 65,
+ SpvOpCopyMemorySized = 66,
+ SpvOpSampler = 67,
+ SpvOpTextureSample = 68,
+ SpvOpTextureSampleDref = 69,
+ SpvOpTextureSampleLod = 70,
+ SpvOpTextureSampleProj = 71,
+ SpvOpTextureSampleGrad = 72,
+ SpvOpTextureSampleOffset = 73,
+ SpvOpTextureSampleProjLod = 74,
+ SpvOpTextureSampleProjGrad = 75,
+ SpvOpTextureSampleLodOffset = 76,
+ SpvOpTextureSampleProjOffset = 77,
+ SpvOpTextureSampleGradOffset = 78,
+ SpvOpTextureSampleProjLodOffset = 79,
+ SpvOpTextureSampleProjGradOffset = 80,
+ SpvOpTextureFetchTexelLod = 81,
+ SpvOpTextureFetchTexelOffset = 82,
+ SpvOpTextureFetchSample = 83,
+ SpvOpTextureFetchTexel = 84,
+ SpvOpTextureGather = 85,
+ SpvOpTextureGatherOffset = 86,
+ SpvOpTextureGatherOffsets = 87,
+ SpvOpTextureQuerySizeLod = 88,
+ SpvOpTextureQuerySize = 89,
+ SpvOpTextureQueryLod = 90,
+ SpvOpTextureQueryLevels = 91,
+ SpvOpTextureQuerySamples = 92,
+ SpvOpAccessChain = 93,
+ SpvOpInBoundsAccessChain = 94,
+ SpvOpSNegate = 95,
+ SpvOpFNegate = 96,
+ SpvOpNot = 97,
+ SpvOpAny = 98,
+ SpvOpAll = 99,
+ SpvOpConvertFToU = 100,
+ SpvOpConvertFToS = 101,
+ SpvOpConvertSToF = 102,
+ SpvOpConvertUToF = 103,
+ SpvOpUConvert = 104,
+ SpvOpSConvert = 105,
+ SpvOpFConvert = 106,
+ SpvOpConvertPtrToU = 107,
+ SpvOpConvertUToPtr = 108,
+ SpvOpPtrCastToGeneric = 109,
+ SpvOpGenericCastToPtr = 110,
+ SpvOpBitcast = 111,
+ SpvOpTranspose = 112,
+ SpvOpIsNan = 113,
+ SpvOpIsInf = 114,
+ SpvOpIsFinite = 115,
+ SpvOpIsNormal = 116,
+ SpvOpSignBitSet = 117,
+ SpvOpLessOrGreater = 118,
+ SpvOpOrdered = 119,
+ SpvOpUnordered = 120,
+ SpvOpArrayLength = 121,
+ SpvOpIAdd = 122,
+ SpvOpFAdd = 123,
+ SpvOpISub = 124,
+ SpvOpFSub = 125,
+ SpvOpIMul = 126,
+ SpvOpFMul = 127,
+ SpvOpUDiv = 128,
+ SpvOpSDiv = 129,
+ SpvOpFDiv = 130,
+ SpvOpUMod = 131,
+ SpvOpSRem = 132,
+ SpvOpSMod = 133,
+ SpvOpFRem = 134,
+ SpvOpFMod = 135,
+ SpvOpVectorTimesScalar = 136,
+ SpvOpMatrixTimesScalar = 137,
+ SpvOpVectorTimesMatrix = 138,
+ SpvOpMatrixTimesVector = 139,
+ SpvOpMatrixTimesMatrix = 140,
+ SpvOpOuterProduct = 141,
+ SpvOpDot = 142,
+ SpvOpShiftRightLogical = 143,
+ SpvOpShiftRightArithmetic = 144,
+ SpvOpShiftLeftLogical = 145,
+ SpvOpLogicalOr = 146,
+ SpvOpLogicalXor = 147,
+ SpvOpLogicalAnd = 148,
+ SpvOpBitwiseOr = 149,
+ SpvOpBitwiseXor = 150,
+ SpvOpBitwiseAnd = 151,
+ SpvOpSelect = 152,
+ SpvOpIEqual = 153,
+ SpvOpFOrdEqual = 154,
+ SpvOpFUnordEqual = 155,
+ SpvOpINotEqual = 156,
+ SpvOpFOrdNotEqual = 157,
+ SpvOpFUnordNotEqual = 158,
+ SpvOpULessThan = 159,
+ SpvOpSLessThan = 160,
+ SpvOpFOrdLessThan = 161,
+ SpvOpFUnordLessThan = 162,
+ SpvOpUGreaterThan = 163,
+ SpvOpSGreaterThan = 164,
+ SpvOpFOrdGreaterThan = 165,
+ SpvOpFUnordGreaterThan = 166,
+ SpvOpULessThanEqual = 167,
+ SpvOpSLessThanEqual = 168,
+ SpvOpFOrdLessThanEqual = 169,
+ SpvOpFUnordLessThanEqual = 170,
+ SpvOpUGreaterThanEqual = 171,
+ SpvOpSGreaterThanEqual = 172,
+ SpvOpFOrdGreaterThanEqual = 173,
+ SpvOpFUnordGreaterThanEqual = 174,
+ SpvOpDPdx = 175,
+ SpvOpDPdy = 176,
+ SpvOpFwidth = 177,
+ SpvOpDPdxFine = 178,
+ SpvOpDPdyFine = 179,
+ SpvOpFwidthFine = 180,
+ SpvOpDPdxCoarse = 181,
+ SpvOpDPdyCoarse = 182,
+ SpvOpFwidthCoarse = 183,
+ SpvOpEmitVertex = 184,
+ SpvOpEndPrimitive = 185,
+ SpvOpEmitStreamVertex = 186,
+ SpvOpEndStreamPrimitive = 187,
+ SpvOpControlBarrier = 188,
+ SpvOpMemoryBarrier = 189,
+ SpvOpImagePointer = 190,
+ SpvOpAtomicInit = 191,
+ SpvOpAtomicLoad = 192,
+ SpvOpAtomicStore = 193,
+ SpvOpAtomicExchange = 194,
+ SpvOpAtomicCompareExchange = 195,
+ SpvOpAtomicCompareExchangeWeak = 196,
+ SpvOpAtomicIIncrement = 197,
+ SpvOpAtomicIDecrement = 198,
+ SpvOpAtomicIAdd = 199,
+ SpvOpAtomicISub = 200,
+ SpvOpAtomicUMin = 201,
+ SpvOpAtomicUMax = 202,
+ SpvOpAtomicAnd = 203,
+ SpvOpAtomicOr = 204,
+ SpvOpAtomicXor = 205,
+ SpvOpLoopMerge = 206,
+ SpvOpSelectionMerge = 207,
+ SpvOpLabel = 208,
+ SpvOpBranch = 209,
+ SpvOpBranchConditional = 210,
+ SpvOpSwitch = 211,
+ SpvOpKill = 212,
+ SpvOpReturn = 213,
+ SpvOpReturnValue = 214,
+ SpvOpUnreachable = 215,
+ SpvOpLifetimeStart = 216,
+ SpvOpLifetimeStop = 217,
+ SpvOpCompileFlag = 218,
+ SpvOpAsyncGroupCopy = 219,
+ SpvOpWaitGroupEvents = 220,
+ SpvOpGroupAll = 221,
+ SpvOpGroupAny = 222,
+ SpvOpGroupBroadcast = 223,
+ SpvOpGroupIAdd = 224,
+ SpvOpGroupFAdd = 225,
+ SpvOpGroupFMin = 226,
+ SpvOpGroupUMin = 227,
+ SpvOpGroupSMin = 228,
+ SpvOpGroupFMax = 229,
+ SpvOpGroupUMax = 230,
+ SpvOpGroupSMax = 231,
+ SpvOpGenericCastToPtrExplicit = 232,
+ SpvOpGenericPtrMemSemantics = 233,
+ SpvOpReadPipe = 234,
+ SpvOpWritePipe = 235,
+ SpvOpReservedReadPipe = 236,
+ SpvOpReservedWritePipe = 237,
+ SpvOpReserveReadPipePackets = 238,
+ SpvOpReserveWritePipePackets = 239,
+ SpvOpCommitReadPipe = 240,
+ SpvOpCommitWritePipe = 241,
+ SpvOpIsValidReserveId = 242,
+ SpvOpGetNumPipePackets = 243,
+ SpvOpGetMaxPipePackets = 244,
+ SpvOpGroupReserveReadPipePackets = 245,
+ SpvOpGroupReserveWritePipePackets = 246,
+ SpvOpGroupCommitReadPipe = 247,
+ SpvOpGroupCommitWritePipe = 248,
+ SpvOpEnqueueMarker = 249,
+ SpvOpEnqueueKernel = 250,
+ SpvOpGetKernelNDrangeSubGroupCount = 251,
+ SpvOpGetKernelNDrangeMaxSubGroupSize = 252,
+ SpvOpGetKernelWorkGroupSize = 253,
+ SpvOpGetKernelPreferredWorkGroupSizeMultiple = 254,
+ SpvOpRetainEvent = 255,
+ SpvOpReleaseEvent = 256,
+ SpvOpCreateUserEvent = 257,
+ SpvOpIsValidEvent = 258,
+ SpvOpSetUserEventStatus = 259,
+ SpvOpCaptureEventProfilingInfo = 260,
+ SpvOpGetDefaultQueue = 261,
+ SpvOpBuildNDRange = 262,
+ SpvOpSatConvertSToU = 263,
+ SpvOpSatConvertUToS = 264,
+ SpvOpAtomicIMin = 265,
+ SpvOpAtomicIMax = 266,
+} SpvOp;
+
+#endif // #ifndef __cplusplus
+
+#endif // #ifndef spirv_H
diff --git a/src/glsl/nir/spirv2nir.c b/src/glsl/nir/spirv2nir.c
new file mode 100644
index 00000000000..0eed23fbc3f
--- /dev/null
+++ b/src/glsl/nir/spirv2nir.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jason Ekstrand ([email protected])
+ *
+ */
+
+/*
+ * A simple executable that opens a SPIR-V shader, converts it to NIR, and
+ * dumps out the result. This should be useful for testing the
+ * spirv_to_nir code.
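+ *
+ * Example invocation (the resulting NIR is printed to stderr):
+ *
+ *   spirv2nir shader.spv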
+ */
+
+#include "nir_spirv.h"
+
+#include <assert.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+int main(int argc, char **argv)
+{
+ int fd = open(argv[1], O_RDONLY);
+ off_t len = lseek(fd, 0, SEEK_END);
+
+ assert(len % 4 == 0);
+ size_t word_count = len / 4;
+
+ const void *map = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
+ assert(map != MAP_FAILED);
+
+ nir_shader *shader = spirv_to_nir(map, word_count, NULL);
+ nir_print_shader(shader, stderr);
+}
diff --git a/src/glsl/nir/spirv_glsl450_to_nir.c b/src/glsl/nir/spirv_glsl450_to_nir.c
new file mode 100644
index 00000000000..3b9d0940aad
--- /dev/null
+++ b/src/glsl/nir/spirv_glsl450_to_nir.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jason Ekstrand ([email protected])
+ *
+ */
+
+#include "spirv_to_nir_private.h"
+
+enum GLSL450Entrypoint {
+ Round = 0,
+ RoundEven = 1,
+ Trunc = 2,
+ Abs = 3,
+ Sign = 4,
+ Floor = 5,
+ Ceil = 6,
+ Fract = 7,
+
+ Radians = 8,
+ Degrees = 9,
+ Sin = 10,
+ Cos = 11,
+ Tan = 12,
+ Asin = 13,
+ Acos = 14,
+ Atan = 15,
+ Sinh = 16,
+ Cosh = 17,
+ Tanh = 18,
+ Asinh = 19,
+ Acosh = 20,
+ Atanh = 21,
+ Atan2 = 22,
+
+ Pow = 23,
+ Exp = 24,
+ Log = 25,
+ Exp2 = 26,
+ Log2 = 27,
+ Sqrt = 28,
+ InverseSqrt = 29,
+
+ Determinant = 30,
+ MatrixInverse = 31,
+
+ Modf = 32, // second argument needs to be an OpVariable, not an OpLoad
+ Min = 33,
+ Max = 34,
+ Clamp = 35,
+ Mix = 36,
+ Step = 37,
+ SmoothStep = 38,
+
+ FloatBitsToInt = 39,
+ FloatBitsToUint = 40,
+ IntBitsToFloat = 41,
+ UintBitsToFloat = 42,
+
+ Fma = 43,
+ Frexp = 44,
+ Ldexp = 45,
+
+ PackSnorm4x8 = 46,
+ PackUnorm4x8 = 47,
+ PackSnorm2x16 = 48,
+ PackUnorm2x16 = 49,
+ PackHalf2x16 = 50,
+ PackDouble2x32 = 51,
+ UnpackSnorm2x16 = 52,
+ UnpackUnorm2x16 = 53,
+ UnpackHalf2x16 = 54,
+ UnpackSnorm4x8 = 55,
+ UnpackUnorm4x8 = 56,
+ UnpackDouble2x32 = 57,
+
+ Length = 58,
+ Distance = 59,
+ Cross = 60,
+ Normalize = 61,
+ Ftransform = 62,
+ FaceForward = 63,
+ Reflect = 64,
+ Refract = 65,
+
+ UaddCarry = 66,
+ UsubBorrow = 67,
+ UmulExtended = 68,
+ ImulExtended = 69,
+ BitfieldExtract = 70,
+ BitfieldInsert = 71,
+ BitfieldReverse = 72,
+ BitCount = 73,
+ FindLSB = 74,
+ FindMSB = 75,
+
+ InterpolateAtCentroid = 76,
+ InterpolateAtSample = 77,
+ InterpolateAtOffset = 78,
+
+ Count
+};
+
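+/* Builds length(vec) = sqrt(dot(vec, vec)), picking the NIR dot opcode that
+ * matches the source vector width.
+ */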
+static nir_ssa_def*
+build_length(nir_builder *b, nir_ssa_def *vec)
+{
+ switch (vec->num_components) {
+ case 1: return nir_fsqrt(b, nir_fmul(b, vec, vec));
+ case 2: return nir_fsqrt(b, nir_fdot2(b, vec, vec));
+ case 3: return nir_fsqrt(b, nir_fdot3(b, vec, vec));
+ case 4: return nir_fsqrt(b, nir_fdot4(b, vec, vec));
+ default:
+ unreachable("Invalid number of components");
+ }
+}
+
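+/* Handles the GLSL.std.450 entrypoints that map onto a single NIR ALU op or
+ * a short expression.  For OpExtInst, w[1] is the result type, w[2] the
+ * result id, w[3] the extension set, w[4] the entrypoint, and w[5] onwards
+ * the operand <id>s.
+ */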
+static void
+handle_glsl450_alu(struct vtn_builder *b, enum GLSL450Entrypoint entrypoint,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
+
+ /* Collect the various SSA sources */
+ unsigned num_inputs = count - 5;
+ nir_ssa_def *src[3];
+ for (unsigned i = 0; i < num_inputs; i++)
+ src[i] = vtn_ssa_value(b, w[i + 5]);
+
+ nir_op op;
+ switch (entrypoint) {
+ case Round: op = nir_op_fround_even; break; /* TODO */
+ case RoundEven: op = nir_op_fround_even; break;
+ case Trunc: op = nir_op_ftrunc; break;
+ case Abs: op = nir_op_fabs; break;
+ case Sign: op = nir_op_fsign; break;
+ case Floor: op = nir_op_ffloor; break;
+ case Ceil: op = nir_op_fceil; break;
+ case Fract: op = nir_op_ffract; break;
+ case Radians:
+ /* radians(x) = x * pi / 180 */
+ val->ssa = nir_fmul(&b->nb, src[0], nir_imm_float(&b->nb, 0.01745329251));
+ return;
+ case Degrees:
+ /* degrees(x) = x * 180 / pi */
+ val->ssa = nir_fmul(&b->nb, src[0], nir_imm_float(&b->nb, 57.2957795131));
+ return;
+ case Sin: op = nir_op_fsin; break;
+ case Cos: op = nir_op_fcos; break;
+ case Tan:
+ val->ssa = nir_fdiv(&b->nb, nir_fsin(&b->nb, src[0]),
+ nir_fcos(&b->nb, src[0]));
+ return;
+ case Pow: op = nir_op_fpow; break;
+ case Exp2: op = nir_op_fexp2; break;
+ case Log2: op = nir_op_flog2; break;
+ case Sqrt: op = nir_op_fsqrt; break;
+ case InverseSqrt: op = nir_op_frsq; break;
+
+ case Modf: op = nir_op_fmod; break;
+ case Min: op = nir_op_fmin; break;
+ case Max: op = nir_op_fmax; break;
+ case Mix: op = nir_op_flrp; break;
+ case Step:
+ val->ssa = nir_sge(&b->nb, src[1], src[0]);
+ return;
+
+ case FloatBitsToInt:
+ case FloatBitsToUint:
+ case IntBitsToFloat:
+ case UintBitsToFloat:
+ /* Probably going to be removed from the final version of the spec. */
+ val->ssa = src[0];
+ return;
+
+ case Fma: op = nir_op_ffma; break;
+ case Ldexp: op = nir_op_ldexp; break;
+
+ /* Packing/Unpacking functions */
+ case PackSnorm4x8: op = nir_op_pack_snorm_4x8; break;
+ case PackUnorm4x8: op = nir_op_pack_unorm_4x8; break;
+ case PackSnorm2x16: op = nir_op_pack_snorm_2x16; break;
+ case PackUnorm2x16: op = nir_op_pack_unorm_2x16; break;
+ case PackHalf2x16: op = nir_op_pack_half_2x16; break;
+ case UnpackSnorm4x8: op = nir_op_unpack_snorm_4x8; break;
+ case UnpackUnorm4x8: op = nir_op_unpack_unorm_4x8; break;
+ case UnpackSnorm2x16: op = nir_op_unpack_snorm_2x16; break;
+ case UnpackUnorm2x16: op = nir_op_unpack_unorm_2x16; break;
+ case UnpackHalf2x16: op = nir_op_unpack_half_2x16; break;
+
+ case Length:
+ val->ssa = build_length(&b->nb, src[0]);
+ return;
+ case Distance:
+ val->ssa = build_length(&b->nb, nir_fsub(&b->nb, src[0], src[1]));
+ return;
+ case Normalize:
+ val->ssa = nir_fdiv(&b->nb, src[0], build_length(&b->nb, src[0]));
+ return;
+
+ case UaddCarry: op = nir_op_uadd_carry; break;
+ case UsubBorrow: op = nir_op_usub_borrow; break;
+ case BitfieldExtract: op = nir_op_ubitfield_extract; break; /* TODO */
+ case BitfieldInsert: op = nir_op_bitfield_insert; break;
+ case BitfieldReverse: op = nir_op_bitfield_reverse; break;
+ case BitCount: op = nir_op_bit_count; break;
+ case FindLSB: op = nir_op_find_lsb; break;
+ case FindMSB: op = nir_op_ufind_msb; break; /* TODO */
+
+ case Exp:
+ case Log:
+ case Clamp:
+ case Asin:
+ case Acos:
+ case Atan:
+ case Atan2:
+ case Sinh:
+ case Cosh:
+ case Tanh:
+ case Asinh:
+ case Acosh:
+ case Atanh:
+ case SmoothStep:
+ case Frexp:
+ case PackDouble2x32:
+ case UnpackDouble2x32:
+ case Cross:
+ case Ftransform:
+ case FaceForward:
+ case Reflect:
+ case Refract:
+ case UmulExtended:
+ case ImulExtended:
+ default:
+ unreachable("Unhandled opcode");
+ }
+
+ nir_alu_instr *instr = nir_alu_instr_create(b->shader, op);
+ nir_ssa_dest_init(&instr->instr, &instr->dest.dest,
+ glsl_get_vector_elements(val->type), val->name);
+ val->ssa = &instr->dest.dest.ssa;
+
+ for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++)
+ instr->src[i].src = nir_src_for_ssa(src[i]);
+
+ nir_builder_instr_insert(&b->nb, &instr->instr);
+}
+
+bool
+vtn_handle_glsl450_instruction(struct vtn_builder *b, uint32_t ext_opcode,
+ const uint32_t *words, unsigned count)
+{
+ switch ((enum GLSL450Entrypoint)ext_opcode) {
+ case Determinant:
+ case MatrixInverse:
+ case InterpolateAtCentroid:
+ case InterpolateAtSample:
+ case InterpolateAtOffset:
+ unreachable("Unhandled opcode");
+
+ default:
+ handle_glsl450_alu(b, (enum GLSL450Entrypoint)ext_opcode, words, count);
+ }
+
+ return true;
+}
diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c
new file mode 100644
index 00000000000..1fc1b8bc5dc
--- /dev/null
+++ b/src/glsl/nir/spirv_to_nir.c
@@ -0,0 +1,1572 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jason Ekstrand ([email protected])
+ *
+ */
+
+#include "spirv_to_nir_private.h"
+#include "nir_vla.h"
+
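+/* Returns the nir_ssa_def for a SPIR-V <id>.  SSA values are returned
+ * directly; constants are materialized as a load_const instruction at the
+ * builder's current insertion point.
+ */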
+nir_ssa_def *
+vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
+{
+ struct vtn_value *val = vtn_untyped_value(b, value_id);
+ switch (val->value_type) {
+ case vtn_value_type_constant: {
+ assert(glsl_type_is_vector_or_scalar(val->type));
+ unsigned num_components = glsl_get_vector_elements(val->type);
+ nir_load_const_instr *load =
+ nir_load_const_instr_create(b->shader, num_components);
+
+ for (unsigned i = 0; i < num_components; i++)
+ load->value.u[i] = val->constant->value.u[i];
+
+ nir_builder_instr_insert(&b->nb, &load->instr);
+ return &load->def;
+ }
+
+ case vtn_value_type_ssa:
+ return val->ssa;
+ default:
+ unreachable("Invalid type for an SSA value");
+ }
+}
+
+static char *
+vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
+ unsigned word_count)
+{
+ return ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
+}
+
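+/* Walks the instructions in [start, end).  Each SPIR-V instruction stores
+ * its word count in the high 16 bits of its first word and its opcode in
+ * the low 16 bits.  If the handler returns false, the walk stops and the
+ * current position is returned.
+ */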
+static const uint32_t *
+vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
+ const uint32_t *end, vtn_instruction_handler handler)
+{
+ const uint32_t *w = start;
+ while (w < end) {
+ SpvOp opcode = w[0] & SpvOpCodeMask;
+ unsigned count = w[0] >> SpvWordCountShift;
+ assert(count >= 1 && w + count <= end);
+
+ if (!handler(b, opcode, w, count))
+ return w;
+
+ w += count;
+ }
+ assert(w == end);
+ return w;
+}
+
+static void
+vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpExtInstImport: {
+ struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
+ if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
+ val->ext_handler = vtn_handle_glsl450_instruction;
+ } else {
+ assert(!"Unsupported extension");
+ }
+ break;
+ }
+
+ case SpvOpExtInst: {
+ struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
+ bool handled = val->ext_handler(b, w[4], w, count);
+ (void)handled;
+ assert(handled);
+ break;
+ }
+
+ default:
+ unreachable("Unhandled opcode");
+ }
+}
+
+static void
+_foreach_decoration_helper(struct vtn_builder *b,
+ struct vtn_value *base_value,
+ struct vtn_value *value,
+ vtn_decoration_foreach_cb cb, void *data)
+{
+ for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
+ if (dec->group) {
+ assert(dec->group->value_type == vtn_value_type_decoration_group);
+ _foreach_decoration_helper(b, base_value, dec->group, cb, data);
+ } else {
+ cb(b, base_value, dec, data);
+ }
+ }
+}
+
+/** Iterates (recursively if needed) over all of the decorations on a value
+ *
+ * This function iterates over all of the decorations applied to a given
+ * value. If it encounters a decoration group, it recurses into the group
+ * and iterates over all of those decorations as well.
+ */
+void
+vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
+ vtn_decoration_foreach_cb cb, void *data)
+{
+ _foreach_decoration_helper(b, value, value, cb, data);
+}
+
+static void
+vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpDecorationGroup:
+ vtn_push_value(b, w[1], vtn_value_type_undef);
+ break;
+
+ case SpvOpDecorate: {
+ struct vtn_value *val = &b->values[w[1]];
+
+ struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
+ dec->decoration = w[2];
+ dec->literals = &w[3];
+
+ /* Link into the list */
+ dec->next = val->decoration;
+ val->decoration = dec;
+ break;
+ }
+
+ case SpvOpGroupDecorate: {
+ struct vtn_value *group = &b->values[w[1]];
+ assert(group->value_type == vtn_value_type_decoration_group);
+
+ for (unsigned i = 2; i < count; i++) {
+ struct vtn_value *val = &b->values[w[i]];
+ struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
+ dec->group = group;
+
+ /* Link into the list */
+ dec->next = val->decoration;
+ val->decoration = dec;
+ }
+ break;
+ }
+
+ case SpvOpGroupMemberDecorate:
+ assert(!"Bad instruction. Khronos Bug #13513");
+ break;
+
+ default:
+ unreachable("Unhandled opcode");
+ }
+}
+
+static const struct glsl_type *
+vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *args, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpTypeVoid:
+ return glsl_void_type();
+ case SpvOpTypeBool:
+ return glsl_bool_type();
+ case SpvOpTypeInt:
+ return glsl_int_type();
+ case SpvOpTypeFloat:
+ return glsl_float_type();
+
+ case SpvOpTypeVector: {
+ const struct glsl_type *base =
+ vtn_value(b, args[0], vtn_value_type_type)->type;
+ unsigned elems = args[1];
+
+ assert(glsl_type_is_scalar(base));
+ return glsl_vector_type(glsl_get_base_type(base), elems);
+ }
+
+ case SpvOpTypeMatrix: {
+ const struct glsl_type *base =
+ vtn_value(b, args[0], vtn_value_type_type)->type;
+ unsigned columns = args[1];
+
+ assert(glsl_type_is_vector(base));
+ return glsl_matrix_type(glsl_get_base_type(base),
+ glsl_get_vector_elements(base),
+ columns);
+ }
+
+ case SpvOpTypeArray: {
+ /* args[1] is the <id> of the length constant, not a literal length. */
+ nir_constant *len =
+ vtn_value(b, args[1], vtn_value_type_constant)->constant;
+ return glsl_array_type(b->values[args[0]].type, len->value.u[0]);
+ }
+
+ case SpvOpTypeStruct: {
+ NIR_VLA(struct glsl_struct_field, fields, count);
+ for (unsigned i = 0; i < count; i++) {
+ /* TODO: Handle decorators */
+ fields[i].type = vtn_value(b, args[i], vtn_value_type_type)->type;
+ fields[i].name = ralloc_asprintf(b, "field%d", i);
+ fields[i].location = -1;
+ fields[i].interpolation = 0;
+ fields[i].centroid = 0;
+ fields[i].sample = 0;
+ fields[i].matrix_layout = 2;
+ fields[i].stream = -1;
+ }
+ return glsl_struct_type(fields, count, "struct");
+ }
+
+ case SpvOpTypeFunction: {
+ const struct glsl_type *return_type = b->values[args[0]].type;
+ NIR_VLA(struct glsl_function_param, params, count - 1);
+ for (unsigned i = 1; i < count; i++) {
+ params[i - 1].type = vtn_value(b, args[i], vtn_value_type_type)->type;
+
+ /* FIXME: */
+ params[i - 1].in = true;
+ params[i - 1].out = true;
+ }
+ return glsl_function_type(return_type, params, count - 1);
+ }
+
+ case SpvOpTypePointer:
+ /* FIXME: For now, we'll just do the really lame thing and return
+ * the same type. The validator should ensure that the proper number
+ * of dereferences happen
+ */
+ return vtn_value(b, args[1], vtn_value_type_type)->type;
+
+ case SpvOpTypeSampler: {
+ const struct glsl_type *sampled_type =
+ vtn_value(b, args[0], vtn_value_type_type)->type;
+
+ assert(glsl_type_is_vector_or_scalar(sampled_type));
+
+ enum glsl_sampler_dim dim;
+ switch ((SpvDim)args[1]) {
+ case SpvDim1D: dim = GLSL_SAMPLER_DIM_1D; break;
+ case SpvDim2D: dim = GLSL_SAMPLER_DIM_2D; break;
+ case SpvDim3D: dim = GLSL_SAMPLER_DIM_3D; break;
+ case SpvDimCube: dim = GLSL_SAMPLER_DIM_CUBE; break;
+ case SpvDimRect: dim = GLSL_SAMPLER_DIM_RECT; break;
+ case SpvDimBuffer: dim = GLSL_SAMPLER_DIM_BUF; break;
+ default:
+ unreachable("Invalid SPIR-V Sampler dimension");
+ }
+
+ /* TODO: Handle the various texture image/filter options */
+ (void)args[2];
+
+ bool is_array = args[3];
+ bool is_shadow = args[4];
+
+ assert(args[5] == 0 && "FIXME: Handle multi-sampled textures");
+
+ return glsl_sampler_type(dim, is_shadow, is_array,
+ glsl_get_base_type(sampled_type));
+ }
+
+ case SpvOpTypeRuntimeArray:
+ case SpvOpTypeOpaque:
+ case SpvOpTypeEvent:
+ case SpvOpTypeDeviceEvent:
+ case SpvOpTypeReserveId:
+ case SpvOpTypeQueue:
+ case SpvOpTypePipe:
+ default:
+ unreachable("Unhandled opcode");
+ }
+}
+
+static void
+vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
+ val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
+ val->constant = ralloc(b, nir_constant);
+ switch (opcode) {
+ case SpvOpConstantTrue:
+ assert(val->type == glsl_bool_type());
+ val->constant->value.u[0] = NIR_TRUE;
+ break;
+ case SpvOpConstantFalse:
+ assert(val->type == glsl_bool_type());
+ val->constant->value.u[0] = NIR_FALSE;
+ break;
+ case SpvOpConstant:
+ assert(glsl_type_is_scalar(val->type));
+ val->constant->value.u[0] = w[3];
+ break;
+ case SpvOpConstantComposite: {
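+ /* Matrix constants are flattened column-major into value.u[]; vector
+ * constants copy one scalar per element; struct and array constants keep
+ * their members as a nested array of nir_constant pointers.
+ */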
+ unsigned elem_count = count - 3;
+ nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
+ for (unsigned i = 0; i < elem_count; i++)
+ elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;
+
+ switch (glsl_get_base_type(val->type)) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_BOOL:
+ if (glsl_type_is_matrix(val->type)) {
+ unsigned rows = glsl_get_vector_elements(val->type);
+ assert(glsl_get_matrix_columns(val->type) == elem_count);
+ for (unsigned i = 0; i < elem_count; i++)
+ for (unsigned j = 0; j < rows; j++)
+ val->constant->value.u[rows * i + j] = elems[i]->value.u[j];
+ } else {
+ assert(glsl_type_is_vector(val->type));
+ assert(glsl_get_vector_elements(val->type) == elem_count);
+ for (unsigned i = 0; i < elem_count; i++)
+ val->constant->value.u[i] = elems[i]->value.u[0];
+ }
+ ralloc_free(elems);
+ break;
+
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_ARRAY:
+ ralloc_steal(val->constant, elems);
+ val->constant->elements = elems;
+ break;
+
+ default:
+ unreachable("Unsupported type for constants");
+ }
+ break;
+ }
+
+ default:
+ unreachable("Unhandled opcode");
+ }
+}
+
+static void
+var_decoration_cb(struct vtn_builder *b, struct vtn_value *val,
+ const struct vtn_decoration *dec, void *void_var)
+{
+ assert(val->value_type == vtn_value_type_deref);
+ assert(val->deref->deref.child == NULL);
+ assert(val->deref->var == void_var);
+
+ nir_variable *var = void_var;
+ switch (dec->decoration) {
+ case SpvDecorationPrecisionLow:
+ case SpvDecorationPrecisionMedium:
+ case SpvDecorationPrecisionHigh:
+ break; /* FIXME: Do nothing with these for now. */
+ case SpvDecorationSmooth:
+ var->data.interpolation = INTERP_QUALIFIER_SMOOTH;
+ break;
+ case SpvDecorationNoperspective:
+ var->data.interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
+ break;
+ case SpvDecorationFlat:
+ var->data.interpolation = INTERP_QUALIFIER_FLAT;
+ break;
+ case SpvDecorationCentroid:
+ var->data.centroid = true;
+ break;
+ case SpvDecorationSample:
+ var->data.sample = true;
+ break;
+ case SpvDecorationInvariant:
+ var->data.invariant = true;
+ break;
+ case SpvDecorationConstant:
+ assert(var->constant_initializer != NULL);
+ var->data.read_only = true;
+ break;
+ case SpvDecorationNonwritable:
+ var->data.read_only = true;
+ break;
+ case SpvDecorationLocation:
+ var->data.explicit_location = true;
+ var->data.location = dec->literals[0];
+ break;
+ case SpvDecorationComponent:
+ var->data.location_frac = dec->literals[0];
+ break;
+ case SpvDecorationIndex:
+ var->data.explicit_index = true;
+ var->data.index = dec->literals[0];
+ break;
+ case SpvDecorationBinding:
+ var->data.explicit_binding = true;
+ var->data.binding = dec->literals[0];
+ break;
+ case SpvDecorationBlock:
+ case SpvDecorationBufferBlock:
+ case SpvDecorationRowMajor:
+ case SpvDecorationColMajor:
+ case SpvDecorationGLSLShared:
+ case SpvDecorationGLSLStd140:
+ case SpvDecorationGLSLStd430:
+ case SpvDecorationGLSLPacked:
+ case SpvDecorationPatch:
+ case SpvDecorationRestrict:
+ case SpvDecorationAliased:
+ case SpvDecorationVolatile:
+ case SpvDecorationCoherent:
+ case SpvDecorationNonreadable:
+ case SpvDecorationUniform:
+ /* This is really nice but we have no use for it right now. */
+ case SpvDecorationNoStaticUse:
+ case SpvDecorationCPacked:
+ case SpvDecorationSaturatedConversion:
+ case SpvDecorationStream:
+ case SpvDecorationDescriptorSet:
+ case SpvDecorationOffset:
+ case SpvDecorationAlignment:
+ case SpvDecorationXfbBuffer:
+ case SpvDecorationStride:
+ case SpvDecorationBuiltIn:
+ case SpvDecorationFuncParamAttr:
+ case SpvDecorationFPRoundingMode:
+ case SpvDecorationFPFastMathMode:
+ case SpvDecorationLinkageAttributes:
+ case SpvDecorationSpecId:
+ default:
+ unreachable("Unhandled variable decoration");
+ }
+}
+
+static void
+vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpVariable: {
+ const struct glsl_type *type =
+ vtn_value(b, w[1], vtn_value_type_type)->type;
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_deref);
+
+ nir_variable *var = ralloc(b->shader, nir_variable);
+
+ var->type = type;
+ var->name = ralloc_strdup(var, val->name);
+
+ switch ((SpvStorageClass)w[3]) {
+ case SpvStorageClassUniformConstant:
+ var->data.mode = nir_var_uniform;
+ var->data.read_only = true;
+ break;
+ case SpvStorageClassInput:
+ var->data.mode = nir_var_shader_in;
+ var->data.read_only = true;
+ break;
+ case SpvStorageClassOutput:
+ var->data.mode = nir_var_shader_out;
+ break;
+ case SpvStorageClassPrivateGlobal:
+ var->data.mode = nir_var_global;
+ break;
+ case SpvStorageClassFunction:
+ var->data.mode = nir_var_local;
+ break;
+ case SpvStorageClassUniform:
+ case SpvStorageClassWorkgroupLocal:
+ case SpvStorageClassWorkgroupGlobal:
+ case SpvStorageClassGeneric:
+ case SpvStorageClassPrivate:
+ case SpvStorageClassAtomicCounter:
+ default:
+ unreachable("Unhandled variable storage class");
+ }
+
+ if (count > 4) {
+ assert(count == 5);
+ var->constant_initializer =
+ vtn_value(b, w[4], vtn_value_type_constant)->constant;
+ }
+
+ if (var->data.mode == nir_var_local) {
+ exec_list_push_tail(&b->impl->locals, &var->node);
+ } else {
+ exec_list_push_tail(&b->shader->globals, &var->node);
+ }
+
+ val->deref = nir_deref_var_create(b->shader, var);
+
+ vtn_foreach_decoration(b, val, var_decoration_cb, var);
+ break;
+ }
+
+ case SpvOpAccessChain:
+ case SpvOpInBoundsAccessChain: {
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_deref);
+ nir_deref_var *base = vtn_value(b, w[3], vtn_value_type_deref)->deref;
+ val->deref = nir_deref_as_var(nir_copy_deref(b, &base->deref));
+
+ nir_deref *tail = &val->deref->deref;
+ while (tail->child)
+ tail = tail->child;
+
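+ /* Each index operand adds one level of dereference: array, matrix-column,
+ * and vector-component indices become a nir_deref_array, while struct
+ * member indices become a nir_deref_struct.
+ */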
+ for (unsigned i = 0; i < count - 4; i++) {
+ assert(w[i + 4] < b->value_id_bound);
+ struct vtn_value *idx_val = &b->values[w[i + 4]];
+
+ enum glsl_base_type base_type = glsl_get_base_type(tail->type);
+ switch (base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_ARRAY: {
+ nir_deref_array *deref_arr = nir_deref_array_create(b);
+ if (base_type == GLSL_TYPE_ARRAY) {
+ deref_arr->deref.type = glsl_get_array_element(tail->type);
+ } else if (glsl_type_is_matrix(tail->type)) {
+ deref_arr->deref.type = glsl_get_column_type(tail->type);
+ } else {
+ assert(glsl_type_is_vector(tail->type));
+ deref_arr->deref.type = glsl_scalar_type(base_type);
+ }
+
+ if (idx_val->value_type == vtn_value_type_constant) {
+ unsigned idx = idx_val->constant->value.u[0];
+ deref_arr->deref_array_type = nir_deref_array_type_direct;
+ deref_arr->base_offset = idx;
+ } else {
+ assert(idx_val->value_type == vtn_value_type_ssa);
+ deref_arr->deref_array_type = nir_deref_array_type_indirect;
+ deref_arr->base_offset = 0;
+ deref_arr->indirect = nir_src_for_ssa(vtn_ssa_value(b, w[i + 4]));
+ }
+ tail->child = &deref_arr->deref;
+ break;
+ }
+
+ case GLSL_TYPE_STRUCT: {
+ assert(idx_val->value_type == vtn_value_type_constant);
+ unsigned idx = idx_val->constant->value.u[0];
+ nir_deref_struct *deref_struct = nir_deref_struct_create(b, idx);
+ deref_struct->deref.type = glsl_get_struct_field(tail->type, idx);
+ tail->child = &deref_struct->deref;
+ break;
+ }
+ default:
+ unreachable("Invalid type for deref");
+ }
+ tail = tail->child;
+ }
+ break;
+ }
+
+ case SpvOpCopyMemory: {
+ nir_deref_var *dest = vtn_value(b, w[1], vtn_value_type_deref)->deref;
+ nir_deref_var *src = vtn_value(b, w[2], vtn_value_type_deref)->deref;
+
+ nir_intrinsic_instr *copy =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_copy_var);
+ copy->variables[0] = nir_deref_as_var(nir_copy_deref(copy, &dest->deref));
+ copy->variables[1] = nir_deref_as_var(nir_copy_deref(copy, &src->deref));
+
+ nir_builder_instr_insert(&b->nb, &copy->instr);
+ break;
+ }
+
+ case SpvOpLoad: {
+ nir_deref_var *src = vtn_value(b, w[3], vtn_value_type_deref)->deref;
+ const struct glsl_type *src_type = nir_deref_tail(&src->deref)->type;
+
+ if (glsl_get_base_type(src_type) == GLSL_TYPE_SAMPLER) {
+ vtn_push_value(b, w[2], vtn_value_type_deref)->deref = src;
+ return;
+ }
+
+ assert(glsl_type_is_vector_or_scalar(src_type));
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+
+ nir_intrinsic_instr *load =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_var);
+ load->variables[0] = nir_deref_as_var(nir_copy_deref(load, &src->deref));
+ load->num_components = glsl_get_vector_elements(src_type);
+ nir_ssa_dest_init(&load->instr, &load->dest, load->num_components,
+ val->name);
+
+ nir_builder_instr_insert(&b->nb, &load->instr);
+ val->type = src_type;
+
+ if (src->var->data.mode == nir_var_uniform &&
+ glsl_get_base_type(src_type) == GLSL_TYPE_BOOL) {
+ /* Uniform boolean loads need to be fixed up since they're defined
+ * to be zero/nonzero rather than NIR_FALSE/NIR_TRUE.
+ */
+ val->ssa = nir_ine(&b->nb, &load->dest.ssa, nir_imm_int(&b->nb, 0));
+ } else {
+ val->ssa = &load->dest.ssa;
+ }
+ break;
+ }
+
+ case SpvOpStore: {
+ nir_deref_var *dest = vtn_value(b, w[1], vtn_value_type_deref)->deref;
+ const struct glsl_type *dest_type = nir_deref_tail(&dest->deref)->type;
+ struct vtn_value *src_val = vtn_untyped_value(b, w[2]);
+ if (src_val->value_type == vtn_value_type_ssa) {
+ assert(glsl_type_is_vector_or_scalar(dest_type));
+ nir_intrinsic_instr *store =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_var);
+ store->src[0] = nir_src_for_ssa(src_val->ssa);
+ store->variables[0] = nir_deref_as_var(nir_copy_deref(store, &dest->deref));
+ store->num_components = glsl_get_vector_elements(dest_type);
+
+ nir_builder_instr_insert(&b->nb, &store->instr);
+ } else {
+ assert(src_val->value_type == vtn_value_type_constant);
+
+ nir_variable *const_tmp = rzalloc(b->shader, nir_variable);
+ const_tmp->type = dest_type;
+ const_tmp->name = "const_temp";
+ const_tmp->data.mode = nir_var_local;
+ const_tmp->data.read_only = true;
+ exec_list_push_tail(&b->impl->locals, &const_tmp->node);
+
+ nir_intrinsic_instr *copy =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_copy_var);
+ copy->variables[0] = nir_deref_as_var(nir_copy_deref(copy, &dest->deref));
+ copy->variables[1] = nir_deref_var_create(copy, const_tmp);
+
+ nir_builder_instr_insert(&b->nb, &copy->instr);
+ }
+ break;
+ }
+
+ case SpvOpVariableArray:
+ case SpvOpCopyMemorySized:
+ case SpvOpArrayLength:
+ case SpvOpImagePointer:
+ default:
+ unreachable("Unhandled opcode");
+ }
+}
+
+static void
+vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ unreachable("Unhandled opcode");
+}
+
+static nir_tex_src
+vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
+{
+ nir_tex_src src;
+ src.src = nir_src_for_ssa(vtn_value(b, index, vtn_value_type_ssa)->ssa);
+ src.src_type = type;
+ return src;
+}
+
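+/* For the sampling opcodes handled below, w[1] is the result type, w[2] the
+ * result id, w[3] the sampler <id>, and w[4] the coordinate; any remaining
+ * words are opcode-specific arguments such as a bias or lod.
+ */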
+static void
+vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ nir_deref_var *sampler = vtn_value(b, w[3], vtn_value_type_deref)->deref;
+
+ nir_tex_src srcs[8]; /* 8 should be enough */
+ nir_tex_src *p = srcs;
+
+ unsigned coord_components = 0;
+ switch (opcode) {
+ case SpvOpTextureSample:
+ case SpvOpTextureSampleDref:
+ case SpvOpTextureSampleLod:
+ case SpvOpTextureSampleProj:
+ case SpvOpTextureSampleGrad:
+ case SpvOpTextureSampleOffset:
+ case SpvOpTextureSampleProjLod:
+ case SpvOpTextureSampleProjGrad:
+ case SpvOpTextureSampleLodOffset:
+ case SpvOpTextureSampleProjOffset:
+ case SpvOpTextureSampleGradOffset:
+ case SpvOpTextureSampleProjLodOffset:
+ case SpvOpTextureSampleProjGradOffset:
+ case SpvOpTextureFetchTexelLod:
+ case SpvOpTextureFetchTexelOffset:
+ case SpvOpTextureFetchSample:
+ case SpvOpTextureFetchTexel:
+ case SpvOpTextureGather:
+ case SpvOpTextureGatherOffset:
+ case SpvOpTextureGatherOffsets:
+ case SpvOpTextureQueryLod: {
+ /* All these types have the coordinate as their first real argument */
+ struct vtn_value *coord = vtn_value(b, w[4], vtn_value_type_ssa);
+ coord_components = glsl_get_vector_elements(coord->type);
+ p->src = nir_src_for_ssa(coord->ssa);
+ p->src_type = nir_tex_src_coord;
+ p++;
+ break;
+ }
+ default:
+ break;
+ }
+
+ nir_texop texop;
+ switch (opcode) {
+ case SpvOpTextureSample:
+ texop = nir_texop_tex;
+
+ if (count == 6) {
+ texop = nir_texop_txb;
+ *p++ = vtn_tex_src(b, w[5], nir_tex_src_bias);
+ }
+ break;
+
+ case SpvOpTextureSampleDref:
+ case SpvOpTextureSampleLod:
+ case SpvOpTextureSampleProj:
+ case SpvOpTextureSampleGrad:
+ case SpvOpTextureSampleOffset:
+ case SpvOpTextureSampleProjLod:
+ case SpvOpTextureSampleProjGrad:
+ case SpvOpTextureSampleLodOffset:
+ case SpvOpTextureSampleProjOffset:
+ case SpvOpTextureSampleGradOffset:
+ case SpvOpTextureSampleProjLodOffset:
+ case SpvOpTextureSampleProjGradOffset:
+ case SpvOpTextureFetchTexelLod:
+ case SpvOpTextureFetchTexelOffset:
+ case SpvOpTextureFetchSample:
+ case SpvOpTextureFetchTexel:
+ case SpvOpTextureGather:
+ case SpvOpTextureGatherOffset:
+ case SpvOpTextureGatherOffsets:
+ case SpvOpTextureQuerySizeLod:
+ case SpvOpTextureQuerySize:
+ case SpvOpTextureQueryLod:
+ case SpvOpTextureQueryLevels:
+ case SpvOpTextureQuerySamples:
+ default:
+ unreachable("Unhandled opcode");
+ }
+
+ nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
+
+ const struct glsl_type *sampler_type = nir_deref_tail(&sampler->deref)->type;
+ instr->sampler_dim = glsl_get_sampler_dim(sampler_type);
+
+ switch (glsl_get_sampler_result_type(sampler_type)) {
+ case GLSL_TYPE_FLOAT: instr->dest_type = nir_type_float; break;
+ case GLSL_TYPE_INT: instr->dest_type = nir_type_int; break;
+ case GLSL_TYPE_UINT: instr->dest_type = nir_type_unsigned; break;
+ case GLSL_TYPE_BOOL: instr->dest_type = nir_type_bool; break;
+ default:
+ unreachable("Invalid base type for sampler result");
+ }
+
+ instr->op = texop;
+ memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));
+ instr->coord_components = coord_components;
+ instr->is_array = glsl_sampler_type_is_array(sampler_type);
+ instr->is_shadow = glsl_sampler_type_is_shadow(sampler_type);
+
+ instr->sampler = sampler;
+
+ nir_ssa_dest_init(&instr->instr, &instr->dest, 4, NULL);
+ val->ssa = &instr->dest.ssa;
+
+ nir_builder_instr_insert(&b->nb, &instr->instr);
+}
+
+static void
+vtn_handle_matrix_alu(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ unreachable("Matrix math not handled");
+}
+
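+/* Handles the SPIR-V ALU instructions that map more or less directly onto
+ * NIR opcodes.  w[1] is the result type, w[2] the result id, and the
+ * operands are w[3] through w[count - 1].
+ */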
+static void
+vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
+
+ /* Collect the various SSA sources */
+ unsigned num_inputs = count - 3;
+ nir_ssa_def *src[4];
+ for (unsigned i = 0; i < num_inputs; i++)
+ src[i] = vtn_ssa_value(b, w[i + 3]);
+
+ /* Indicates that the first two arguments should be swapped. This is
+ * used for implementing greater-than and less-than-or-equal.
+ */
+ bool swap = false;
+
+ nir_op op;
+ switch (opcode) {
+ /* Basic ALU operations */
+ case SpvOpSNegate: op = nir_op_ineg; break;
+ case SpvOpFNegate: op = nir_op_fneg; break;
+ case SpvOpNot: op = nir_op_inot; break;
+
+ case SpvOpAny:
+ switch (src[0]->num_components) {
+ case 1: op = nir_op_imov; break;
+ case 2: op = nir_op_bany2; break;
+ case 3: op = nir_op_bany3; break;
+ case 4: op = nir_op_bany4; break;
+ }
+ break;
+
+ case SpvOpAll:
+ switch (src[0]->num_components) {
+ case 1: op = nir_op_imov; break;
+ case 2: op = nir_op_ball2; break;
+ case 3: op = nir_op_ball3; break;
+ case 4: op = nir_op_ball4; break;
+ }
+ break;
+
+ case SpvOpIAdd: op = nir_op_iadd; break;
+ case SpvOpFAdd: op = nir_op_fadd; break;
+ case SpvOpISub: op = nir_op_isub; break;
+ case SpvOpFSub: op = nir_op_fsub; break;
+ case SpvOpIMul: op = nir_op_imul; break;
+ case SpvOpFMul: op = nir_op_fmul; break;
+ case SpvOpUDiv: op = nir_op_udiv; break;
+ case SpvOpSDiv: op = nir_op_idiv; break;
+ case SpvOpFDiv: op = nir_op_fdiv; break;
+ case SpvOpUMod: op = nir_op_umod; break;
+ case SpvOpSMod: op = nir_op_umod; break; /* FIXME? */
+ case SpvOpFMod: op = nir_op_fmod; break;
+
+ case SpvOpDot:
+ assert(src[0]->num_components == src[1]->num_components);
+ switch (src[0]->num_components) {
+ case 1: op = nir_op_fmul; break;
+ case 2: op = nir_op_fdot2; break;
+ case 3: op = nir_op_fdot3; break;
+ case 4: op = nir_op_fdot4; break;
+ }
+ break;
+
+ case SpvOpShiftRightLogical: op = nir_op_ushr; break;
+ case SpvOpShiftRightArithmetic: op = nir_op_ishr; break;
+ case SpvOpShiftLeftLogical: op = nir_op_ishl; break;
+ case SpvOpLogicalOr: op = nir_op_ior; break;
+ case SpvOpLogicalXor: op = nir_op_ixor; break;
+ case SpvOpLogicalAnd: op = nir_op_iand; break;
+ case SpvOpBitwiseOr: op = nir_op_ior; break;
+ case SpvOpBitwiseXor: op = nir_op_ixor; break;
+ case SpvOpBitwiseAnd: op = nir_op_iand; break;
+ case SpvOpSelect: op = nir_op_bcsel; break;
+ case SpvOpIEqual: op = nir_op_ieq; break;
+
+ /* Comparisons: (TODO: How do we want to handled ordered/unordered?) */
+ case SpvOpFOrdEqual: op = nir_op_feq; break;
+ case SpvOpFUnordEqual: op = nir_op_feq; break;
+ case SpvOpINotEqual: op = nir_op_ine; break;
+ case SpvOpFOrdNotEqual: op = nir_op_fne; break;
+ case SpvOpFUnordNotEqual: op = nir_op_fne; break;
+ case SpvOpULessThan: op = nir_op_ult; break;
+ case SpvOpSLessThan: op = nir_op_ilt; break;
+ case SpvOpFOrdLessThan: op = nir_op_flt; break;
+ case SpvOpFUnordLessThan: op = nir_op_flt; break;
+ case SpvOpUGreaterThan: op = nir_op_ult; swap = true; break;
+ case SpvOpSGreaterThan: op = nir_op_ilt; swap = true; break;
+ case SpvOpFOrdGreaterThan: op = nir_op_flt; swap = true; break;
+ case SpvOpFUnordGreaterThan: op = nir_op_flt; swap = true; break;
+ case SpvOpULessThanEqual: op = nir_op_uge; swap = true; break;
+ case SpvOpSLessThanEqual: op = nir_op_ige; swap = true; break;
+ case SpvOpFOrdLessThanEqual: op = nir_op_fge; swap = true; break;
+ case SpvOpFUnordLessThanEqual: op = nir_op_fge; swap = true; break;
+ case SpvOpUGreaterThanEqual: op = nir_op_uge; break;
+ case SpvOpSGreaterThanEqual: op = nir_op_ige; break;
+ case SpvOpFOrdGreaterThanEqual: op = nir_op_fge; break;
+ case SpvOpFUnordGreaterThanEqual: op = nir_op_fge; break;
+
+ /* Conversions: */
+ case SpvOpConvertFToU: op = nir_op_f2u; break;
+ case SpvOpConvertFToS: op = nir_op_f2i; break;
+ case SpvOpConvertSToF: op = nir_op_i2f; break;
+ case SpvOpConvertUToF: op = nir_op_u2f; break;
+ case SpvOpBitcast: op = nir_op_imov; break;
+ case SpvOpUConvert:
+ case SpvOpSConvert:
+ op = nir_op_imov; /* TODO: NIR is 32-bit only; these are no-ops. */
+ break;
+ case SpvOpFConvert:
+ op = nir_op_fmov;
+ break;
+
+ /* Derivatives: */
+ case SpvOpDPdx: op = nir_op_fddx; break;
+ case SpvOpDPdy: op = nir_op_fddy; break;
+ case SpvOpDPdxFine: op = nir_op_fddx_fine; break;
+ case SpvOpDPdyFine: op = nir_op_fddy_fine; break;
+ case SpvOpDPdxCoarse: op = nir_op_fddx_coarse; break;
+ case SpvOpDPdyCoarse: op = nir_op_fddy_coarse; break;
+ case SpvOpFwidth:
+ /* fwidth(p) = |dFdx(p)| + |dFdy(p)| */
+ val->ssa = nir_fadd(&b->nb,
+ nir_fabs(&b->nb, nir_fddx(&b->nb, src[0])),
+ nir_fabs(&b->nb, nir_fddy(&b->nb, src[0])));
+ return;
+ case SpvOpFwidthFine:
+ val->ssa = nir_fadd(&b->nb,
+ nir_fabs(&b->nb, nir_fddx_fine(&b->nb, src[0])),
+ nir_fabs(&b->nb, nir_fddy_fine(&b->nb, src[0])));
+ return;
+ case SpvOpFwidthCoarse:
+ val->ssa = nir_fadd(&b->nb,
+ nir_fabs(&b->nb, nir_fddx_coarse(&b->nb, src[0])),
+ nir_fabs(&b->nb, nir_fddy_coarse(&b->nb, src[0])));
+ return;
+
+ case SpvOpVectorTimesScalar:
+ /* The builder will take care of splatting for us. */
+ val->ssa = nir_fmul(&b->nb, src[0], src[1]);
+ return;
+
+ case SpvOpSRem:
+ case SpvOpFRem:
+ unreachable("No NIR equivalent");
+
+ case SpvOpIsNan:
+ case SpvOpIsInf:
+ case SpvOpIsFinite:
+ case SpvOpIsNormal:
+ case SpvOpSignBitSet:
+ case SpvOpLessOrGreater:
+ case SpvOpOrdered:
+ case SpvOpUnordered:
+ default:
+ unreachable("Unhandled opcode");
+ }
+
+ if (swap) {
+ nir_ssa_def *tmp = src[0];
+ src[0] = src[1];
+ src[1] = tmp;
+ }
+
+ nir_alu_instr *instr = nir_alu_instr_create(b->shader, op);
+ nir_ssa_dest_init(&instr->instr, &instr->dest.dest,
+ glsl_get_vector_elements(val->type), val->name);
+ val->ssa = &instr->dest.dest.ssa;
+
+ for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++)
+ instr->src[i].src = nir_src_for_ssa(src[i]);
+
+ nir_builder_instr_insert(&b->nb, &instr->instr);
+}
+
+static bool
+vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpSource:
+ case SpvOpSourceExtension:
+ case SpvOpCompileFlag:
+ case SpvOpExtension:
+ /* Unhandled, but these are for debug so that's ok. */
+ break;
+
+ case SpvOpExtInstImport:
+ vtn_handle_extension(b, opcode, w, count);
+ break;
+
+ case SpvOpMemoryModel:
+ assert(w[1] == SpvAddressingModelLogical);
+ assert(w[2] == SpvMemoryModelGLSL450);
+ break;
+
+ case SpvOpEntryPoint:
+ assert(b->entry_point == NULL);
+ b->entry_point = &b->values[w[2]];
+ b->execution_model = w[1];
+ break;
+
+ case SpvOpExecutionMode:
+ unreachable("Execution modes not yet implemented");
+ break;
+
+ case SpvOpString:
+ vtn_push_value(b, w[1], vtn_value_type_string)->str =
+ vtn_string_literal(b, &w[2], count - 2);
+ break;
+
+ case SpvOpName:
+ b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2);
+ break;
+
+ case SpvOpMemberName:
+ /* TODO */
+ break;
+
+ case SpvOpLine:
+ break; /* Ignored for now */
+
+ case SpvOpDecorationGroup:
+ case SpvOpDecorate:
+ case SpvOpMemberDecorate:
+ case SpvOpGroupDecorate:
+ case SpvOpGroupMemberDecorate:
+ vtn_handle_decoration(b, opcode, w, count);
+ break;
+
+ case SpvOpTypeVoid:
+ case SpvOpTypeBool:
+ case SpvOpTypeInt:
+ case SpvOpTypeFloat:
+ case SpvOpTypeVector:
+ case SpvOpTypeMatrix:
+ case SpvOpTypeSampler:
+ case SpvOpTypeArray:
+ case SpvOpTypeRuntimeArray:
+ case SpvOpTypeStruct:
+ case SpvOpTypeOpaque:
+ case SpvOpTypePointer:
+ case SpvOpTypeFunction:
+ case SpvOpTypeEvent:
+ case SpvOpTypeDeviceEvent:
+ case SpvOpTypeReserveId:
+ case SpvOpTypeQueue:
+ case SpvOpTypePipe:
+ vtn_push_value(b, w[1], vtn_value_type_type)->type =
+ vtn_handle_type(b, opcode, &w[2], count - 2);
+ break;
+
+ case SpvOpConstantTrue:
+ case SpvOpConstantFalse:
+ case SpvOpConstant:
+ case SpvOpConstantComposite:
+ case SpvOpConstantSampler:
+ case SpvOpConstantNullPointer:
+ case SpvOpConstantNullObject:
+ case SpvOpSpecConstantTrue:
+ case SpvOpSpecConstantFalse:
+ case SpvOpSpecConstant:
+ case SpvOpSpecConstantComposite:
+ vtn_handle_constant(b, opcode, w, count);
+ break;
+
+ case SpvOpVariable:
+ vtn_handle_variables(b, opcode, w, count);
+ break;
+
+ default:
+ return false; /* End of preamble */
+ }
+
+ return true;
+}
+
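+/* First pass over a function body: records the function's NIR overload and,
+ * for every basic block, its label, terminating branch, and merge
+ * information.  No NIR instructions are emitted here; that happens later in
+ * vtn_walk_blocks().
+ */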
+static bool
+vtn_handle_first_cfg_pass_instruction(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpFunction: {
+ assert(b->func == NULL);
+ b->func = rzalloc(b, struct vtn_function);
+
+ const struct glsl_type *result_type =
+ vtn_value(b, w[1], vtn_value_type_type)->type;
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function);
+ const struct glsl_type *func_type =
+ vtn_value(b, w[4], vtn_value_type_type)->type;
+
+ assert(glsl_get_function_return_type(func_type) == result_type);
+
+ nir_function *func =
+ nir_function_create(b->shader, ralloc_strdup(b->shader, val->name));
+
+ nir_function_overload *overload = nir_function_overload_create(func);
+ overload->num_params = glsl_get_length(func_type);
+ overload->params = ralloc_array(overload, nir_parameter,
+ overload->num_params);
+ for (unsigned i = 0; i < overload->num_params; i++) {
+ const struct glsl_function_param *param =
+ glsl_get_function_param(func_type, i);
+ overload->params[i].type = param->type;
+ if (param->in) {
+ if (param->out) {
+ overload->params[i].param_type = nir_parameter_inout;
+ } else {
+ overload->params[i].param_type = nir_parameter_in;
+ }
+ } else {
+ if (param->out) {
+ overload->params[i].param_type = nir_parameter_out;
+ } else {
+ assert(!"Parameter is neither in nor out");
+ }
+ }
+ }
+ b->func->overload = overload;
+ break;
+ }
+
+ case SpvOpFunctionEnd:
+ b->func = NULL;
+ break;
+
+ case SpvOpFunctionParameter:
+ break; /* Does nothing */
+
+ case SpvOpLabel: {
+ assert(b->block == NULL);
+ b->block = rzalloc(b, struct vtn_block);
+ b->block->label = w;
+ vtn_push_value(b, w[1], vtn_value_type_block)->block = b->block;
+
+ if (b->func->start_block == NULL) {
+ /* This is the first block encountered for this function. In this
+ * case, we set the start block and add it to the list of
+ * implemented functions that we'll walk later.
+ */
+ b->func->start_block = b->block;
+ exec_list_push_tail(&b->functions, &b->func->node);
+ }
+ break;
+ }
+
+ case SpvOpBranch:
+ case SpvOpBranchConditional:
+ case SpvOpSwitch:
+ case SpvOpKill:
+ case SpvOpReturn:
+ case SpvOpReturnValue:
+ case SpvOpUnreachable:
+ assert(b->block);
+ b->block->branch = w;
+ b->block = NULL;
+ break;
+
+ case SpvOpSelectionMerge:
+ case SpvOpLoopMerge:
+ assert(b->block && b->block->merge_op == SpvOpNop);
+ b->block->merge_op = opcode;
+ b->block->merge_block_id = w[1];
+ break;
+
+ default:
+ /* Continue on as per normal */
+ return true;
+ }
+
+ return true;
+}
+
+static bool
+vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpLabel: {
+ struct vtn_block *block = vtn_value(b, w[1], vtn_value_type_block)->block;
+ assert(block->block == NULL);
+
+ struct exec_node *list_tail = exec_list_get_tail(b->nb.cf_node_list);
+ nir_cf_node *tail_node = exec_node_data(nir_cf_node, list_tail, node);
+ assert(tail_node->type == nir_cf_node_block);
+ block->block = nir_cf_node_as_block(tail_node);
+ break;
+ }
+
+ case SpvOpLoopMerge:
+ case SpvOpSelectionMerge:
+ /* This is handled by cfg pre-pass and walk_blocks */
+ break;
+
+ case SpvOpUndef:
+ vtn_push_value(b, w[2], vtn_value_type_undef);
+ break;
+
+ case SpvOpExtInst:
+ vtn_handle_extension(b, opcode, w, count);
+ break;
+
+ case SpvOpVariable:
+ case SpvOpVariableArray:
+ case SpvOpLoad:
+ case SpvOpStore:
+ case SpvOpCopyMemory:
+ case SpvOpCopyMemorySized:
+ case SpvOpAccessChain:
+ case SpvOpInBoundsAccessChain:
+ case SpvOpArrayLength:
+ case SpvOpImagePointer:
+ vtn_handle_variables(b, opcode, w, count);
+ break;
+
+ case SpvOpFunctionCall:
+ vtn_handle_function_call(b, opcode, w, count);
+ break;
+
+ case SpvOpTextureSample:
+ case SpvOpTextureSampleDref:
+ case SpvOpTextureSampleLod:
+ case SpvOpTextureSampleProj:
+ case SpvOpTextureSampleGrad:
+ case SpvOpTextureSampleOffset:
+ case SpvOpTextureSampleProjLod:
+ case SpvOpTextureSampleProjGrad:
+ case SpvOpTextureSampleLodOffset:
+ case SpvOpTextureSampleProjOffset:
+ case SpvOpTextureSampleGradOffset:
+ case SpvOpTextureSampleProjLodOffset:
+ case SpvOpTextureSampleProjGradOffset:
+ case SpvOpTextureFetchTexelLod:
+ case SpvOpTextureFetchTexelOffset:
+ case SpvOpTextureFetchSample:
+ case SpvOpTextureFetchTexel:
+ case SpvOpTextureGather:
+ case SpvOpTextureGatherOffset:
+ case SpvOpTextureGatherOffsets:
+ case SpvOpTextureQuerySizeLod:
+ case SpvOpTextureQuerySize:
+ case SpvOpTextureQueryLod:
+ case SpvOpTextureQueryLevels:
+ case SpvOpTextureQuerySamples:
+ vtn_handle_texture(b, opcode, w, count);
+ break;
+
+ case SpvOpSNegate:
+ case SpvOpFNegate:
+ case SpvOpNot:
+ case SpvOpAny:
+ case SpvOpAll:
+ case SpvOpConvertFToU:
+ case SpvOpConvertFToS:
+ case SpvOpConvertSToF:
+ case SpvOpConvertUToF:
+ case SpvOpUConvert:
+ case SpvOpSConvert:
+ case SpvOpFConvert:
+ case SpvOpConvertPtrToU:
+ case SpvOpConvertUToPtr:
+ case SpvOpPtrCastToGeneric:
+ case SpvOpGenericCastToPtr:
+ case SpvOpBitcast:
+ case SpvOpIsNan:
+ case SpvOpIsInf:
+ case SpvOpIsFinite:
+ case SpvOpIsNormal:
+ case SpvOpSignBitSet:
+ case SpvOpLessOrGreater:
+ case SpvOpOrdered:
+ case SpvOpUnordered:
+ case SpvOpIAdd:
+ case SpvOpFAdd:
+ case SpvOpISub:
+ case SpvOpFSub:
+ case SpvOpIMul:
+ case SpvOpFMul:
+ case SpvOpUDiv:
+ case SpvOpSDiv:
+ case SpvOpFDiv:
+ case SpvOpUMod:
+ case SpvOpSRem:
+ case SpvOpSMod:
+ case SpvOpFRem:
+ case SpvOpFMod:
+ case SpvOpVectorTimesScalar:
+ case SpvOpDot:
+ case SpvOpShiftRightLogical:
+ case SpvOpShiftRightArithmetic:
+ case SpvOpShiftLeftLogical:
+ case SpvOpLogicalOr:
+ case SpvOpLogicalXor:
+ case SpvOpLogicalAnd:
+ case SpvOpBitwiseOr:
+ case SpvOpBitwiseXor:
+ case SpvOpBitwiseAnd:
+ case SpvOpSelect:
+ case SpvOpIEqual:
+ case SpvOpFOrdEqual:
+ case SpvOpFUnordEqual:
+ case SpvOpINotEqual:
+ case SpvOpFOrdNotEqual:
+ case SpvOpFUnordNotEqual:
+ case SpvOpULessThan:
+ case SpvOpSLessThan:
+ case SpvOpFOrdLessThan:
+ case SpvOpFUnordLessThan:
+ case SpvOpUGreaterThan:
+ case SpvOpSGreaterThan:
+ case SpvOpFOrdGreaterThan:
+ case SpvOpFUnordGreaterThan:
+ case SpvOpULessThanEqual:
+ case SpvOpSLessThanEqual:
+ case SpvOpFOrdLessThanEqual:
+ case SpvOpFUnordLessThanEqual:
+ case SpvOpUGreaterThanEqual:
+ case SpvOpSGreaterThanEqual:
+ case SpvOpFOrdGreaterThanEqual:
+ case SpvOpFUnordGreaterThanEqual:
+ case SpvOpDPdx:
+ case SpvOpDPdy:
+ case SpvOpFwidth:
+ case SpvOpDPdxFine:
+ case SpvOpDPdyFine:
+ case SpvOpFwidthFine:
+ case SpvOpDPdxCoarse:
+ case SpvOpDPdyCoarse:
+ case SpvOpFwidthCoarse:
+ vtn_handle_alu(b, opcode, w, count);
+ break;
+
+ case SpvOpTranspose:
+ case SpvOpOuterProduct:
+ case SpvOpMatrixTimesScalar:
+ case SpvOpVectorTimesMatrix:
+ case SpvOpMatrixTimesVector:
+ case SpvOpMatrixTimesMatrix:
+ vtn_handle_matrix_alu(b, opcode, w, count);
+ break;
+
+ default:
+ unreachable("Unhandled opcode");
+ }
+
+ return true;
+}
+
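+/* Structurizes the SPIR-V CFG into NIR control flow.  Blocks are walked
+ * starting at `start`; a block whose merge_op is SpvOpLoopMerge becomes the
+ * header (and continue target) of a nir_loop with its merge block as the
+ * break target, and a conditional branch with a SpvOpSelectionMerge becomes
+ * a nir_if.  The walk stops when `end_block` is reached.
+ */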
+static void
+vtn_walk_blocks(struct vtn_builder *b, struct vtn_block *start,
+ struct vtn_block *break_block, struct vtn_block *cont_block,
+ struct vtn_block *end_block)
+{
+ struct vtn_block *block = start;
+ while (block != end_block) {
+ if (block->block != NULL) {
+ /* We've already visited this block once before so this is a
+ * back-edge. Back-edges are only allowed to point to a loop
+ * merge.
+ */
+ assert(block == cont_block);
+ return;
+ }
+
+ if (block->merge_op == SpvOpLoopMerge) {
+ /* This is the jump into a loop. */
+ cont_block = block;
+ break_block = vtn_value(b, block->merge_block_id,
+ vtn_value_type_block)->block;
+
+ nir_loop *loop = nir_loop_create(b->shader);
+ nir_cf_node_insert_end(b->nb.cf_node_list, &loop->cf_node);
+
+ struct exec_list *old_list = b->nb.cf_node_list;
+
+ /* Reset the merge_op to prevent infinite recursion */
+ block->merge_op = SpvOpNop;
+
+ nir_builder_insert_after_cf_list(&b->nb, &loop->body);
+ vtn_walk_blocks(b, block, break_block, cont_block, NULL);
+
+ nir_builder_insert_after_cf_list(&b->nb, old_list);
+ block = break_block;
+ continue;
+ }
+
+ const uint32_t *w = block->branch;
+ SpvOp branch_op = w[0] & SpvOpCodeMask;
+
+ b->block = block;
+ vtn_foreach_instruction(b, block->label, block->branch,
+ vtn_handle_body_instruction);
+
+ switch (branch_op) {
+ case SpvOpBranch: {
+ struct vtn_block *branch_block =
+ vtn_value(b, w[1], vtn_value_type_block)->block;
+
+ if (branch_block == break_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_break);
+ nir_builder_instr_insert(&b->nb, &jump->instr);
+
+ return;
+ } else if (branch_block == cont_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_continue);
+ nir_builder_instr_insert(&b->nb, &jump->instr);
+
+ return;
+ } else if (branch_block == end_block) {
+ return;
+ } else {
+ block = branch_block;
+ continue;
+ }
+ }
+
+ case SpvOpBranchConditional: {
+ /* Gather up the branch blocks */
+ struct vtn_block *then_block =
+ vtn_value(b, w[2], vtn_value_type_block)->block;
+ struct vtn_block *else_block =
+ vtn_value(b, w[3], vtn_value_type_block)->block;
+
+ nir_if *if_stmt = nir_if_create(b->shader);
+ if_stmt->condition = nir_src_for_ssa(vtn_ssa_value(b, w[1]));
+ nir_cf_node_insert_end(b->nb.cf_node_list, &if_stmt->cf_node);
+
+ if (then_block == break_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_break);
+ nir_instr_insert_after_cf_list(&if_stmt->then_list,
+ &jump->instr);
+ block = else_block;
+ } else if (else_block == break_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_break);
+ nir_instr_insert_after_cf_list(&if_stmt->else_list,
+ &jump->instr);
+ block = then_block;
+ } else if (then_block == cont_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_continue);
+ nir_instr_insert_after_cf_list(&if_stmt->then_list,
+ &jump->instr);
+ block = else_block;
+ } else if (else_block == cont_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_continue);
+ nir_instr_insert_after_cf_list(&if_stmt->else_list,
+ &jump->instr);
+ block = then_block;
+ } else {
+ /* Conventional if statement */
+ assert(block->merge_op == SpvOpSelectionMerge);
+ struct vtn_block *merge_block =
+ vtn_value(b, block->merge_block_id, vtn_value_type_block)->block;
+
+ struct exec_list *old_list = b->nb.cf_node_list;
+
+ nir_builder_insert_after_cf_list(&b->nb, &if_stmt->then_list);
+ vtn_walk_blocks(b, then_block, break_block, cont_block, merge_block);
+
+ nir_builder_insert_after_cf_list(&b->nb, &if_stmt->else_list);
+ vtn_walk_blocks(b, else_block, break_block, cont_block, merge_block);
+
+ nir_builder_insert_after_cf_list(&b->nb, old_list);
+ block = merge_block;
+ continue;
+ }
+
+ /* If we got here then we inserted a predicated break or continue
+ * above and we need to handle the other case. We already set
+ * `block` above to indicate what block to visit after the
+ * predicated break.
+ */
+
+ /* It's possible that the other branch is also a break/continue.
+ * If it is, we handle that here.
+ */
+ if (block == break_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_break);
+ nir_builder_instr_insert(&b->nb, &jump->instr);
+
+ return;
+ } else if (block == cont_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_continue);
+ nir_builder_instr_insert(&b->nb, &jump->instr);
+
+ return;
+ }
+
+ /* If we got here then there was a predicated break/continue but
+ * the other half of the if has stuff in it. `block` was already
+ * set above so there is nothing left for us to do.
+ */
+ continue;
+ }
+
+ case SpvOpReturn: {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_return);
+ nir_builder_instr_insert(&b->nb, &jump->instr);
+ return;
+ }
+
+ case SpvOpKill: {
+ nir_intrinsic_instr *discard =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard);
+ nir_builder_instr_insert(&b->nb, &discard->instr);
+ return;
+ }
+
+ case SpvOpSwitch:
+ case SpvOpReturnValue:
+ case SpvOpUnreachable:
+ default:
+ unreachable("Unhandled opcode");
+ }
+ }
+}
+
+nir_shader *
+spirv_to_nir(const uint32_t *words, size_t word_count,
+ const nir_shader_compiler_options *options)
+{
+ const uint32_t *word_end = words + word_count;
+
+ /* Handle the SPIR-V header (first 5 dwords) */
+ assert(word_count > 5);
+
+ assert(words[0] == SpvMagicNumber);
+ assert(words[1] == 99); /* Version number used by the provisional spec */
+ /* words[2] == generator magic */
+ unsigned value_id_bound = words[3];
+ assert(words[4] == 0);
+
+ words += 5;
+
+ nir_shader *shader = nir_shader_create(NULL, options);
+
+ /* Initialize the vtn_builder object */
+ struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
+ b->shader = shader;
+ b->value_id_bound = value_id_bound;
+ b->values = ralloc_array(b, struct vtn_value, value_id_bound);
+ exec_list_make_empty(&b->functions);
+
+ /* Handle all the preamble instructions */
+ words = vtn_foreach_instruction(b, words, word_end,
+ vtn_handle_preamble_instruction);
+
+ /* Do a very quick CFG analysis pass */
+ vtn_foreach_instruction(b, words, word_end,
+ vtn_handle_first_cfg_pass_instruction);
+
+ foreach_list_typed(struct vtn_function, func, node, &b->functions) {
+ b->impl = nir_function_impl_create(func->overload);
+ nir_builder_init(&b->nb, b->impl);
+ nir_builder_insert_after_cf_list(&b->nb, &b->impl->body);
+ vtn_walk_blocks(b, func->start_block, NULL, NULL, NULL);
+ }
+
+ ralloc_free(b);
+
+ return shader;
+}
diff --git a/src/glsl/nir/spirv_to_nir_private.h b/src/glsl/nir/spirv_to_nir_private.h
new file mode 100644
index 00000000000..d2b364bdfeb
--- /dev/null
+++ b/src/glsl/nir/spirv_to_nir_private.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jason Ekstrand ([email protected])
+ *
+ */
+
+#include "nir_spirv.h"
+#include "nir_builder.h"
+#include "spirv.h"
+
+struct vtn_builder;
+struct vtn_decoration;
+
+enum vtn_value_type {
+ vtn_value_type_invalid = 0,
+ vtn_value_type_undef,
+ vtn_value_type_string,
+ vtn_value_type_decoration_group,
+ vtn_value_type_type,
+ vtn_value_type_constant,
+ vtn_value_type_deref,
+ vtn_value_type_function,
+ vtn_value_type_block,
+ vtn_value_type_ssa,
+ vtn_value_type_extension,
+};
+
+struct vtn_block {
+ /* Merge opcode if this block contains a merge; SpvOpNop otherwise. */
+ SpvOp merge_op;
+ uint32_t merge_block_id;
+ const uint32_t *label;
+ const uint32_t *branch;
+ nir_block *block;
+};
+
+struct vtn_function {
+ struct exec_node node;
+
+ nir_function_overload *overload;
+ struct vtn_block *start_block;
+};
+
+typedef bool (*vtn_instruction_handler)(struct vtn_builder *, uint32_t,
+ const uint32_t *, unsigned);
+
+struct vtn_value {
+ enum vtn_value_type value_type;
+ const char *name;
+ struct vtn_decoration *decoration;
+ const struct glsl_type *type;
+ union {
+ void *ptr;
+ char *str;
+ nir_constant *constant;
+ nir_deref_var *deref;
+ struct vtn_function *func;
+ struct vtn_block *block;
+ nir_ssa_def *ssa;
+ vtn_instruction_handler ext_handler;
+ };
+};
+
+struct vtn_decoration {
+ struct vtn_decoration *next;
+ const uint32_t *literals;
+ struct vtn_value *group;
+ SpvDecoration decoration;
+};
+
+struct vtn_builder {
+ nir_builder nb;
+
+ nir_shader *shader;
+ nir_function_impl *impl;
+ struct vtn_block *block;
+
+ unsigned value_id_bound;
+ struct vtn_value *values;
+
+ SpvExecutionModel execution_model;
+ struct vtn_value *entry_point;
+
+ struct vtn_function *func;
+ struct exec_list functions;
+};
+
+static inline struct vtn_value *
+vtn_push_value(struct vtn_builder *b, uint32_t value_id,
+ enum vtn_value_type value_type)
+{
+ assert(value_id < b->value_id_bound);
+ assert(b->values[value_id].value_type == vtn_value_type_invalid);
+
+ b->values[value_id].value_type = value_type;
+
+ return &b->values[value_id];
+}
+
+static inline struct vtn_value *
+vtn_untyped_value(struct vtn_builder *b, uint32_t value_id)
+{
+ assert(value_id < b->value_id_bound);
+ return &b->values[value_id];
+}
+
+static inline struct vtn_value *
+vtn_value(struct vtn_builder *b, uint32_t value_id,
+ enum vtn_value_type value_type)
+{
+ struct vtn_value *val = vtn_untyped_value(b, value_id);
+ assert(val->value_type == value_type);
+ return val;
+}
+
+nir_ssa_def *vtn_ssa_value(struct vtn_builder *b, uint32_t value_id);
+
+typedef void (*vtn_decoration_foreach_cb)(struct vtn_builder *,
+ struct vtn_value *,
+ const struct vtn_decoration *,
+ void *);
+
+void vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
+ vtn_decoration_foreach_cb cb, void *data);
+
+bool vtn_handle_glsl450_instruction(struct vtn_builder *b, uint32_t ext_opcode,
+ const uint32_t *words, unsigned count);
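
To make the intended use of the value table concrete, the fragment below shows the push-then-lookup discipline that vtn_push_value() and vtn_value() enforce through their assertions. It is a hypothetical handler sketch, not code from this patch, and the word layout assumed in its comments is illustrative.

/* Hypothetical sketch: register a result <id> as a string value, then
 * fetch it back later.  vtn_push_value() asserts the <id> is still
 * unused; vtn_value() asserts it holds the expected value type.
 * A real handler would likely copy the literal into the builder's
 * ralloc context instead of aliasing the instruction words. */
static void
example_push_string(struct vtn_builder *b, const uint32_t *w)
{
   /* assume w[1] is the result <id> and w[2..] holds the string literal */
   struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_string);
   val->str = (char *)&w[2];
}

static const char *
example_lookup_string(struct vtn_builder *b, uint32_t id)
{
   return vtn_value(b, id, vtn_value_type_string)->str;
}
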
diff --git a/src/glsl/standalone_scaffolding.cpp b/src/glsl/standalone_scaffolding.cpp
index a109c4e92d2..6e1ecec3235 100644
--- a/src/glsl/standalone_scaffolding.cpp
+++ b/src/glsl/standalone_scaffolding.cpp
@@ -34,6 +34,12 @@
#include <string.h>
#include "util/ralloc.h"
+extern "C" void
+_mesa_error_no_memory(const char *caller)
+{
+ fprintf(stderr, "Mesa error: out of memory in %s\n", caller);
+}
+
void
_mesa_warning(struct gl_context *ctx, const char *fmt, ...)
{
diff --git a/src/mesa/drivers/dri/i965/brw_fs.cpp b/src/mesa/drivers/dri/i965/brw_fs.cpp
index 8a9bbdbae52..5ce1dfc6633 100644
--- a/src/mesa/drivers/dri/i965/brw_fs.cpp
+++ b/src/mesa/drivers/dri/i965/brw_fs.cpp
@@ -671,6 +671,7 @@ fs_visitor::type_size(const struct glsl_type *type)
case GLSL_TYPE_ERROR:
case GLSL_TYPE_INTERFACE:
case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_FUNCTION:
unreachable("not reached");
}
diff --git a/src/mesa/drivers/dri/i965/brw_fs_visitor.cpp b/src/mesa/drivers/dri/i965/brw_fs_visitor.cpp
index 975f5f6b2c9..d6bb1178f7c 100644
--- a/src/mesa/drivers/dri/i965/brw_fs_visitor.cpp
+++ b/src/mesa/drivers/dri/i965/brw_fs_visitor.cpp
@@ -1354,6 +1354,7 @@ fs_visitor::emit_assignment_writes(fs_reg &l, fs_reg &r,
case GLSL_TYPE_VOID:
case GLSL_TYPE_ERROR:
case GLSL_TYPE_INTERFACE:
+ case GLSL_TYPE_FUNCTION:
unreachable("not reached");
}
}
diff --git a/src/mesa/drivers/dri/i965/brw_shader.cpp b/src/mesa/drivers/dri/i965/brw_shader.cpp
index c1fd859fef5..ebfb49acf8d 100644
--- a/src/mesa/drivers/dri/i965/brw_shader.cpp
+++ b/src/mesa/drivers/dri/i965/brw_shader.cpp
@@ -351,6 +351,7 @@ brw_type_for_base_type(const struct glsl_type *type)
case GLSL_TYPE_ERROR:
case GLSL_TYPE_INTERFACE:
case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_FUNCTION:
unreachable("not reached");
}
diff --git a/src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp b/src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp
index 5a60fe43bf8..e51c140c0f2 100644
--- a/src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp
+++ b/src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp
@@ -615,6 +615,7 @@ type_size(const struct glsl_type *type)
case GLSL_TYPE_DOUBLE:
case GLSL_TYPE_ERROR:
case GLSL_TYPE_INTERFACE:
+ case GLSL_TYPE_FUNCTION:
unreachable("not reached");
}
diff --git a/src/mesa/program/ir_to_mesa.cpp b/src/mesa/program/ir_to_mesa.cpp
index 3dcb53702a5..fceed712bdb 100644
--- a/src/mesa/program/ir_to_mesa.cpp
+++ b/src/mesa/program/ir_to_mesa.cpp
@@ -541,6 +541,7 @@ type_size(const struct glsl_type *type)
case GLSL_TYPE_VOID:
case GLSL_TYPE_ERROR:
case GLSL_TYPE_INTERFACE:
+ case GLSL_TYPE_FUNCTION:
assert(!"Invalid type in type_size");
break;
}
@@ -2448,6 +2449,7 @@ _mesa_associate_uniform_storage(struct gl_context *ctx,
case GLSL_TYPE_STRUCT:
case GLSL_TYPE_ERROR:
case GLSL_TYPE_INTERFACE:
+ case GLSL_TYPE_FUNCTION:
assert(!"Should not get here.");
break;
}
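
The driver and ir_to_mesa hunks above all follow one pattern: every switch over glsl_base_type that is meant to be exhaustive now needs an arm for the new GLSL_TYPE_FUNCTION enumerant, usually routed to the same unreachable/assert path as GLSL_TYPE_ERROR. A hypothetical helper showing the shape, using the C wrapper glsl_get_base_type() from nir_types.h; the function and its case selection are illustrative, not from this patch.

#include "nir_types.h"

/* Illustrative only: an exhaustive switch over glsl_base_type must now
 * name GLSL_TYPE_FUNCTION, even if it only hits the "not reached" arm. */
static bool
example_type_has_storage(const struct glsl_type *type)
{
   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      return true;
   case GLSL_TYPE_FUNCTION:   /* the new enumerant */
   case GLSL_TYPE_ERROR:
   default:
      return false;
   }
}
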