Diffstat (limited to 'src/compiler')
-rw-r--r--   src/compiler/nir/nir.h             |  2
-rw-r--r--   src/compiler/nir/nir_builder.h     | 37
-rw-r--r--   src/compiler/nir/nir_intrinsics.py | 72
-rw-r--r--   src/compiler/nir/nir_validate.c    | 34
4 files changed, 144 insertions, 1 deletion
diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index da0b179263f..66b8590e98b 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -1165,7 +1165,7 @@ typedef enum {
 } nir_intrinsic_index_flag;
 
-#define NIR_INTRINSIC_MAX_INPUTS 4
+#define NIR_INTRINSIC_MAX_INPUTS 5
 
 typedef struct {
    const char *name;
diff --git a/src/compiler/nir/nir_builder.h b/src/compiler/nir/nir_builder.h
index b305473f440..a667372bd7a 100644
--- a/src/compiler/nir/nir_builder.h
+++ b/src/compiler/nir/nir_builder.h
@@ -651,6 +651,43 @@ nir_load_reg(nir_builder *build, nir_register *reg)
 }
 
 static inline nir_ssa_def *
+nir_load_deref(nir_builder *build, nir_deref_instr *deref)
+{
+   nir_intrinsic_instr *load =
+      nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_deref);
+   load->num_components = glsl_get_vector_elements(deref->type);
+   load->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+   nir_ssa_dest_init(&load->instr, &load->dest, load->num_components,
+                     glsl_get_bit_size(deref->type), NULL);
+   nir_builder_instr_insert(build, &load->instr);
+   return &load->dest.ssa;
+}
+
+static inline void
+nir_store_deref(nir_builder *build, nir_deref_instr *deref,
+                nir_ssa_def *value, unsigned writemask)
+{
+   nir_intrinsic_instr *store =
+      nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_deref);
+   store->num_components = glsl_get_vector_elements(deref->type);
+   store->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+   store->src[1] = nir_src_for_ssa(value);
+   nir_intrinsic_set_write_mask(store,
+                                writemask & ((1 << store->num_components) - 1));
+   nir_builder_instr_insert(build, &store->instr);
+}
+
+static inline void
+nir_copy_deref(nir_builder *build, nir_deref_instr *dest, nir_deref_instr *src)
+{
+   nir_intrinsic_instr *copy =
+      nir_intrinsic_instr_create(build->shader, nir_intrinsic_copy_deref);
+   copy->src[0] = nir_src_for_ssa(&dest->dest.ssa);
+   copy->src[1] = nir_src_for_ssa(&src->dest.ssa);
+   nir_builder_instr_insert(build, &copy->instr);
+}
+
+static inline nir_ssa_def *
 nir_load_var(nir_builder *build, nir_variable *var)
 {
    const unsigned num_components = glsl_get_vector_elements(var->type);
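A minimal usage sketch (not part of this patch) of the three new builder helpers above; it assumes the deref-building helpers from earlier in this series, such as nir_build_deref_var(), are available:

#include "nir_builder.h"

/* Hypothetical pass snippet: copy one variable into another using the new
 * deref-based helpers. */
static void
copy_whole_var_example(nir_builder *b, nir_variable *dst_var,
                       nir_variable *src_var)
{
   nir_deref_instr *dst = nir_build_deref_var(b, dst_var);
   nir_deref_instr *src = nir_build_deref_var(b, src_var);

   /* Whole-variable copy through the new copy_deref intrinsic. */
   nir_copy_deref(b, dst, src);

   /* Equivalent load/store pair; nir_store_deref() clamps the writemask
    * to the destination's component count. */
   nir_ssa_def *val = nir_load_deref(b, src);
   nir_store_deref(b, dst, val, ~0u);
}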
+ +intrinsic("interp_deref_at_centroid", dest_comp=0, src_comp=[1], + flags=[ CAN_ELIMINATE, CAN_REORDER]) +intrinsic("interp_deref_at_sample", src_comp=[1, 1], dest_comp=0, + flags=[CAN_ELIMINATE, CAN_REORDER]) +intrinsic("interp_deref_at_offset", src_comp=[1, 2], dest_comp=0, + flags=[CAN_ELIMINATE, CAN_REORDER]) + # Ask the driver for the size of a given buffer. It takes the buffer index # as source. intrinsic("get_buffer_size", src_comp=[1], dest_comp=1, @@ -258,14 +275,17 @@ intrinsic("set_vertex_count", src_comp=[1]) def atomic(name, flags=[]): intrinsic(name + "_var", dest_comp=1, num_vars=1, flags=flags) + intrinsic(name + "_deref", src_comp=[1], dest_comp=1, flags=flags) intrinsic(name, src_comp=[1], dest_comp=1, indices=[BASE], flags=flags) def atomic2(name): intrinsic(name + "_var", src_comp=[1], dest_comp=1, num_vars=1) + intrinsic(name + "_deref", src_comp=[1, 1], dest_comp=1) intrinsic(name, src_comp=[1, 1], dest_comp=1, indices=[BASE]) def atomic3(name): intrinsic(name + "_var", src_comp=[1, 1], dest_comp=1, num_vars=1) + intrinsic(name + "_deref", src_comp=[1, 1, 1], dest_comp=1) intrinsic(name, src_comp=[1, 1, 1], dest_comp=1, indices=[BASE]) atomic("atomic_counter_inc") @@ -307,6 +327,34 @@ intrinsic("image_var_atomic_comp_swap", src_comp=[4, 1, 1, 1], dest_comp=1, num_ intrinsic("image_var_size", dest_comp=0, num_vars=1, flags=[CAN_ELIMINATE, CAN_REORDER]) intrinsic("image_var_samples", dest_comp=1, num_vars=1, flags=[CAN_ELIMINATE, CAN_REORDER]) +# Image load, store and atomic intrinsics. +# +# All image intrinsics take an image target passed as a nir_variable. The +# variable is passed in using a chain of nir_deref_instr with as the first +# source of the image intrinsic. Image variables contain a number of memory +# and layout qualifiers that influence the semantics of the intrinsic. +# +# All image intrinsics take a four-coordinate vector and a sample index as +# first two sources, determining the location within the image that will be +# accessed by the intrinsic. Components not applicable to the image target +# in use are undefined. Image store takes an additional four-component +# argument with the value to be written, and image atomic operations take +# either one or two additional scalar arguments with the same meaning as in +# the ARB_shader_image_load_store specification. +intrinsic("image_deref_load", src_comp=[1, 4, 1], dest_comp=4, + flags=[CAN_ELIMINATE]) +intrinsic("image_deref_store", src_comp=[1, 4, 1, 4]) +intrinsic("image_deref_atomic_add", src_comp=[1, 4, 1, 1], dest_comp=1) +intrinsic("image_deref_atomic_min", src_comp=[1, 4, 1, 1], dest_comp=1) +intrinsic("image_deref_atomic_max", src_comp=[1, 4, 1, 1], dest_comp=1) +intrinsic("image_deref_atomic_and", src_comp=[1, 4, 1, 1], dest_comp=1) +intrinsic("image_deref_atomic_or", src_comp=[1, 4, 1, 1], dest_comp=1) +intrinsic("image_deref_atomic_xor", src_comp=[1, 4, 1, 1], dest_comp=1) +intrinsic("image_deref_atomic_exchange", src_comp=[1, 4, 1, 1], dest_comp=1) +intrinsic("image_deref_atomic_comp_swap", src_comp=[1, 4, 1, 1, 1], dest_comp=1) +intrinsic("image_deref_size", src_comp=[1], dest_comp=0, flags=[CAN_ELIMINATE, CAN_REORDER]) +intrinsic("image_deref_samples", src_comp=[1], dest_comp=1, flags=[CAN_ELIMINATE, CAN_REORDER]) + # Vulkan descriptor set intrinsics # # The Vulkan API uses a different binding model from GL. 
diff --git a/src/compiler/nir/nir_validate.c b/src/compiler/nir/nir_validate.c
index 2d6f287f722..191e3b72325 100644
--- a/src/compiler/nir/nir_validate.c
+++ b/src/compiler/nir/nir_validate.c
@@ -547,6 +547,40 @@ validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
    unsigned dest_bit_size = 0;
    unsigned src_bit_sizes[NIR_INTRINSIC_MAX_INPUTS] = { 0, };
    switch (instr->intrinsic) {
+   case nir_intrinsic_load_deref: {
+      nir_deref_instr *src = nir_src_as_deref(instr->src[0]);
+      validate_assert(state, glsl_type_is_vector_or_scalar(src->type) ||
+                      (src->mode == nir_var_uniform &&
+                       glsl_get_base_type(src->type) == GLSL_TYPE_SUBROUTINE));
+      validate_assert(state, instr->num_components ==
+                             glsl_get_vector_elements(src->type));
+      dest_bit_size = glsl_get_bit_size(src->type);
+      break;
+   }
+
+   case nir_intrinsic_store_deref: {
+      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
+      validate_assert(state, glsl_type_is_vector_or_scalar(dst->type));
+      validate_assert(state, instr->num_components ==
+                             glsl_get_vector_elements(dst->type));
+      src_bit_sizes[1] = glsl_get_bit_size(dst->type);
+      validate_assert(state, (dst->mode & (nir_var_shader_in |
+                                           nir_var_uniform |
+                                           nir_var_shader_storage)) == 0);
+      validate_assert(state, (nir_intrinsic_write_mask(instr) & ~((1 << instr->num_components) - 1)) == 0);
+      break;
+   }
+
+   case nir_intrinsic_copy_deref: {
+      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
+      nir_deref_instr *src = nir_src_as_deref(instr->src[1]);
+      validate_assert(state, dst->type == src->type);
+      validate_assert(state, (dst->mode & (nir_var_shader_in |
+                                           nir_var_uniform |
+                                           nir_var_shader_storage)) == 0);
+      break;
+   }
+
    case nir_intrinsic_load_var: {
       const struct glsl_type *type =
          nir_deref_tail(&instr->variables[0]->deref)->type;
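A small, self-contained illustration (not from the patch) of the write-mask arithmetic enforced by the new store_deref case above: the mask must not address components beyond num_components.

#include <assert.h>
#include <stdbool.h>

/* The allowed mask for an n-component store is (1 << n) - 1. */
static bool
write_mask_in_range(unsigned write_mask, unsigned num_components)
{
   return (write_mask & ~((1u << num_components) - 1)) == 0;
}

int
main(void)
{
   assert(write_mask_in_range(0x5, 3));   /* .xz of a vec3: accepted */
   assert(!write_mask_in_range(0x9, 3));  /* bit past .z: rejected   */
   return 0;
}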