diff options
author | Timothy Arceri <[email protected]> | 2018-04-30 20:39:43 +1000 |
---|---|---|
committer | Timothy Arceri <[email protected]> | 2018-05-01 12:39:33 +1000 |
commit | 6487e7a30c9e4c2a417ddfe632d5f68e065e21eb (patch) | |
tree | 561df9b47ea4efa99c89dd80dfd22a1a1e122ae8 /src/compiler/glsl | |
parent | f56e22e49673e8234a7fe0c241b4c3eae4752f34 (diff) |
nir: move GL specific passes to src/compiler/glsl
With this we should have no passes in src/compiler/nir with any
dependencies on headers from core GL Mesa.
Reviewed-by: Alejandro Piñeiro <[email protected]>
Diffstat (limited to 'src/compiler/glsl')
-rw-r--r-- | src/compiler/glsl/gl_nir.h                         |  47
-rw-r--r-- | src/compiler/glsl/gl_nir_lower_atomics.c           | 213
-rw-r--r-- | src/compiler/glsl/gl_nir_lower_samplers.c          | 164
-rw-r--r-- | src/compiler/glsl/gl_nir_lower_samplers_as_deref.c | 250
-rw-r--r-- | src/compiler/glsl/meson.build                      |   4
5 files changed, 678 insertions, 0 deletions
diff --git a/src/compiler/glsl/gl_nir.h b/src/compiler/glsl/gl_nir.h new file mode 100644 index 00000000000..59d5f65e659 --- /dev/null +++ b/src/compiler/glsl/gl_nir.h @@ -0,0 +1,47 @@ +/* + * Copyright © 2018 Timothy Arceri + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef GL_NIR_H +#define GL_NIR_H + +#ifdef __cplusplus +extern "C" { +#endif + +struct nir_shader; +struct gl_shader_program; + +bool gl_nir_lower_atomics(nir_shader *shader, + const struct gl_shader_program *shader_program, + bool use_binding_as_idx); + +bool gl_nir_lower_samplers(nir_shader *shader, + const struct gl_shader_program *shader_program); +bool gl_nir_lower_samplers_as_deref(nir_shader *shader, + const struct gl_shader_program *shader_program); + +#ifdef __cplusplus +} +#endif + +#endif /* GL_NIR_H */ diff --git a/src/compiler/glsl/gl_nir_lower_atomics.c b/src/compiler/glsl/gl_nir_lower_atomics.c new file mode 100644 index 00000000000..e203b390b48 --- /dev/null +++ b/src/compiler/glsl/gl_nir_lower_atomics.c @@ -0,0 +1,213 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Connor Abbott ([email protected]) + * + */ + +#include "compiler/nir/nir.h" +#include "gl_nir.h" +#include "ir_uniform.h" + +#include "main/config.h" +#include "main/mtypes.h" +#include <assert.h> + +/* + * replace atomic counter intrinsics that use a variable with intrinsics + * that directly store the buffer index and byte offset + */ + +static bool +lower_instr(nir_intrinsic_instr *instr, + const struct gl_shader_program *shader_program, + nir_shader *shader, bool use_binding_as_idx) +{ + nir_intrinsic_op op; + switch (instr->intrinsic) { + case nir_intrinsic_atomic_counter_read_var: + op = nir_intrinsic_atomic_counter_read; + break; + + case nir_intrinsic_atomic_counter_inc_var: + op = nir_intrinsic_atomic_counter_inc; + break; + + case nir_intrinsic_atomic_counter_dec_var: + op = nir_intrinsic_atomic_counter_dec; + break; + + case nir_intrinsic_atomic_counter_add_var: + op = nir_intrinsic_atomic_counter_add; + break; + + case nir_intrinsic_atomic_counter_min_var: + op = nir_intrinsic_atomic_counter_min; + break; + + case nir_intrinsic_atomic_counter_max_var: + op = nir_intrinsic_atomic_counter_max; + break; + + case nir_intrinsic_atomic_counter_and_var: + op = nir_intrinsic_atomic_counter_and; + break; + + case nir_intrinsic_atomic_counter_or_var: + op = nir_intrinsic_atomic_counter_or; + break; + + case nir_intrinsic_atomic_counter_xor_var: + op = nir_intrinsic_atomic_counter_xor; + break; + + case nir_intrinsic_atomic_counter_exchange_var: + op = nir_intrinsic_atomic_counter_exchange; + break; + + case nir_intrinsic_atomic_counter_comp_swap_var: + op = nir_intrinsic_atomic_counter_comp_swap; + break; + + default: + return false; + } + + if (instr->variables[0]->var->data.mode != nir_var_uniform && + instr->variables[0]->var->data.mode != nir_var_shader_storage && + instr->variables[0]->var->data.mode != nir_var_shared) + return false; /* atomics passed as function arguments can't be lowered */ + + void *mem_ctx = ralloc_parent(instr); + 
unsigned uniform_loc = instr->variables[0]->var->data.location; + + unsigned idx = use_binding_as_idx ? + instr->variables[0]->var->data.binding : + shader_program->data->UniformStorage[uniform_loc].opaque[shader->info.stage].index; + + nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(mem_ctx, op); + nir_intrinsic_set_base(new_instr, idx); + + nir_load_const_instr *offset_const = + nir_load_const_instr_create(mem_ctx, 1, 32); + offset_const->value.u32[0] = instr->variables[0]->var->data.offset; + + nir_instr_insert_before(&instr->instr, &offset_const->instr); + + nir_ssa_def *offset_def = &offset_const->def; + + nir_deref *tail = &instr->variables[0]->deref; + while (tail->child != NULL) { + nir_deref_array *deref_array = nir_deref_as_array(tail->child); + tail = tail->child; + + unsigned child_array_elements = tail->child != NULL ? + glsl_get_aoa_size(tail->type) : 1; + + offset_const->value.u32[0] += deref_array->base_offset * + child_array_elements * ATOMIC_COUNTER_SIZE; + + if (deref_array->deref_array_type == nir_deref_array_type_indirect) { + nir_load_const_instr *atomic_counter_size = + nir_load_const_instr_create(mem_ctx, 1, 32); + atomic_counter_size->value.u32[0] = child_array_elements * ATOMIC_COUNTER_SIZE; + nir_instr_insert_before(&instr->instr, &atomic_counter_size->instr); + + nir_alu_instr *mul = nir_alu_instr_create(mem_ctx, nir_op_imul); + nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, 32, NULL); + mul->dest.write_mask = 0x1; + nir_src_copy(&mul->src[0].src, &deref_array->indirect, mul); + mul->src[1].src.is_ssa = true; + mul->src[1].src.ssa = &atomic_counter_size->def; + nir_instr_insert_before(&instr->instr, &mul->instr); + + nir_alu_instr *add = nir_alu_instr_create(mem_ctx, nir_op_iadd); + nir_ssa_dest_init(&add->instr, &add->dest.dest, 1, 32, NULL); + add->dest.write_mask = 0x1; + add->src[0].src.is_ssa = true; + add->src[0].src.ssa = &mul->dest.dest.ssa; + add->src[1].src.is_ssa = true; + add->src[1].src.ssa = offset_def; + 
nir_instr_insert_before(&instr->instr, &add->instr); + + offset_def = &add->dest.dest.ssa; + } + } + + new_instr->src[0].is_ssa = true; + new_instr->src[0].ssa = offset_def; + + /* Copy the other sources, if any, from the original instruction to the new + * instruction. + */ + for (unsigned i = 0; i < nir_intrinsic_infos[instr->intrinsic].num_srcs; i++) + nir_src_copy(&new_instr->src[i + 1], &instr->src[i], new_instr); + + if (instr->dest.is_ssa) { + nir_ssa_dest_init(&new_instr->instr, &new_instr->dest, + instr->dest.ssa.num_components, 32, NULL); + nir_ssa_def_rewrite_uses(&instr->dest.ssa, + nir_src_for_ssa(&new_instr->dest.ssa)); + } else { + nir_dest_copy(&new_instr->dest, &instr->dest, mem_ctx); + } + + nir_instr_insert_before(&instr->instr, &new_instr->instr); + nir_instr_remove(&instr->instr); + + return true; +} + +bool +gl_nir_lower_atomics(nir_shader *shader, + const struct gl_shader_program *shader_program, + bool use_binding_as_idx) +{ + bool progress = false; + + nir_foreach_function(function, shader) { + if (!function->impl) + continue; + + bool impl_progress = false; + + nir_foreach_block(block, function->impl) { + nir_foreach_instr_safe(instr, block) { + if (instr->type != nir_instr_type_intrinsic) + continue; + + impl_progress |= lower_instr(nir_instr_as_intrinsic(instr), + shader_program, shader, + use_binding_as_idx); + } + } + + if (impl_progress) { + nir_metadata_preserve(function->impl, nir_metadata_block_index | + nir_metadata_dominance); + progress = true; + } + } + + return progress; +} diff --git a/src/compiler/glsl/gl_nir_lower_samplers.c b/src/compiler/glsl/gl_nir_lower_samplers.c new file mode 100644 index 00000000000..a53fabb7e62 --- /dev/null +++ b/src/compiler/glsl/gl_nir_lower_samplers.c @@ -0,0 +1,164 @@ +/* + * Copyright (C) 2005-2007 Brian Paul All Rights Reserved. + * Copyright (C) 2008 VMware, Inc. All Rights Reserved. 
+ * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "compiler/nir/nir.h" +#include "compiler/nir/nir_builder.h" +#include "gl_nir.h" +#include "ir_uniform.h" + +#include "main/compiler.h" +#include "main/mtypes.h" + +/* Calculate the sampler index based on array indicies and also + * calculate the base uniform location for struct members. 
+ */ +static void +calc_sampler_offsets(nir_deref *tail, nir_tex_instr *instr, + unsigned *array_elements, nir_ssa_def **indirect, + nir_builder *b, unsigned *location) +{ + if (tail->child == NULL) + return; + + switch (tail->child->deref_type) { + case nir_deref_type_array: { + nir_deref_array *deref_array = nir_deref_as_array(tail->child); + + assert(deref_array->deref_array_type != nir_deref_array_type_wildcard); + + calc_sampler_offsets(tail->child, instr, array_elements, + indirect, b, location); + instr->texture_index += deref_array->base_offset * *array_elements; + + if (deref_array->deref_array_type == nir_deref_array_type_indirect) { + nir_ssa_def *mul = + nir_imul(b, nir_imm_int(b, *array_elements), + nir_ssa_for_src(b, deref_array->indirect, 1)); + + nir_instr_rewrite_src(&instr->instr, &deref_array->indirect, + NIR_SRC_INIT); + + if (*indirect) { + *indirect = nir_iadd(b, *indirect, mul); + } else { + *indirect = mul; + } + } + + *array_elements *= glsl_get_length(tail->type); + break; + } + + case nir_deref_type_struct: { + nir_deref_struct *deref_struct = nir_deref_as_struct(tail->child); + *location += glsl_get_record_location_offset(tail->type, deref_struct->index); + calc_sampler_offsets(tail->child, instr, array_elements, + indirect, b, location); + break; + } + + default: + unreachable("Invalid deref type"); + break; + } +} + +static bool +lower_sampler(nir_tex_instr *instr, const struct gl_shader_program *shader_program, + gl_shader_stage stage, nir_builder *b) +{ + if (instr->texture == NULL) + return false; + + /* In GLSL, we only fill out the texture field. 
The sampler is inferred */ + assert(instr->sampler == NULL); + + instr->texture_index = 0; + unsigned location = instr->texture->var->data.location; + unsigned array_elements = 1; + nir_ssa_def *indirect = NULL; + + b->cursor = nir_before_instr(&instr->instr); + calc_sampler_offsets(&instr->texture->deref, instr, &array_elements, + &indirect, b, &location); + + if (indirect) { + assert(array_elements >= 1); + indirect = nir_umin(b, indirect, nir_imm_int(b, array_elements - 1)); + + nir_tex_instr_add_src(instr, nir_tex_src_texture_offset, + nir_src_for_ssa(indirect)); + nir_tex_instr_add_src(instr, nir_tex_src_sampler_offset, + nir_src_for_ssa(indirect)); + + instr->texture_array_size = array_elements; + } + + assert(location < shader_program->data->NumUniformStorage && + shader_program->data->UniformStorage[location].opaque[stage].active); + + instr->texture_index += + shader_program->data->UniformStorage[location].opaque[stage].index; + + instr->sampler_index = instr->texture_index; + + instr->texture = NULL; + + return true; +} + +static bool +lower_impl(nir_function_impl *impl, const struct gl_shader_program *shader_program, + gl_shader_stage stage) +{ + nir_builder b; + nir_builder_init(&b, impl); + bool progress = false; + + nir_foreach_block(block, impl) { + nir_foreach_instr(instr, block) { + if (instr->type == nir_instr_type_tex) + progress |= lower_sampler(nir_instr_as_tex(instr), + shader_program, stage, &b); + } + } + + return progress; +} + +bool +gl_nir_lower_samplers(nir_shader *shader, + const struct gl_shader_program *shader_program) +{ + bool progress = false; + + nir_foreach_function(function, shader) { + if (function->impl) + progress |= lower_impl(function->impl, shader_program, + shader->info.stage); + } + + return progress; +} diff --git a/src/compiler/glsl/gl_nir_lower_samplers_as_deref.c b/src/compiler/glsl/gl_nir_lower_samplers_as_deref.c new file mode 100644 index 00000000000..47115f943fe --- /dev/null +++ 
b/src/compiler/glsl/gl_nir_lower_samplers_as_deref.c @@ -0,0 +1,250 @@ +/* + * Copyright (C) 2005-2007 Brian Paul All Rights Reserved. + * Copyright (C) 2008 VMware, Inc. All Rights Reserved. + * Copyright © 2014 Intel Corporation + * Copyright © 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * \file + * + * Lower sampler and image references of (non-bindless) uniforms by removing + * struct dereferences, and synthesizing new uniform variables without structs + * if required. + * + * This will allow backends to have a simple, uniform treatment of bindless and + * non-bindless samplers and images. + * + * Example: + * + * struct S { + * sampler2D tex[2]; + * sampler2D other; + * }; + * uniform S s[2]; + * + * tmp = texture(s[n].tex[m], coord); + * + * Becomes: + * + * decl_var uniform INTERP_MODE_NONE sampler2D[2][2] [email protected] (...) 
+ * + * vec1 32 ssa_idx = $(2 * n + m) + * vec4 32 ssa_out = tex ssa_coord (coord), [email protected][n][m] (texture), [email protected][n][m] (sampler) + * + * and [email protected] has var->data.binding set to the base index as defined by + * the opaque uniform mapping. + */ + +#include "compiler/nir/nir.h" +#include "compiler/nir/nir_builder.h" +#include "gl_nir.h" +#include "ir_uniform.h" + +#include "main/compiler.h" +#include "main/mtypes.h" + +struct lower_samplers_as_deref_state { + nir_shader *shader; + const struct gl_shader_program *shader_program; + struct hash_table *remap_table; +}; + +static void +remove_struct_derefs(nir_deref *tail, + struct lower_samplers_as_deref_state *state, + nir_builder *b, char **path, unsigned *location) +{ + if (!tail->child) + return; + + switch (tail->child->deref_type) { + case nir_deref_type_array: { + unsigned length = glsl_get_length(tail->type); + + remove_struct_derefs(tail->child, state, b, path, location); + + tail->type = glsl_get_array_instance(tail->child->type, length); + break; + } + + case nir_deref_type_struct: { + nir_deref_struct *deref_struct = nir_deref_as_struct(tail->child); + + *location += glsl_get_record_location_offset(tail->type, deref_struct->index); + ralloc_asprintf_append(path, ".%s", + glsl_get_struct_elem_name(tail->type, deref_struct->index)); + + remove_struct_derefs(tail->child, state, b, path, location); + + /* Drop the struct deref and re-parent. 
*/ + ralloc_steal(tail, tail->child->child); + tail->type = tail->child->type; + tail->child = tail->child->child; + break; + } + + default: + unreachable("Invalid deref type"); + break; + } +} + +static void +lower_deref(nir_deref_var *deref, + struct lower_samplers_as_deref_state *state, + nir_builder *b) +{ + nir_variable *var = deref->var; + gl_shader_stage stage = state->shader->info.stage; + unsigned location = var->data.location; + unsigned binding; + const struct glsl_type *orig_type = deref->deref.type; + char *path; + + assert(var->data.mode == nir_var_uniform); + + path = ralloc_asprintf(state->remap_table, "lower@%s", var->name); + remove_struct_derefs(&deref->deref, state, b, &path, &location); + + assert(location < state->shader_program->data->NumUniformStorage && + state->shader_program->data->UniformStorage[location].opaque[stage].active); + + binding = state->shader_program->data->UniformStorage[location].opaque[stage].index; + + if (orig_type == deref->deref.type) { + /* Fast path: We did not encounter any struct derefs. */ + var->data.binding = binding; + return; + } + + uint32_t hash = _mesa_key_hash_string(path); + struct hash_entry *h = + _mesa_hash_table_search_pre_hashed(state->remap_table, hash, path); + + if (h) { + var = (nir_variable *)h->data; + } else { + var = nir_variable_create(state->shader, nir_var_uniform, deref->deref.type, path); + var->data.binding = binding; + _mesa_hash_table_insert_pre_hashed(state->remap_table, hash, path, var); + } + + deref->var = var; +} + +static bool +lower_sampler(nir_tex_instr *instr, struct lower_samplers_as_deref_state *state, + nir_builder *b) +{ + if (!instr->texture || instr->texture->var->data.bindless || + instr->texture->var->data.mode != nir_var_uniform) + return false; + + /* In GLSL, we only fill out the texture field. 
The sampler is inferred */ + assert(instr->sampler == NULL); + + b->cursor = nir_before_instr(&instr->instr); + lower_deref(instr->texture, state, b); + + if (instr->op != nir_texop_txf_ms && + instr->op != nir_texop_txf_ms_mcs && + instr->op != nir_texop_samples_identical) { + nir_instr_rewrite_deref(&instr->instr, &instr->sampler, + nir_deref_var_clone(instr->texture, instr)); + } else { + assert(!instr->sampler); + } + + return true; +} + +static bool +lower_intrinsic(nir_intrinsic_instr *instr, + struct lower_samplers_as_deref_state *state, + nir_builder *b) +{ + if (instr->intrinsic == nir_intrinsic_image_var_load || + instr->intrinsic == nir_intrinsic_image_var_store || + instr->intrinsic == nir_intrinsic_image_var_atomic_add || + instr->intrinsic == nir_intrinsic_image_var_atomic_min || + instr->intrinsic == nir_intrinsic_image_var_atomic_max || + instr->intrinsic == nir_intrinsic_image_var_atomic_and || + instr->intrinsic == nir_intrinsic_image_var_atomic_or || + instr->intrinsic == nir_intrinsic_image_var_atomic_xor || + instr->intrinsic == nir_intrinsic_image_var_atomic_exchange || + instr->intrinsic == nir_intrinsic_image_var_atomic_comp_swap || + instr->intrinsic == nir_intrinsic_image_var_size) { + b->cursor = nir_before_instr(&instr->instr); + + if (instr->variables[0]->var->data.bindless || + instr->variables[0]->var->data.mode != nir_var_uniform) + return false; + + lower_deref(instr->variables[0], state, b); + return true; + } + + return false; +} + +static bool +lower_impl(nir_function_impl *impl, struct lower_samplers_as_deref_state *state) +{ + nir_builder b; + nir_builder_init(&b, impl); + bool progress = false; + + nir_foreach_block(block, impl) { + nir_foreach_instr(instr, block) { + if (instr->type == nir_instr_type_tex) + progress |= lower_sampler(nir_instr_as_tex(instr), state, &b); + else if (instr->type == nir_instr_type_intrinsic) + progress |= lower_intrinsic(nir_instr_as_intrinsic(instr), state, &b); + } + } + + return progress; +} + 
+bool +gl_nir_lower_samplers_as_deref(nir_shader *shader, + const struct gl_shader_program *shader_program) +{ + bool progress = false; + struct lower_samplers_as_deref_state state; + + state.shader = shader; + state.shader_program = shader_program; + state.remap_table = _mesa_hash_table_create(NULL, _mesa_key_hash_string, + _mesa_key_string_equal); + + nir_foreach_function(function, shader) { + if (function->impl) + progress |= lower_impl(function->impl, &state); + } + + /* keys are freed automatically by ralloc */ + _mesa_hash_table_destroy(state.remap_table, NULL); + + return progress; +} diff --git a/src/compiler/glsl/meson.build b/src/compiler/glsl/meson.build index 26ab4f1c8d3..055a84714c1 100644 --- a/src/compiler/glsl/meson.build +++ b/src/compiler/glsl/meson.build @@ -66,6 +66,10 @@ files_libglsl = files( 'builtin_types.cpp', 'builtin_variables.cpp', 'generate_ir.cpp', + 'gl_nir_lower_atomics.c', + 'gl_nir_lower_samplers.c', + 'gl_nir_lower_samplers_as_deref.c', + 'gl_nir.h', 'glsl_parser_extras.cpp', 'glsl_parser_extras.h', 'glsl_symbol_table.cpp', |