author    | Jonathan Marek <[email protected]> | 2019-09-26 00:29:26 -0400
committer | Jonathan Marek <[email protected]> | 2019-09-26 17:18:13 -0400
commit    | b54f9e9e9e0c2c34fd2e9d3e939320da12a48cb9 (patch)
tree      | fc1c1c3b204ddb09763e16a3e98b1948ebafcb45 /src
parent    | c39afe68f0390d45130c1317b3b7e65f55542c36 (diff)
turnip: lower samplers and uniform buffer indices
Lower these to something compatible with ir3, and save the descriptor set
and binding information.
Signed-off-by: Jonathan Marek <[email protected]>
Reviewed-by: Kristian H. Kristensen <[email protected]>
Acked-by: Eric Anholt <[email protected]>
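
The lowering boils down to a small first-fit table: each unique (descriptor set, binding) pair a shader references is assigned the next free slot, repeated references reuse their slot, and the resulting flat index is what ir3 consumes while tu_shader keeps enough information to point back at the Vulkan descriptor. A minimal standalone sketch of that dedup behaviour follows; the `descriptor_map` type and the `main` driver are illustrative stand-ins, not part of the patch.

```c
#include <assert.h>
#include <stdio.h>

#define MAP_MAX 32

/* Illustrative stand-in for the tu_descriptor_map the patch adds. */
struct descriptor_map {
   unsigned num;
   int set[MAP_MAX];
   int binding[MAP_MAX];
};

/* First-fit insert: return the existing slot for (set, binding) if the
 * pair was seen before, otherwise append it and return the new slot.
 * Mirrors the behaviour of map_add() in the patch below. */
static unsigned
map_add(struct descriptor_map *map, int set, int binding)
{
   unsigned index;
   for (index = 0; index < map->num; index++) {
      if (set == map->set[index] && binding == map->binding[index])
         break;
   }

   assert(index < MAP_MAX);

   map->set[index] = set;
   map->binding[index] = binding;
   if (index + 1 > map->num)
      map->num = index + 1;
   return index;
}

int
main(void)
{
   struct descriptor_map ubo_map = {0};

   printf("%u\n", map_add(&ubo_map, 0, 0)); /* 0: first (set 0, binding 0) */
   printf("%u\n", map_add(&ubo_map, 0, 2)); /* 1: new pair gets a new slot */
   printf("%u\n", map_add(&ubo_map, 0, 0)); /* 0: repeated pair reuses its slot */
   return 0;
}
```

Running the sketch prints 0, 1, 0: the third call finds the slot created by the first instead of growing the map.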
Diffstat (limited to 'src')
-rw-r--r-- | src/freedreno/vulkan/tu_private.h |  11
-rw-r--r-- | src/freedreno/vulkan/tu_shader.c  | 136
2 files changed, 147 insertions, 0 deletions
diff --git a/src/freedreno/vulkan/tu_private.h b/src/freedreno/vulkan/tu_private.h
index 170be1a149e..c93d7fa6357 100644
--- a/src/freedreno/vulkan/tu_private.h
+++ b/src/freedreno/vulkan/tu_private.h
@@ -1023,10 +1023,21 @@ struct tu_shader_compile_options
    bool include_binning_pass;
 };
 
+struct tu_descriptor_map
+{
+   unsigned num;
+   int set[32];
+   int binding[32];
+};
+
 struct tu_shader
 {
    struct ir3_shader ir3_shader;
 
+   struct tu_descriptor_map texture_map;
+   struct tu_descriptor_map sampler_map;
+   struct tu_descriptor_map ubo_map;
+
    /* This may be true for vertex shaders. When true, variants[1] is the
     * binning variant and binning_binary is non-NULL.
     */
diff --git a/src/freedreno/vulkan/tu_shader.c b/src/freedreno/vulkan/tu_shader.c
index f3d81675bb1..37edf82642b 100644
--- a/src/freedreno/vulkan/tu_shader.c
+++ b/src/freedreno/vulkan/tu_shader.c
@@ -107,6 +107,139 @@ tu_sort_variables_by_location(struct exec_list *variables)
    exec_list_move_nodes_to(&sorted, variables);
 }
 
+static unsigned
+map_add(struct tu_descriptor_map *map, int set, int binding)
+{
+   unsigned index;
+   for (index = 0; index < map->num; index++) {
+      if (set == map->set[index] && binding == map->binding[index])
+         break;
+   }
+
+   assert(index < ARRAY_SIZE(map->set));
+
+   map->set[index] = set;
+   map->binding[index] = binding;
+   map->num = MAX2(map->num, index + 1);
+   return index;
+}
+
+static void
+lower_tex_src_to_offset(nir_tex_instr *instr, unsigned src_idx,
+                        struct tu_shader *shader, bool is_sampler)
+{
+   nir_deref_instr *deref =
+      nir_instr_as_deref(instr->src[src_idx].src.ssa->parent_instr);
+
+   if (deref->deref_type != nir_deref_type_var) {
+      tu_finishme("sampler array");
+      return;
+   }
+
+   if (is_sampler) {
+      instr->sampler_index = map_add(&shader->sampler_map,
+                                     deref->var->data.descriptor_set,
+                                     deref->var->data.binding);
+   } else {
+      instr->texture_index = map_add(&shader->texture_map,
+                                     deref->var->data.descriptor_set,
+                                     deref->var->data.binding);
+      instr->texture_array_size = 1;
+   }
+
+   nir_tex_instr_remove_src(instr, src_idx);
+}
+
+static bool
+lower_sampler(nir_tex_instr *instr, struct tu_shader *shader)
+{
+   int texture_idx =
+      nir_tex_instr_src_index(instr, nir_tex_src_texture_deref);
+
+   if (texture_idx >= 0)
+      lower_tex_src_to_offset(instr, texture_idx, shader, false);
+
+   int sampler_idx =
+      nir_tex_instr_src_index(instr, nir_tex_src_sampler_deref);
+
+   if (sampler_idx >= 0)
+      lower_tex_src_to_offset(instr, sampler_idx, shader, true);
+
+   if (texture_idx < 0 && sampler_idx < 0)
+      return false;
+
+   return true;
+}
+
+static bool
+lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
+                struct tu_shader *shader)
+{
+   if (instr->intrinsic != nir_intrinsic_vulkan_resource_index)
+      return false;
+
+   nir_const_value *const_val = nir_src_as_const_value(instr->src[0]);
+   if (!const_val || const_val->u32 != 0) {
+      tu_finishme("non-zero vulkan_resource_index array index");
+      return false;
+   }
+
+   if (nir_intrinsic_desc_type(instr) != VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
+      tu_finishme("non-ubo vulkan_resource_index");
+      return false;
+   }
+
+   unsigned index = map_add(&shader->ubo_map,
+                            nir_intrinsic_desc_set(instr),
+                            nir_intrinsic_binding(instr));
+
+   b->cursor = nir_before_instr(&instr->instr);
+   /* skip index 0 because ir3 treats it differently */
+   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
+                            nir_src_for_ssa(nir_imm_int(b, index + 1)));
+   nir_instr_remove(&instr->instr);
+
+   return true;
+}
+
+static bool
+lower_impl(nir_function_impl *impl, struct tu_shader *shader)
+{
+   nir_builder b;
+   nir_builder_init(&b, impl);
+   bool progress = false;
+
+   nir_foreach_block(block, impl) {
+      nir_foreach_instr_safe(instr, block) {
+         switch (instr->type) {
+         case nir_instr_type_tex:
+            progress |= lower_sampler(nir_instr_as_tex(instr), shader);
+            break;
+         case nir_instr_type_intrinsic:
+            progress |= lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader);
+            break;
+         default:
+            break;
+         }
+      }
+   }
+
+   return progress;
+}
+
+static bool
+tu_lower_io(nir_shader *shader, struct tu_shader *tu_shader)
+{
+   bool progress = false;
+
+   nir_foreach_function(function, shader) {
+      if (function->impl)
+         progress |= lower_impl(function->impl, tu_shader);
+   }
+
+   return progress;
+}
+
 struct tu_shader *
 tu_shader_create(struct tu_device *dev,
                  gl_shader_stage stage,
@@ -171,6 +304,9 @@ tu_shader_create(struct tu_device *dev,
 
    NIR_PASS_V(nir, nir_lower_system_values);
    NIR_PASS_V(nir, nir_lower_frexp);
+
+   NIR_PASS_V(nir, tu_lower_io, shader);
+
    NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size, 0);
 
    nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
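
One consequence of the `index + 1` rewrite in `lower_intrinsic` (index 0 is skipped because ir3 treats it differently) is that the UBO index seen in the lowered NIR is off by one from the slot recorded in `ubo_map`. As a sketch only, a hypothetical helper that is not part of this patch could translate a lowered index back to the Vulkan (set, binding) like this:

```c
#include <stdbool.h>

/* struct tu_descriptor_map as added to tu_private.h by this patch. */
struct tu_descriptor_map {
   unsigned num;
   int set[32];
   int binding[32];
};

/* Hypothetical helper (not in the patch): map a UBO index from the lowered
 * NIR back to the Vulkan (set, binding) recorded in tu_shader::ubo_map.
 * Lowered indices start at 1 because ir3 reserves index 0, so the map slot
 * is nir_index - 1. */
static bool
lookup_ubo_binding(const struct tu_descriptor_map *map, unsigned nir_index,
                   int *set, int *binding)
{
   if (nir_index == 0 || nir_index > map->num)
      return false; /* 0 is reserved; anything past num was never recorded */

   *set = map->set[nir_index - 1];
   *binding = map->binding[nir_index - 1];
   return true;
}
```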