summary | refs | log | tree | commit | diff | stats
path: root/src/compiler
diff options
context:
space:
mode:
authorTimothy Arceri <[email protected]>2019-03-29 12:39:48 +1100
committerKarol Herbst <[email protected]>2019-04-12 09:02:59 +0200
commit035759b61ba1778d5143cdf3a8795a62dd5d8a60 (patch)
tree18f3e4fb796338f1e72defe381e9bd7e463e7ce3 /src/compiler
parent3b2a9ffd60eb3612e1034019e499a27a1c2a672b (diff)
nir/i965/freedreno/vc4: add a bindless bool to type size functions
This is required to calculate sizes correctly when we have bindless samplers/images.

Reviewed-by: Marek Olšák <[email protected]>
Diffstat (limited to 'src/compiler')
-rw-r--r--src/compiler/nir/nir.h4
-rw-r--r--src/compiler/nir/nir_lower_io.c32
2 files changed, 22 insertions, 14 deletions
diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index fb36b04ae66..956b716d38d 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -3015,7 +3015,7 @@ void nir_lower_io_to_temporaries(nir_shader *shader,
void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);
void nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
- int (*type_size)(const struct glsl_type *));
+ int (*type_size)(const struct glsl_type *, bool));
/* Some helpers to do very simple linking */
bool nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer);
@@ -3036,7 +3036,7 @@ typedef enum {
} nir_lower_io_options;
bool nir_lower_io(nir_shader *shader,
nir_variable_mode modes,
- int (*type_size)(const struct glsl_type *),
+ int (*type_size)(const struct glsl_type *, bool),
nir_lower_io_options);
typedef enum {
diff --git a/src/compiler/nir/nir_lower_io.c b/src/compiler/nir/nir_lower_io.c
index 98e2afe76ea..5f18f1df445 100644
--- a/src/compiler/nir/nir_lower_io.c
+++ b/src/compiler/nir/nir_lower_io.c
@@ -38,7 +38,7 @@
struct lower_io_state {
void *dead_ctx;
nir_builder builder;
- int (*type_size)(const struct glsl_type *type);
+ int (*type_size)(const struct glsl_type *type, bool);
nir_variable_mode modes;
nir_lower_io_options options;
};
@@ -95,7 +95,7 @@ global_atomic_for_deref(nir_intrinsic_op deref_op)
void
nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
- int (*type_size)(const struct glsl_type *))
+ int (*type_size)(const struct glsl_type *, bool))
{
unsigned location = 0;
@@ -108,7 +108,10 @@ nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
continue;
var->data.driver_location = location;
- location += type_size(var->type);
+ bool bindless_type_size = var->data.mode == nir_var_shader_in ||
+ var->data.mode == nir_var_shader_out ||
+ var->data.bindless;
+ location += type_size(var->type, bindless_type_size);
}
*size = location;
@@ -138,8 +141,8 @@ nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
static nir_ssa_def *
get_io_offset(nir_builder *b, nir_deref_instr *deref,
nir_ssa_def **vertex_index,
- int (*type_size)(const struct glsl_type *),
- unsigned *component)
+ int (*type_size)(const struct glsl_type *, bool),
+ unsigned *component, bool bts)
{
nir_deref_path path;
nir_deref_path_init(&path, deref, NULL);
@@ -165,7 +168,7 @@ get_io_offset(nir_builder *b, nir_deref_instr *deref,
const unsigned total_offset = *component + index;
const unsigned slot_offset = total_offset / 4;
*component = total_offset % 4;
- return nir_imm_int(b, type_size(glsl_vec4_type()) * slot_offset);
+ return nir_imm_int(b, type_size(glsl_vec4_type(), bts) * slot_offset);
}
/* Just emit code and let constant-folding go to town */
@@ -173,7 +176,7 @@ get_io_offset(nir_builder *b, nir_deref_instr *deref,
for (; *p; p++) {
if ((*p)->deref_type == nir_deref_type_array) {
- unsigned size = type_size((*p)->type);
+ unsigned size = type_size((*p)->type, bts);
nir_ssa_def *mul =
nir_imul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
@@ -185,7 +188,7 @@ get_io_offset(nir_builder *b, nir_deref_instr *deref,
unsigned field_offset = 0;
for (unsigned i = 0; i < (*p)->strct.index; i++) {
- field_offset += type_size(glsl_get_struct_field(parent->type, i));
+ field_offset += type_size(glsl_get_struct_field(parent->type, i), bts);
}
offset = nir_iadd_imm(b, offset, field_offset);
} else {
@@ -255,7 +258,8 @@ lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
nir_intrinsic_set_component(load, component);
if (load->intrinsic == nir_intrinsic_load_uniform)
- nir_intrinsic_set_range(load, state->type_size(var->type));
+ nir_intrinsic_set_range(load,
+ state->type_size(var->type, var->data.bindless));
if (vertex_index) {
load->src[0] = nir_src_for_ssa(vertex_index);
@@ -468,9 +472,13 @@ nir_lower_io_block(nir_block *block,
nir_ssa_def *offset;
nir_ssa_def *vertex_index = NULL;
unsigned component_offset = var->data.location_frac;
+ bool bindless_type_size = mode == nir_var_shader_in ||
+ mode == nir_var_shader_out ||
+ var->data.bindless;
offset = get_io_offset(b, deref, per_vertex ? &vertex_index : NULL,
- state->type_size, &component_offset);
+ state->type_size, &component_offset,
+ bindless_type_size);
nir_intrinsic_instr *replacement;
@@ -538,7 +546,7 @@ nir_lower_io_block(nir_block *block,
static bool
nir_lower_io_impl(nir_function_impl *impl,
nir_variable_mode modes,
- int (*type_size)(const struct glsl_type *),
+ int (*type_size)(const struct glsl_type *, bool),
nir_lower_io_options options)
{
struct lower_io_state state;
@@ -563,7 +571,7 @@ nir_lower_io_impl(nir_function_impl *impl,
bool
nir_lower_io(nir_shader *shader, nir_variable_mode modes,
- int (*type_size)(const struct glsl_type *),
+ int (*type_size)(const struct glsl_type *, bool),
nir_lower_io_options options)
{
bool progress = false;