Diffstat (limited to 'src')
-rw-r--r--   src/glsl/ir_uniform.h        8
-rw-r--r--   src/glsl/link_uniforms.cpp   103
-rw-r--r--   src/glsl/linker.cpp          182
3 files changed, 286 insertions(+), 7 deletions(-)
diff --git a/src/glsl/ir_uniform.h b/src/glsl/ir_uniform.h
index e1b80147788..0b6f7201a20 100644
--- a/src/glsl/ir_uniform.h
+++ b/src/glsl/ir_uniform.h
@@ -114,6 +114,8 @@ struct gl_uniform_storage {
struct gl_opaque_uniform_index image[MESA_SHADER_STAGES];
+ struct gl_opaque_uniform_index subroutine[MESA_SHADER_STAGES];
+
/**
* Storage used by the driver for the uniform
*/
@@ -173,10 +175,16 @@ struct gl_uniform_storage {
/**
* The 'base location' for this uniform in the uniform remap table. For
* arrays this is the first element in the array.
+ * For subroutines this is an index into the shader's subroutine uniform remap table.
*/
unsigned remap_location;
/**
+ * The number of subroutine functions that are compatible with this subroutine uniform.
+ */
+ unsigned num_compatible_subroutines;
+
+ /**
* This is a compiler-generated uniform that should not be advertised
* via the API.
*/
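
The two fields added above carry the per-stage bookkeeping for ARB_shader_subroutine: subroutine[stage].index records which subroutine-uniform index a storage entry occupies in that stage, and num_compatible_subroutines (computed by the linker, see linker.cpp further down) backs the GL_NUM_COMPATIBLE_SUBROUTINES query. As a rough illustration of the consumer side, here is a minimal application-level sketch, not part of this patch, that walks the active fragment-stage subroutine uniforms; it assumes a current GL 4.0+ context with the entry points resolved by a loader (libepoxy is used here purely for illustration).

/* Application-side sketch (not part of this patch): enumerate the active
 * fragment-stage subroutine uniforms and query their compatible subroutines.
 * Assumes a current GL 4.0+ context with entry points resolved by a loader.
 */
#include <epoxy/gl.h>
#include <cstdio>
#include <vector>

void
dump_fragment_subroutine_uniforms(GLuint prog)
{
   GLint num_uniforms = 0;
   glGetProgramStageiv(prog, GL_FRAGMENT_SHADER,
                       GL_ACTIVE_SUBROUTINE_UNIFORMS, &num_uniforms);

   for (GLint u = 0; u < num_uniforms; u++) {
      /* This value is what num_compatible_subroutines ends up backing. */
      GLint num_compat = 0;
      glGetActiveSubroutineUniformiv(prog, GL_FRAGMENT_SHADER, u,
                                     GL_NUM_COMPATIBLE_SUBROUTINES, &num_compat);
      printf("subroutine uniform %d: %d compatible subroutine(s)\n",
             u, num_compat);

      if (num_compat <= 0)
         continue;

      std::vector<GLint> compat(num_compat);
      glGetActiveSubroutineUniformiv(prog, GL_FRAGMENT_SHADER, u,
                                     GL_COMPATIBLE_SUBROUTINES, compat.data());
      /* compat[] now holds the indices of the compatible subroutine functions. */
   }
}
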
diff --git a/src/glsl/link_uniforms.cpp b/src/glsl/link_uniforms.cpp
index e786ddcaa90..254086dc050 100644
--- a/src/glsl/link_uniforms.cpp
+++ b/src/glsl/link_uniforms.cpp
@@ -47,9 +47,10 @@
static unsigned
values_for_type(const glsl_type *type)
{
- if (type->is_sampler()) {
+ if (type->is_sampler() || type->is_subroutine()) {
return 1;
- } else if (type->is_array() && type->fields.array->is_sampler()) {
+ } else if (type->is_array() && (type->fields.array->is_sampler() ||
+ type->fields.array->is_subroutine())) {
return type->array_size();
} else {
return type->component_slots();
@@ -284,6 +285,7 @@ public:
count_uniform_size(struct string_to_uint_map *map)
: num_active_uniforms(0), num_values(0), num_shader_samplers(0),
num_shader_images(0), num_shader_uniform_components(0),
+ num_shader_subroutines(0),
is_ubo_var(false), map(map)
{
/* empty */
@@ -294,6 +296,7 @@ public:
this->num_shader_samplers = 0;
this->num_shader_images = 0;
this->num_shader_uniform_components = 0;
+ this->num_shader_subroutines = 0;
}
void process(ir_variable *var)
@@ -331,6 +334,11 @@ public:
*/
unsigned num_shader_uniform_components;
+ /**
+ * Number of subroutine uniforms used
+ */
+ unsigned num_shader_subroutines;
+
bool is_ubo_var;
private:
@@ -348,7 +356,9 @@ private:
* count it for each shader target.
*/
const unsigned values = values_for_type(type);
- if (type->contains_sampler()) {
+ if (type->contains_subroutine()) {
+ this->num_shader_subroutines += values;
+ } else if (type->contains_sampler()) {
this->num_shader_samplers += values;
} else if (type->contains_image()) {
this->num_shader_images += values;
@@ -421,6 +431,7 @@ public:
this->shader_shadow_samplers = 0;
this->next_sampler = 0;
this->next_image = 0;
+ this->next_subroutine = 0;
memset(this->targets, 0, sizeof(this->targets));
}
@@ -535,6 +546,24 @@ private:
}
}
+ void handle_subroutines(const glsl_type *base_type,
+ struct gl_uniform_storage *uniform)
+ {
+ if (base_type->is_subroutine()) {
+ uniform->subroutine[shader_type].index = this->next_subroutine;
+ uniform->subroutine[shader_type].active = true;
+
+ /* Increment the subroutine index by 1 for non-arrays and by the
+ * number of array elements for arrays.
+ */
+ this->next_subroutine += MAX2(1, uniform->array_elements);
+
+ } else {
+ uniform->subroutine[shader_type].index = ~0;
+ uniform->subroutine[shader_type].active = false;
+ }
+ }
+
virtual void visit_field(const glsl_type *type, const char *name,
bool row_major)
{
@@ -588,6 +617,7 @@ private:
/* This assigns uniform indices to sampler and image uniforms. */
handle_samplers(base_type, &this->uniforms[id]);
handle_images(base_type, &this->uniforms[id]);
+ handle_subroutines(base_type, &this->uniforms[id]);
/* If there is already storage associated with this uniform or if the
* uniform is set as builtin, it means that it was set while processing
@@ -672,6 +702,7 @@ private:
struct gl_uniform_storage *uniforms;
unsigned next_sampler;
unsigned next_image;
+ unsigned next_subroutine;
public:
union gl_constant_value *values;
@@ -954,8 +985,8 @@ link_assign_uniform_locations(struct gl_shader_program *prog,
sh->num_samplers = uniform_size.num_shader_samplers;
sh->NumImages = uniform_size.num_shader_images;
sh->num_uniform_components = uniform_size.num_shader_uniform_components;
-
sh->num_combined_uniform_components = sh->num_uniform_components;
+
for (unsigned i = 0; i < sh->NumUniformBlocks; i++) {
sh->num_combined_uniform_components +=
sh->UniformBlocks[i].UniformBufferSize / 4;
@@ -1008,6 +1039,9 @@ link_assign_uniform_locations(struct gl_shader_program *prog,
/* Reserve all the explicit locations of the active uniforms. */
for (unsigned i = 0; i < num_uniforms; i++) {
+ if (uniforms[i].type->is_subroutine())
+ continue;
+
if (uniforms[i].remap_location != UNMAPPED_UNIFORM_LOC) {
/* How many new entries for this uniform? */
const unsigned entries = MAX2(1, uniforms[i].array_elements);
@@ -1025,6 +1059,8 @@ link_assign_uniform_locations(struct gl_shader_program *prog,
/* Reserve locations for rest of the uniforms. */
for (unsigned i = 0; i < num_uniforms; i++) {
+ if (uniforms[i].type->is_subroutine())
+ continue;
/* Built-in uniforms should not get any location. */
if (uniforms[i].builtin)
continue;
@@ -1053,6 +1089,65 @@ link_assign_uniform_locations(struct gl_shader_program *prog,
prog->NumUniformRemapTable += entries;
}
+ /* Reserve all the explicit locations of the active subroutine uniforms. */
+ for (unsigned i = 0; i < num_uniforms; i++) {
+ if (!uniforms[i].type->is_subroutine())
+ continue;
+
+ if (uniforms[i].remap_location == UNMAPPED_UNIFORM_LOC)
+ continue;
+
+ for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
+ struct gl_shader *sh = prog->_LinkedShaders[j];
+ if (!sh)
+ continue;
+
+ if (!uniforms[i].subroutine[j].active)
+ continue;
+
+ /* How many new entries for this uniform? */
+ const unsigned entries = MAX2(1, uniforms[i].array_elements);
+
+ /* Point the remap table entries at the correct gl_uniform_storage. */
+ for (unsigned k = 0; k < entries; k++) {
+ unsigned element_loc = uniforms[i].remap_location + k;
+ assert(sh->SubroutineUniformRemapTable[element_loc] ==
+ INACTIVE_UNIFORM_EXPLICIT_LOCATION);
+ sh->SubroutineUniformRemapTable[element_loc] = &uniforms[i];
+ }
+ }
+ }
+
+ /* Reserve locations for the remaining (implicitly located) subroutine uniforms. */
+ for (unsigned i = 0; i < num_uniforms; i++) {
+
+ if (!uniforms[i].type->is_subroutine())
+ continue;
+ const unsigned entries = MAX2(1, uniforms[i].array_elements);
+
+ if (uniforms[i].remap_location != UNMAPPED_UNIFORM_LOC)
+ continue;
+ for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
+ struct gl_shader *sh = prog->_LinkedShaders[j];
+ if (!sh)
+ continue;
+
+ if (!uniforms[i].subroutine[j].active)
+ continue;
+
+ sh->SubroutineUniformRemapTable =
+ reralloc(sh,
+ sh->SubroutineUniformRemapTable,
+ gl_uniform_storage *,
+ sh->NumSubroutineUniformRemapTable + entries);
+
+ for (unsigned k = 0; k < entries; k++)
+ sh->SubroutineUniformRemapTable[sh->NumSubroutineUniformRemapTable + k] = &uniforms[i];
+ uniforms[i].remap_location = sh->NumSubroutineUniformRemapTable;
+ sh->NumSubroutineUniformRemapTable += entries;
+ }
+ }
+
#ifndef NDEBUG
for (unsigned i = 0; i < num_uniforms; i++) {
assert(uniforms[i].storage != NULL || uniforms[i].builtin);
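
The two loops added at the end of link_assign_uniform_locations() mirror the existing remap-table logic, but target the per-stage SubroutineUniformRemapTable: the first pass fills the slots that explicit locations reserved earlier, the second appends implicitly located subroutine uniforms after the current end of the table. The standalone sketch below models just that two-pass assignment with simplified stand-in types (FakeUniform, UNMAPPED and assign_subroutine_locations() are illustrative names, not Mesa's).

/* Standalone model of the two-pass subroutine-uniform location assignment
 * above, using simplified stand-in types rather than Mesa's structures.
 */
#include <cassert>
#include <climits>
#include <vector>

struct FakeUniform {
   unsigned remap_location;   /* UNMAPPED until a location has been assigned */
   unsigned array_elements;   /* 0 for non-arrays, N for an array of size N */
};

static const unsigned UNMAPPED = UINT_MAX;   /* stands in for UNMAPPED_UNIFORM_LOC */

static void
assign_subroutine_locations(std::vector<FakeUniform> &uniforms,
                            std::vector<FakeUniform *> &remap_table)
{
   /* Pass 1: explicitly located uniforms already know their base location;
    * just point the pre-reserved slots at them. */
   for (FakeUniform &u : uniforms) {
      if (u.remap_location == UNMAPPED)
         continue;
      const unsigned entries = u.array_elements ? u.array_elements : 1;
      for (unsigned k = 0; k < entries; k++) {
         assert(u.remap_location + k < remap_table.size());
         remap_table[u.remap_location + k] = &u;
      }
   }

   /* Pass 2: everything else is appended after the current end of the table,
    * one slot per array element. */
   for (FakeUniform &u : uniforms) {
      if (u.remap_location != UNMAPPED)
         continue;
      const unsigned entries = u.array_elements ? u.array_elements : 1;
      u.remap_location = (unsigned) remap_table.size();
      remap_table.resize(remap_table.size() + entries, &u);
   }
}

Pass 1 relies on the table already being large enough to hold every explicit location; in the patch that growth happens earlier, in reserve_subroutine_explicit_locations() (linker.cpp below), and both passes additionally run per shader stage and skip uniforms that are not active in that stage.
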
diff --git a/src/glsl/linker.cpp b/src/glsl/linker.cpp
index d0445f16032..8f2c8ee9a05 100644
--- a/src/glsl/linker.cpp
+++ b/src/glsl/linker.cpp
@@ -933,6 +933,10 @@ cross_validate_globals(struct gl_shader_program *prog,
if (uniforms_only && (var->data.mode != ir_var_uniform && var->data.mode != ir_var_shader_storage))
continue;
+ /* Don't cross validate subroutine uniforms. */
+ if (var->type->contains_subroutine())
+ continue;
+
/* Don't cross validate temporaries that are at global scope. These
* will eventually get pulled into the shaders 'main'.
*/
@@ -2196,8 +2200,11 @@ update_array_sizes(struct gl_shader_program *prog)
* Atomic counters are supposed to get deterministic
* locations assigned based on the declaration ordering and
* sizes, array compaction would mess that up.
+ *
+ * Subroutine uniform arrays are not resized either.
*/
- if (var->is_in_buffer_block() || var->type->contains_atomic())
+ if (var->is_in_buffer_block() || var->type->contains_atomic() ||
+ var->type->contains_subroutine())
continue;
unsigned int size = var->data.max_array_access;
@@ -2788,6 +2795,49 @@ check_resources(struct gl_context *ctx, struct gl_shader_program *prog)
}
}
+static void
+link_calculate_subroutine_compat(struct gl_context *ctx, struct gl_shader_program *prog)
+{
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_shader *sh = prog->_LinkedShaders[i];
+ int count;
+ if (!sh)
+ continue;
+
+ for (unsigned j = 0; j < sh->NumSubroutineUniformRemapTable; j++) {
+ struct gl_uniform_storage *uni = sh->SubroutineUniformRemapTable[j];
+
+ if (!uni)
+ continue;
+
+ count = 0;
+ for (unsigned f = 0; f < sh->NumSubroutineFunctions; f++) {
+ struct gl_subroutine_function *fn = &sh->SubroutineFunctions[f];
+ for (int k = 0; k < fn->num_compat_types; k++) {
+ if (fn->types[k] == uni->type) {
+ count++;
+ break;
+ }
+ }
+ }
+ uni->num_compatible_subroutines = count;
+ }
+ }
+}
+
+static void
+check_subroutine_resources(struct gl_context *ctx, struct gl_shader_program *prog)
+{
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_shader *sh = prog->_LinkedShaders[i];
+
+ if (sh) {
+ if (sh->NumSubroutineUniformRemapTable > MAX_SUBROUTINE_UNIFORM_LOCATIONS)
+ linker_error(prog, "Too many %s shader subroutine uniforms\n",
+ _mesa_shader_stage_to_string(i));
+ }
+ }
+}
/**
* Validate shader image resources.
*/
@@ -2896,6 +2946,59 @@ reserve_explicit_locations(struct gl_shader_program *prog,
return true;
}
+static bool
+reserve_subroutine_explicit_locations(struct gl_shader_program *prog,
+ struct gl_shader *sh,
+ ir_variable *var)
+{
+ unsigned slots = var->type->uniform_locations();
+ unsigned max_loc = var->data.location + slots - 1;
+
+ /* Resize remap table if locations do not fit in the current one. */
+ if (max_loc + 1 > sh->NumSubroutineUniformRemapTable) {
+ sh->SubroutineUniformRemapTable =
+ reralloc(sh, sh->SubroutineUniformRemapTable,
+ gl_uniform_storage *,
+ max_loc + 1);
+
+ if (!sh->SubroutineUniformRemapTable) {
+ linker_error(prog, "Out of memory during linking.\n");
+ return false;
+ }
+
+ /* Initialize allocated space. */
+ for (unsigned i = sh->NumSubroutineUniformRemapTable; i < max_loc + 1; i++)
+ sh->SubroutineUniformRemapTable[i] = NULL;
+
+ sh->NumSubroutineUniformRemapTable = max_loc + 1;
+ }
+
+ for (unsigned i = 0; i < slots; i++) {
+ unsigned loc = var->data.location + i;
+
+ /* Check if location is already used. */
+ if (sh->SubroutineUniformRemapTable[loc] == INACTIVE_UNIFORM_EXPLICIT_LOCATION) {
+
+ /* ARB_explicit_uniform_location specification states:
+ * "No two subroutine uniform variables can have the same location
+ * in the same shader stage, otherwise a compiler or linker error
+ * will be generated."
+ */
+ linker_error(prog,
+ "location qualifier for uniform %s overlaps "
+ "previously used location\n",
+ var->name);
+ return false;
+ }
+
+ /* Initialize location as inactive before optimization
+ * rounds and location assignment.
+ */
+ sh->SubroutineUniformRemapTable[loc] = INACTIVE_UNIFORM_EXPLICIT_LOCATION;
+ }
+
+ return true;
+}
/**
* Check and reserve all explicit uniform locations, called before
* any optimizations happen to handle also inactive uniforms and
@@ -2928,7 +3031,12 @@ check_explicit_uniform_locations(struct gl_context *ctx,
ir_variable *var = node->as_variable();
if (var && (var->data.mode == ir_var_uniform || var->data.mode == ir_var_shader_storage) &&
var->data.explicit_location) {
- if (!reserve_explicit_locations(prog, uniform_map, var)) {
+ bool ret;
+ if (var->type->is_subroutine())
+ ret = reserve_subroutine_explicit_locations(prog, sh, var);
+ else
+ ret = reserve_explicit_locations(prog, uniform_map, var);
+ if (!ret) {
delete uniform_map;
return;
}
@@ -3143,10 +3251,39 @@ build_program_resource_list(struct gl_context *ctx,
return;
}
+ for (unsigned i = 0; i < shProg->NumUniformStorage; i++) {
+ GLenum type;
+ if (!shProg->UniformStorage[i].hidden)
+ continue;
+
+ for (int j = MESA_SHADER_VERTEX; j < MESA_SHADER_STAGES; j++) {
+ if (!shProg->UniformStorage[i].subroutine[j].active)
+ continue;
+
+ type = _mesa_shader_stage_to_subroutine_uniform((gl_shader_stage)j);
+ /* Add the per-stage subroutine uniform resources. */
+ if (!add_program_resource(shProg, type, &shProg->UniformStorage[i], 0))
+ return;
+ }
+ }
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_shader *sh = shProg->_LinkedShaders[i];
+ GLuint type;
+
+ if (!sh)
+ continue;
+
+ type = _mesa_shader_stage_to_subroutine((gl_shader_stage)i);
+ for (unsigned j = 0; j < sh->NumSubroutineFunctions; j++) {
+ if (!add_program_resource(shProg, type, &sh->SubroutineFunctions[j], 0))
+ return;
+ }
+ }
+
/* TODO - following extensions will require more resource types:
*
* GL_ARB_shader_storage_buffer_object
- * GL_ARB_shader_subroutine
*/
}
@@ -3184,6 +3321,41 @@ validate_sampler_array_indexing(struct gl_context *ctx,
return true;
}
+void
+link_assign_subroutine_types(struct gl_context *ctx,
+ struct gl_shader_program *prog)
+{
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ gl_shader *sh = prog->_LinkedShaders[i];
+
+ if (sh == NULL)
+ continue;
+
+ foreach_in_list(ir_instruction, node, sh->ir) {
+ ir_function *fn = node->as_function();
+ if (!fn)
+ continue;
+
+ if (fn->is_subroutine)
+ sh->NumSubroutineUniformTypes++;
+
+ if (!fn->num_subroutine_types)
+ continue;
+
+ sh->SubroutineFunctions = reralloc(sh, sh->SubroutineFunctions,
+ struct gl_subroutine_function,
+ sh->NumSubroutineFunctions + 1);
+ sh->SubroutineFunctions[sh->NumSubroutineFunctions].name = ralloc_strdup(sh, fn->name);
+ sh->SubroutineFunctions[sh->NumSubroutineFunctions].num_compat_types = fn->num_subroutine_types;
+ sh->SubroutineFunctions[sh->NumSubroutineFunctions].types =
+ ralloc_array(sh, const struct glsl_type *,
+ fn->num_subroutine_types);
+ for (int j = 0; j < fn->num_subroutine_types; j++)
+ sh->SubroutineFunctions[sh->NumSubroutineFunctions].types[j] = fn->subroutine_types[j];
+ sh->NumSubroutineFunctions++;
+ }
+ }
+}
void
link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
@@ -3373,6 +3545,8 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
}
check_explicit_uniform_locations(ctx, prog);
+ link_assign_subroutine_types(ctx, prog);
+
if (!prog->LinkStatus)
goto done;
@@ -3630,7 +3804,9 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
link_assign_atomic_counter_resources(ctx, prog);
store_fragdepth_layout(prog);
+ link_calculate_subroutine_compat(ctx, prog);
check_resources(ctx, prog);
+ check_subroutine_resources(ctx, prog);
check_image_resources(ctx, prog);
link_check_atomic_counter_resources(ctx, prog);
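
Taken together, the linker pieces above (link_assign_subroutine_types() gathering the subroutine functions, the remap-table layout in link_uniforms.cpp, the compatibility counting in link_calculate_subroutine_compat() and the limit check in check_subroutine_resources()) are what backs the draw-time subroutine API. A closing application-side sketch of that side, not part of this patch, using hypothetical shader names:

/* Application-side sketch (not part of this patch): select a subroutine for
 * the fragment-stage subroutine uniform "shade" at draw time. The names
 * "shade" and "lit" are hypothetical, and the sketch assumes "shade" is the
 * stage's only subroutine uniform. Assumes a current GL 4.0+ context with
 * entry points resolved by a loader.
 */
#include <epoxy/gl.h>
#include <vector>

void
select_lit_shading(GLuint prog)
{
   glUseProgram(prog);

   /* Index of the subroutine function we want to plug in. */
   const GLuint lit = glGetSubroutineIndex(prog, GL_FRAGMENT_SHADER, "lit");

   /* The uniform's location is its slot in the indices array below. */
   const GLint loc =
      glGetSubroutineUniformLocation(prog, GL_FRAGMENT_SHADER, "shade");

   /* One (compatible) index must be supplied for every active subroutine
    * uniform location of the stage, all in a single call. */
   GLint num_locs = 0;
   glGetProgramStageiv(prog, GL_FRAGMENT_SHADER,
                       GL_ACTIVE_SUBROUTINE_UNIFORM_LOCATIONS, &num_locs);
   if (loc < 0 || num_locs <= 0)
      return;

   std::vector<GLuint> indices(num_locs, lit);   /* valid only under the
                                                  * single-uniform assumption */
   indices[loc] = lit;   /* redundant here, but shows the location -> slot map */
   glUniformSubroutinesuiv(GL_FRAGMENT_SHADER, num_locs, indices.data());
}

Note that the selection is context state rather than program state: it is lost on the next glUseProgram() and has to be re-issued before drawing.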