path: root/src/compiler
author     Timothy Arceri <[email protected]>   2016-06-09 13:43:23 +1000
committer  Timothy Arceri <[email protected]>   2016-06-12 21:56:28 +1000
commit     ad3def919e8bdb4f01db0f06d54961175a1910c4 (patch)
tree       6e4677fe46374bc9e7f6d2202cda2c883d35195a /src/compiler
parent     0fb85ac08d61d365e67c8f79d6955e9f89543560 (diff)
glsl: fix max varyings count for ARB_enhanced_layouts
Since this extension allows more than one varying to share a single location, we can't simply count the number of slots each varying takes and add it to the total. Instead, we now reuse the reserved varyings bitfield to determine how many slots are reserved for explicit locations.

Reviewed-by: Kenneth Graunke <[email protected]>
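To illustrate the counting problem this addresses, here is a standalone sketch (not code from this patch; the struct and function names are invented): with ARB_enhanced_layouts two varyings may declare the same explicit location, so summing slots per varying counts that location twice, whereas OR-ing each explicitly used slot into a 64-bit mask and taking its popcount counts it once.

    /* Hypothetical illustration, not Mesa code. */
    #include <stdint.h>
    #include <stdio.h>

    struct fake_varying {
       int explicit_location;   /* 1 if layout(location = N) was used */
       unsigned location;       /* first slot, relative to the first generic varying */
       unsigned num_slots;      /* slots the type occupies */
    };

    static uint64_t
    count_reserved_slots(const struct fake_varying *vars, unsigned n)
    {
       uint64_t slots = 0;
       for (unsigned i = 0; i < n; i++) {
          if (!vars[i].explicit_location)
             continue;
          for (unsigned s = 0; s < vars[i].num_slots; s++)
             slots |= UINT64_C(1) << (vars[i].location + s);
       }
       return slots;
    }

    int main(void)
    {
       /* Two varyings packed into the same location via component qualifiers. */
       struct fake_varying vars[] = {
          { 1, 0, 1 },   /* layout(location = 0, component = 0) */
          { 1, 0, 1 },   /* layout(location = 0, component = 2) */
       };
       uint64_t reserved = count_reserved_slots(vars, 2);
       /* A naive per-varying sum would report 2; the mask's popcount reports 1. */
       printf("slots reserved: %d\n", __builtin_popcountll(reserved));
       return 0;
    }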
Diffstat (limited to 'src/compiler')
-rw-r--r--  src/compiler/glsl/link_varyings.cpp   25
-rw-r--r--  src/compiler/glsl/link_varyings.h     12
-rw-r--r--  src/compiler/glsl/linker.cpp          26
3 files changed, 43 insertions, 20 deletions
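A rough sketch of how the adjusted limit checks behave (hypothetical names and limit value, not the patch's exact code): the vector count starts at the popcount of the reserved-slot mask, and the per-variable loop skips explicitly located variables so they are not counted a second time.

    /* Hypothetical illustration of the adjusted limit check. */
    #include <stdbool.h>
    #include <stdint.h>

    struct fake_var {
       bool explicit_location;
       unsigned vectors;          /* vec4 slots this variable occupies */
    };

    static bool
    within_output_limit(const struct fake_var *vars, unsigned n,
                        uint64_t reserved_slots, unsigned max_vectors)
    {
       /* Slots reserved by explicit locations count once, via the bitmask. */
       unsigned output_vectors = (unsigned) __builtin_popcountll(reserved_slots);

       for (unsigned i = 0; i < n; i++) {
          if (vars[i].explicit_location)
             continue;            /* already accounted for above */
          output_vectors += vars[i].vectors;
       }
       return output_vectors <= max_vectors;
    }

    int main(void)
    {
       struct fake_var vars[] = {
          { true,  1 },   /* explicit location, already in the mask */
          { false, 2 },   /* packed by the linker */
       };
       uint64_t reserved = UINT64_C(1) << 0;   /* one slot reserved explicitly */
       return within_output_limit(vars, 2, reserved, 32) ? 0 : 1;
    }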
diff --git a/src/compiler/glsl/link_varyings.cpp b/src/compiler/glsl/link_varyings.cpp
index 67534a6ca82..230c11fc87b 100644
--- a/src/compiler/glsl/link_varyings.cpp
+++ b/src/compiler/glsl/link_varyings.cpp
@@ -1937,7 +1937,7 @@ canonicalize_shader_io(exec_list *ir, enum ir_variable_mode io_mode)
* 64 bit map. Per-vertex and per-patch both have separate location domains
* with a max of MAX_VARYING.
*/
-static uint64_t
+uint64_t
reserved_varying_slot(struct gl_shader *stage, ir_variable_mode io_mode)
{
assert(io_mode == ir_var_shader_in || io_mode == ir_var_shader_out);
@@ -1999,7 +1999,8 @@ assign_varying_locations(struct gl_context *ctx,
struct gl_shader_program *prog,
gl_shader *producer, gl_shader *consumer,
unsigned num_tfeedback_decls,
- tfeedback_decl *tfeedback_decls)
+ tfeedback_decl *tfeedback_decls,
+ const uint64_t reserved_slots)
{
/* Tessellation shaders treat inputs and outputs as shared memory and can
* access inputs and outputs of other invocations.
@@ -2177,10 +2178,6 @@ assign_varying_locations(struct gl_context *ctx,
}
}
- const uint64_t reserved_slots =
- reserved_varying_slot(producer, ir_var_shader_out) |
- reserved_varying_slot(consumer, ir_var_shader_in);
-
const unsigned slots_used = matches.assign_locations(prog, reserved_slots);
matches.store_locations();
@@ -2263,14 +2260,16 @@ assign_varying_locations(struct gl_context *ctx,
bool
check_against_output_limit(struct gl_context *ctx,
struct gl_shader_program *prog,
- gl_shader *producer)
+ gl_shader *producer,
+ unsigned num_explicit_locations)
{
- unsigned output_vectors = 0;
+ unsigned output_vectors = num_explicit_locations;
foreach_in_list(ir_instruction, node, producer->ir) {
ir_variable *const var = node->as_variable();
- if (var && var->data.mode == ir_var_shader_out &&
+ if (var && !var->data.explicit_location &&
+ var->data.mode == ir_var_shader_out &&
var_counts_against_varying_limit(producer->Stage, var)) {
/* outputs for fragment shader can't be doubles */
output_vectors += var->type->count_attribute_slots(false);
@@ -2305,14 +2304,16 @@ check_against_output_limit(struct gl_context *ctx,
bool
check_against_input_limit(struct gl_context *ctx,
struct gl_shader_program *prog,
- gl_shader *consumer)
+ gl_shader *consumer,
+ unsigned num_explicit_locations)
{
- unsigned input_vectors = 0;
+ unsigned input_vectors = num_explicit_locations;
foreach_in_list(ir_instruction, node, consumer->ir) {
ir_variable *const var = node->as_variable();
- if (var && var->data.mode == ir_var_shader_in &&
+ if (var && !var->data.explicit_location &&
+ var->data.mode == ir_var_shader_in &&
var_counts_against_varying_limit(consumer->Stage, var)) {
/* vertex inputs aren't varying counted */
input_vectors += var->type->count_attribute_slots(false);
diff --git a/src/compiler/glsl/link_varyings.h b/src/compiler/glsl/link_varyings.h
index 2126a5c6d98..39e907039a6 100644
--- a/src/compiler/glsl/link_varyings.h
+++ b/src/compiler/glsl/link_varyings.h
@@ -320,16 +320,22 @@ assign_varying_locations(struct gl_context *ctx,
struct gl_shader_program *prog,
gl_shader *producer, gl_shader *consumer,
unsigned num_tfeedback_decls,
- tfeedback_decl *tfeedback_decls);
+ tfeedback_decl *tfeedback_decls,
+ const uint64_t reserved_slots);
+
+uint64_t
+reserved_varying_slot(struct gl_shader *stage, ir_variable_mode io_mode);
bool
check_against_output_limit(struct gl_context *ctx,
struct gl_shader_program *prog,
- gl_shader *producer);
+ gl_shader *producer,
+ unsigned num_explicit_locations);
bool
check_against_input_limit(struct gl_context *ctx,
struct gl_shader_program *prog,
- gl_shader *consumer);
+ gl_shader *consumer,
+ unsigned num_explicit_locations);
#endif /* GLSL_LINK_VARYINGS_H */
diff --git a/src/compiler/glsl/linker.cpp b/src/compiler/glsl/linker.cpp
index 4ddd46e3a80..1c27854eb7a 100644
--- a/src/compiler/glsl/linker.cpp
+++ b/src/compiler/glsl/linker.cpp
@@ -4850,9 +4850,12 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
*/
if (last < MESA_SHADER_FRAGMENT &&
(num_tfeedback_decls != 0 || prog->SeparateShader)) {
+ const uint64_t reserved_out_slots =
+ reserved_varying_slot(prog->_LinkedShaders[last], ir_var_shader_out);
if (!assign_varying_locations(ctx, mem_ctx, prog,
prog->_LinkedShaders[last], NULL,
- num_tfeedback_decls, tfeedback_decls))
+ num_tfeedback_decls, tfeedback_decls,
+ reserved_out_slots))
goto done;
}
@@ -4870,6 +4873,9 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
gl_shader *const sh = prog->_LinkedShaders[last];
if (prog->SeparateShader) {
+ const uint64_t reserved_slots =
+ reserved_varying_slot(sh, ir_var_shader_in);
+
/* Assign input locations for SSO, output locations are already
* assigned.
*/
@@ -4877,7 +4883,8 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
NULL /* producer */,
sh /* consumer */,
0 /* num_tfeedback_decls */,
- NULL /* tfeedback_decls */))
+ NULL /* tfeedback_decls */,
+ reserved_slots))
goto done;
}
@@ -4898,9 +4905,15 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
gl_shader *const sh_i = prog->_LinkedShaders[i];
gl_shader *const sh_next = prog->_LinkedShaders[next];
+ const uint64_t reserved_out_slots =
+ reserved_varying_slot(sh_i, ir_var_shader_out);
+ const uint64_t reserved_in_slots =
+ reserved_varying_slot(sh_next, ir_var_shader_in);
+
if (!assign_varying_locations(ctx, mem_ctx, prog, sh_i, sh_next,
next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
- tfeedback_decls))
+ tfeedback_decls,
+ reserved_out_slots | reserved_in_slots))
goto done;
do_dead_builtin_varyings(ctx, sh_i, sh_next,
@@ -4909,11 +4922,14 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
/* This must be done after all dead varyings are eliminated. */
if (sh_i != NULL) {
- if (!check_against_output_limit(ctx, prog, sh_i)) {
+ unsigned slots_used = _mesa_bitcount_64(reserved_out_slots);
+ if (!check_against_output_limit(ctx, prog, sh_i, slots_used)) {
goto done;
}
}
- if (!check_against_input_limit(ctx, prog, sh_next))
+
+ unsigned slots_used = _mesa_bitcount_64(reserved_in_slots);
+ if (!check_against_input_limit(ctx, prog, sh_next, slots_used))
goto done;
next = i;