author     Ian Romanick <[email protected]>   2014-04-26 17:45:35 -0700
committer  Ian Romanick <[email protected]>   2014-05-02 07:16:54 -0700
commit     7016afe25d1c8ca652bd712ce130b135220530c5 (patch)
tree       3923da738e0dbb732ff066948f8630002d5137fa
parent     03488cd3b956a5afb46749a6e3894a664138eb47 (diff)
glsl: Remove varying "base" parameters
In February 2013 Paul unified the values used for shader stage outputs
and shader stage inputs. See commits 8a076c5f0^..eed6baf76. Since that
time, the location_base parameters are always VARYING_SLOT_VAR0.
Instead of passing that around, just hard code it.
Signed-off-by: Ian Romanick <[email protected]>
Reviewed-by: Eric Anholt <[email protected]>
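
For context, the generic location produced by the linker packs four components per slot, and the lowering pass rebases it on VARYING_SLOT_VAR0, which is exactly the constant the removed producer_base/consumer_base parameters always carried. Below is a minimal standalone sketch of that arithmetic; the numeric value given to VARYING_SLOT_VAR0 is assumed purely for the demo and is not taken from Mesa's headers.

/* Sketch of the slot/offset arithmetic that the patch hard-codes against
 * VARYING_SLOT_VAR0 (the real enum lives in Mesa's headers; 31 is only an
 * assumed value for illustration). */
#include <stdio.h>

#define VARYING_SLOT_VAR0 31  /* assumed value, for illustration only */

int main(void)
{
   /* A packed generic location as handed out by assign_locations():
    * four components per slot, so slot = location / 4, component = location % 4. */
   unsigned generic_location = 9;

   unsigned slot   = generic_location / 4;   /* which generic varying slot */
   unsigned offset = generic_location % 4;   /* which component within it  */

   /* After the patch the stored location is always relative to
    * VARYING_SLOT_VAR0; the old base parameters carried the same constant. */
   unsigned location = VARYING_SLOT_VAR0 + slot;

   printf("generic_location %u -> location %u (VAR%u), component %u\n",
          generic_location, location, slot, offset);
   return 0;
}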
-rw-r--r--  src/glsl/ir_optimization.h          |  2
-rw-r--r--  src/glsl/link_varyings.cpp          | 21
-rw-r--r--  src/glsl/lower_packed_varyings.cpp  | 32
3 files changed, 20 insertions, 35 deletions
diff --git a/src/glsl/ir_optimization.h b/src/glsl/ir_optimization.h
index 40bb6139285..30b3d17b4e9 100644
--- a/src/glsl/ir_optimization.h
+++ b/src/glsl/ir_optimization.h
@@ -112,7 +112,7 @@ bool lower_clip_distance(gl_shader *shader);
 void lower_output_reads(exec_list *instructions);
 bool lower_packing_builtins(exec_list *instructions, int op_mask);
 void lower_ubo_reference(struct gl_shader *shader, exec_list *instructions);
-void lower_packed_varyings(void *mem_ctx, unsigned location_base,
+void lower_packed_varyings(void *mem_ctx, unsigned locations_used,
                            ir_variable_mode mode, unsigned gs_input_vertices,
                            gl_shader *shader);
 bool lower_vector_insert(exec_list *instructions, bool lower_nonconstant_index);
diff --git a/src/glsl/link_varyings.cpp b/src/glsl/link_varyings.cpp
index a9b15f6f7b1..adb23593b65 100644
--- a/src/glsl/link_varyings.cpp
+++ b/src/glsl/link_varyings.cpp
@@ -622,7 +622,7 @@ public:
    ~varying_matches();
    void record(ir_variable *producer_var, ir_variable *consumer_var);
    unsigned assign_locations();
-   void store_locations(unsigned producer_base, unsigned consumer_base) const;
+   void store_locations() const;
 
 private:
    /**
@@ -842,8 +842,7 @@ varying_matches::assign_locations()
  * assignments that were made by varying_matches::assign_locations().
  */
 void
-varying_matches::store_locations(unsigned producer_base,
-                                 unsigned consumer_base) const
+varying_matches::store_locations() const
 {
    for (unsigned i = 0; i < this->num_matches; i++) {
       ir_variable *producer_var = this->matches[i].producer_var;
@@ -852,11 +851,11 @@ varying_matches::store_locations(unsigned producer_base,
       unsigned slot = generic_location / 4;
       unsigned offset = generic_location % 4;
 
-      producer_var->data.location = producer_base + slot;
+      producer_var->data.location = VARYING_SLOT_VAR0 + slot;
       producer_var->data.location_frac = offset;
       if (consumer_var) {
          assert(consumer_var->data.location == -1);
-         consumer_var->data.location = consumer_base + slot;
+         consumer_var->data.location = VARYING_SLOT_VAR0 + slot;
          consumer_var->data.location_frac = offset;
       }
    }
@@ -1069,8 +1068,6 @@ assign_varying_locations(struct gl_context *ctx,
                          tfeedback_decl *tfeedback_decls,
                          unsigned gs_input_vertices)
 {
-   const unsigned producer_base = VARYING_SLOT_VAR0;
-   const unsigned consumer_base = VARYING_SLOT_VAR0;
    varying_matches matches(ctx->Const.DisableVaryingPacking,
                            consumer && consumer->Stage == MESA_SHADER_FRAGMENT);
    hash_table *tfeedback_candidates
@@ -1162,7 +1159,7 @@ assign_varying_locations(struct gl_context *ctx,
    }
 
    const unsigned slots_used = matches.assign_locations();
-   matches.store_locations(producer_base, consumer_base);
+   matches.store_locations();
 
    for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
       if (!tfeedback_decls[i].is_varying())
@@ -1187,11 +1184,11 @@ assign_varying_locations(struct gl_context *ctx,
           */
          assert(!ctx->Extensions.EXT_transform_feedback);
       } else {
-         lower_packed_varyings(mem_ctx, producer_base, slots_used,
-                               ir_var_shader_out, 0, producer);
+         lower_packed_varyings(mem_ctx, slots_used, ir_var_shader_out,
+                               0, producer);
          if (consumer) {
-            lower_packed_varyings(mem_ctx, consumer_base, slots_used,
-                                  ir_var_shader_in, gs_input_vertices, consumer);
+            lower_packed_varyings(mem_ctx, slots_used, ir_var_shader_in,
+                                  gs_input_vertices, consumer);
          }
       }
 
diff --git a/src/glsl/lower_packed_varyings.cpp b/src/glsl/lower_packed_varyings.cpp
index 8c1b8850ba2..35f1c5f60cc 100644
--- a/src/glsl/lower_packed_varyings.cpp
+++ b/src/glsl/lower_packed_varyings.cpp
@@ -160,8 +160,7 @@ namespace {
 class lower_packed_varyings_visitor
 {
 public:
-   lower_packed_varyings_visitor(void *mem_ctx, unsigned location_base,
-                                 unsigned locations_used,
+   lower_packed_varyings_visitor(void *mem_ctx, unsigned locations_used,
                                  ir_variable_mode mode,
                                  unsigned gs_input_vertices,
                                  exec_list *out_instructions);
@@ -190,18 +189,10 @@ private:
    void * const mem_ctx;
 
    /**
-    * Location representing the first generic varying slot for this shader
-    * stage (e.g. VARYING_SLOT_VAR0 if we are packing vertex shader outputs).
-    * Varyings whose location is less than this value are assumed to
-    * correspond to special fixed function hardware, so they are not lowered.
-    */
-   const unsigned location_base;
-
-   /**
     * Number of generic varying slots which are used by this shader. This is
    * used to allocate temporary intermediate data structures. If any varying
    * used by this shader has a location greater than or equal to
-    * location_base + locations_used, an assertion will fire.
+    * VARYING_SLOT_VAR0 + locations_used, an assertion will fire.
    */
   const unsigned locations_used;
 
@@ -235,11 +226,9 @@ private:
 } /* anonymous namespace */
 
 lower_packed_varyings_visitor::lower_packed_varyings_visitor(
-      void *mem_ctx, unsigned location_base, unsigned locations_used,
-      ir_variable_mode mode, unsigned gs_input_vertices,
-      exec_list *out_instructions)
+      void *mem_ctx, unsigned locations_used, ir_variable_mode mode,
+      unsigned gs_input_vertices, exec_list *out_instructions)
    : mem_ctx(mem_ctx),
-     location_base(location_base),
      locations_used(locations_used),
      packed_varyings((ir_variable **)
                      rzalloc_array_size(mem_ctx, sizeof(*packed_varyings),
@@ -259,7 +248,7 @@ lower_packed_varyings_visitor::run(exec_list *instructions)
          continue;
 
       if (var->data.mode != this->mode ||
-          var->data.location < (int) this->location_base ||
+          var->data.location < VARYING_SLOT_VAR0 ||
           !this->needs_lowering(var))
          continue;
 
@@ -542,7 +531,7 @@ lower_packed_varyings_visitor::get_packed_varying_deref(
    unsigned location, ir_variable *unpacked_var, const char *name,
    unsigned vertex_index)
 {
-   unsigned slot = location - this->location_base;
+   unsigned slot = location - VARYING_SLOT_VAR0;
    assert(slot < locations_used);
    if (this->packed_varyings[slot] == NULL) {
       char *packed_name = ralloc_asprintf(this->mem_ctx, "packed:%s", name);
@@ -654,9 +643,9 @@ lower_packed_varyings_gs_splicer::visit(ir_emit_vertex *ev)
 
 
 void
-lower_packed_varyings(void *mem_ctx, unsigned location_base,
-                      unsigned locations_used, ir_variable_mode mode,
-                      unsigned gs_input_vertices, gl_shader *shader)
+lower_packed_varyings(void *mem_ctx, unsigned locations_used,
+                      ir_variable_mode mode, unsigned gs_input_vertices,
+                      gl_shader *shader)
 {
    exec_list *instructions = shader->ir;
    ir_function *main_func = shader->symbols->get_function("main");
@@ -664,8 +653,7 @@ lower_packed_varyings(void *mem_ctx, unsigned location_base,
    ir_function_signature *main_func_sig
       = main_func->matching_signature(NULL, &void_parameters);
    exec_list new_instructions;
-   lower_packed_varyings_visitor visitor(mem_ctx, location_base,
-                                         locations_used, mode,
+   lower_packed_varyings_visitor visitor(mem_ctx, locations_used, mode,
                                          gs_input_vertices, &new_instructions);
    visitor.run(instructions);
    if (mode == ir_var_shader_out) {