Diffstat (limited to 'src/glsl')
-rw-r--r--   src/glsl/Makefile                       |   2
-rw-r--r--   src/glsl/ir_function.cpp                |   2
-rw-r--r--   src/glsl/ir_optimization.h              |   2
-rw-r--r--   src/glsl/ir_print_visitor.cpp           |  10
-rw-r--r--   src/glsl/ir_reader.cpp                  |  24
-rw-r--r--   src/glsl/linker.cpp                     | 108
-rw-r--r--   src/glsl/lower_jumps.cpp                | 502
-rw-r--r--   src/glsl/opt_constant_propagation.cpp   |  17
8 files changed, 585 insertions(+), 82 deletions(-)
diff --git a/src/glsl/Makefile b/src/glsl/Makefile
index 4100414a37d..e0776c1b55d 100644
--- a/src/glsl/Makefile
+++ b/src/glsl/Makefile
@@ -154,7 +154,7 @@ clean: clean-dricore
-rm -f $(APPS)
clean-dricore:
- -rm -f $(DRICORE_OBJ_DIR) $(TOP)/$(LIB_DIR)/libglsl.so libglsl.so
+ -rm -f $(OBJECTS_DRICORE) $(TOP)/$(LIB_DIR)/libglsl.so libglsl.so
ifneq (,$(DRICORE_GLSL_LIBS))
DRICORE_INSTALL_TARGET = install-dricore
diff --git a/src/glsl/ir_function.cpp b/src/glsl/ir_function.cpp
index ef8d4fcfcd4..1255072a571 100644
--- a/src/glsl/ir_function.cpp
+++ b/src/glsl/ir_function.cpp
@@ -165,7 +165,7 @@ ir_function_signature *
ir_function::matching_signature(const exec_list *actual_parameters)
{
ir_function_signature *match = NULL;
- int matched_score;
+ int matched_score = 0;
foreach_iter(exec_list_iterator, iter, signatures) {
ir_function_signature *const sig =
diff --git a/src/glsl/ir_optimization.h b/src/glsl/ir_optimization.h
index dd265673c55..59a040751d9 100644
--- a/src/glsl/ir_optimization.h
+++ b/src/glsl/ir_optimization.h
@@ -56,10 +56,8 @@ bool do_if_simplification(exec_list *instructions);
bool do_discard_simplification(exec_list *instructions);
bool lower_if_to_cond_assign(exec_list *instructions, unsigned max_depth = 0);
bool do_mat_op_to_vec(exec_list *instructions);
-bool do_mod_to_fract(exec_list *instructions);
bool do_noop_swizzle(exec_list *instructions);
bool do_structure_splitting(exec_list *instructions);
-bool do_sub_to_add_neg(exec_list *instructions);
bool do_swizzle_swizzle(exec_list *instructions);
bool do_tree_grafting(exec_list *instructions);
bool do_vec_index_to_cond_assign(exec_list *instructions);
diff --git a/src/glsl/ir_print_visitor.cpp b/src/glsl/ir_print_visitor.cpp
index 5b5409daa22..518910bd129 100644
--- a/src/glsl/ir_print_visitor.cpp
+++ b/src/glsl/ir_print_visitor.cpp
@@ -96,6 +96,16 @@ void ir_print_visitor::indent(void)
const char *
ir_print_visitor::unique_name(ir_variable *var)
{
+ /* var->name can be NULL in function prototypes when a type is given for a
    * parameter but no name is given.  In that case, generate a unique name
    * for printing.  Don't worry about tracking the generated name in the
    * printable names hash because this is the only scope where it can ever
    * appear.
+ */
+ if (var->name == NULL) {
+ static unsigned arg = 1;
+ return ralloc_asprintf(this->mem_ctx, "parameter@%u", arg++);
+ }
+
/* Do we already have a name for this variable? */
const char *name = (const char *) hash_table_find(this->printable_names, var);
if (name != NULL)
diff --git a/src/glsl/ir_reader.cpp b/src/glsl/ir_reader.cpp
index 30df257be2f..f3a621734ba 100644
--- a/src/glsl/ir_reader.cpp
+++ b/src/glsl/ir_reader.cpp
@@ -482,19 +482,21 @@ ir_reader::read_return(s_expression *expr)
{
s_expression *s_retval;
- s_pattern pat[] = { "return", s_retval};
- if (!MATCH(expr, pat)) {
- ir_read_error(expr, "expected (return <rvalue>)");
- return NULL;
- }
-
- ir_rvalue *retval = read_rvalue(s_retval);
- if (retval == NULL) {
- ir_read_error(NULL, "when reading return value");
+ s_pattern return_value_pat[] = { "return", s_retval};
+ s_pattern return_void_pat[] = { "return" };
+ if (MATCH(expr, return_value_pat)) {
+ ir_rvalue *retval = read_rvalue(s_retval);
+ if (retval == NULL) {
+ ir_read_error(NULL, "when reading return value");
+ return NULL;
+ }
+ return new(mem_ctx) ir_return(retval);
+ } else if (MATCH(expr, return_void_pat)) {
+ return new(mem_ctx) ir_return;
+ } else {
+ ir_read_error(expr, "expected (return <rvalue>) or (return)");
return NULL;
}
-
- return new(mem_ctx) ir_return(retval);
}
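
Note: with this change read_return() accepts both a value form and a void form.
Purely for illustration (the operand shown is made up and its exact spelling in
the IR reader syntax may differ), the two accepted S-expressions look like:

    (return (constant float (1.000000)))
    (return)

The first binds s_retval and goes through read_rvalue(); the second matches the
new return_void_pat and produces a value-less ir_return.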
diff --git a/src/glsl/linker.cpp b/src/glsl/linker.cpp
index b6479e7a3a4..34b64837a46 100644
--- a/src/glsl/linker.cpp
+++ b/src/glsl/linker.cpp
@@ -1194,16 +1194,43 @@ find_available_slots(unsigned used_mask, unsigned needed_count)
}
+/**
+ * Assign locations for either VS inputs or FS outputs
+ *
+ * \param prog Shader program whose variables need locations assigned
+ * \param target_index Selector for the program target to receive location
+ * assignments. Must be either \c MESA_SHADER_VERTEX or
+ * \c MESA_SHADER_FRAGMENT.
+ * \param max_index Maximum number of generic locations. This corresponds
+ * to either the maximum number of draw buffers or the
+ * maximum number of generic attributes.
+ *
+ * \return
+ * If locations are successfully assigned, true is returned. Otherwise an
+ * error is emitted to the shader link log and false is returned.
+ *
+ * \bug
+ * Locations set via \c glBindFragDataLocation are not currently supported.
+ * Only locations assigned automatically by the linker, explicitly set by a
+ * layout qualifier, or explicitly set by a built-in variable (e.g., \c
+ * gl_FragColor) are supported for fragment shaders.
+ */
bool
-assign_attribute_locations(gl_shader_program *prog, unsigned max_attribute_index)
+assign_attribute_or_color_locations(gl_shader_program *prog,
+ unsigned target_index,
+ unsigned max_index)
{
- /* Mark invalid attribute locations as being used.
+ /* Mark invalid locations as being used.
*/
- unsigned used_locations = (max_attribute_index >= 32)
- ? ~0 : ~((1 << max_attribute_index) - 1);
+ unsigned used_locations = (max_index >= 32)
+ ? ~0 : ~((1 << max_index) - 1);
- gl_shader *const sh = prog->_LinkedShaders[0];
- assert(sh->Type == GL_VERTEX_SHADER);
+ assert((target_index == MESA_SHADER_VERTEX)
+ || (target_index == MESA_SHADER_FRAGMENT));
+
+ gl_shader *const sh = prog->_LinkedShaders[target_index];
+ if (sh == NULL)
+ return true;
/* Operate in a total of four passes.
*
@@ -1220,9 +1247,16 @@ assign_attribute_locations(gl_shader_program *prog, unsigned max_attribute_index
* 4. Assign locations to any inputs without assigned locations.
*/
- invalidate_variable_locations(sh, ir_var_in, VERT_ATTRIB_GENERIC0);
+ const int generic_base = (target_index == MESA_SHADER_VERTEX)
+ ? (int) VERT_ATTRIB_GENERIC0 : (int) FRAG_RESULT_DATA0;
+
+ const enum ir_variable_mode direction =
+ (target_index == MESA_SHADER_VERTEX) ? ir_var_in : ir_var_out;
+
- if (prog->Attributes != NULL) {
+ invalidate_variable_locations(sh, direction, generic_base);
+
+ if ((target_index == MESA_SHADER_VERTEX) && (prog->Attributes != NULL)) {
for (unsigned i = 0; i < prog->Attributes->NumParameters; i++) {
ir_variable *const var =
sh->symbols->get_variable(prog->Attributes->Parameters[i].Name);
@@ -1309,15 +1343,15 @@ assign_attribute_locations(gl_shader_program *prog, unsigned max_attribute_index
foreach_list(node, sh->ir) {
ir_variable *const var = ((ir_instruction *) node)->as_variable();
- if ((var == NULL) || (var->mode != ir_var_in))
+ if ((var == NULL) || (var->mode != direction))
continue;
if (var->explicit_location) {
const unsigned slots = count_attribute_slots(var->type);
const unsigned use_mask = (1 << slots) - 1;
- const int attr = var->location - VERT_ATTRIB_GENERIC0;
+ const int attr = var->location - generic_base;
- if ((var->location >= (int)(max_attribute_index + VERT_ATTRIB_GENERIC0))
+ if ((var->location >= (int)(max_index + generic_base))
|| (var->location < 0)) {
linker_error_printf(prog,
"invalid explicit location %d specified for "
@@ -1325,7 +1359,7 @@ assign_attribute_locations(gl_shader_program *prog, unsigned max_attribute_index
(var->location < 0) ? var->location : attr,
var->name);
return false;
- } else if (var->location >= VERT_ATTRIB_GENERIC0) {
+ } else if (var->location >= generic_base) {
used_locations |= (use_mask << attr);
}
}
@@ -1349,14 +1383,16 @@ assign_attribute_locations(gl_shader_program *prog, unsigned max_attribute_index
qsort(to_assign, num_attr, sizeof(to_assign[0]), temp_attr::compare);
- /* VERT_ATTRIB_GENERIC0 is a pseudo-alias for VERT_ATTRIB_POS. It can only
- * be explicitly assigned by via glBindAttribLocation. Mark it as reserved
- * to prevent it from being automatically allocated below.
- */
- find_deref_visitor find("gl_Vertex");
- find.run(sh->ir);
- if (find.variable_found())
- used_locations |= (1 << 0);
+ if (target_index == MESA_SHADER_VERTEX) {
+ /* VERT_ATTRIB_GENERIC0 is a pseudo-alias for VERT_ATTRIB_POS. It can
+ * only be explicitly assigned via glBindAttribLocation. Mark it as
+ * reserved to prevent it from being automatically allocated below.
+ */
+ find_deref_visitor find("gl_Vertex");
+ find.run(sh->ir);
+ if (find.variable_found())
+ used_locations |= (1 << 0);
+ }
for (unsigned i = 0; i < num_attr; i++) {
/* Mask representing the contiguous slots that will be used by this
@@ -1367,14 +1403,17 @@ assign_attribute_locations(gl_shader_program *prog, unsigned max_attribute_index
int location = find_available_slots(used_locations, to_assign[i].slots);
if (location < 0) {
+ const char *const string = (target_index == MESA_SHADER_VERTEX)
+ ? "vertex shader input" : "fragment shader output";
+
linker_error_printf(prog,
"insufficient contiguous attribute locations "
- "available for vertex shader input `%s'",
- to_assign[i].var->name);
+ "available for %s `%s'",
+ string, to_assign[i].var->name);
return false;
}
- to_assign[i].var->location = VERT_ATTRIB_GENERIC0 + location;
+ to_assign[i].var->location = generic_base + location;
used_locations |= (use_mask << location);
}
@@ -1671,16 +1710,19 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
assign_uniform_locations(prog);
- if (prog->_LinkedShaders[MESA_SHADER_VERTEX] != NULL) {
- /* FINISHME: The value of the max_attribute_index parameter is
- * FINISHME: implementation dependent based on the value of
- * FINISHME: GL_MAX_VERTEX_ATTRIBS. GL_MAX_VERTEX_ATTRIBS must be
- * FINISHME: at least 16, so hardcode 16 for now.
- */
- if (!assign_attribute_locations(prog, 16)) {
- prog->LinkStatus = false;
- goto done;
- }
+ /* FINISHME: The value of the max_attribute_index parameter is
+ * FINISHME: implementation dependent based on the value of
+ * FINISHME: GL_MAX_VERTEX_ATTRIBS. GL_MAX_VERTEX_ATTRIBS must be
+ * FINISHME: at least 16, so hardcode 16 for now.
+ */
+ if (!assign_attribute_or_color_locations(prog, MESA_SHADER_VERTEX, 16)) {
+ prog->LinkStatus = false;
+ goto done;
+ }
+
+ if (!assign_attribute_or_color_locations(prog, MESA_SHADER_FRAGMENT, ctx->Const.MaxDrawBuffers)) {
+ prog->LinkStatus = false;
+ goto done;
}
unsigned prev;
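
An aside on the bitmask bookkeeping above: used_locations starts with every
generic location at or above max_index marked as used, explicit locations then
reserve their bits, and find_available_slots() packs the remaining variables
into the first free contiguous run. Below is a minimal standalone sketch of
that scheme (not Mesa code; find_free_slots() is an assumed stand-in for the
real find_available_slots(), and the reserved slots are invented for the
example):

#include <cstdio>

/* Assumed stand-in for find_available_slots(): return the lowest bit index
 * at which needed_count contiguous free bits exist, or -1 if none. */
static int find_free_slots(unsigned used_mask, unsigned needed_count)
{
   unsigned needed_mask = (1u << needed_count) - 1;
   for (int i = 0; i + (int) needed_count <= 32; i++) {
      if ((used_mask & (needed_mask << i)) == 0)
         return i;
   }
   return -1;
}

int main()
{
   const unsigned max_index = 16;            /* e.g. GL_MAX_VERTEX_ATTRIBS */

   /* Generic locations at or above max_index are marked used up front. */
   unsigned used_locations = (max_index >= 32)
      ? ~0u : ~((1u << max_index) - 1);

   used_locations |= 1u << 0;                /* reserve slot 0 (the gl_Vertex case) */
   used_locations |= 0x3u << 4;              /* an explicit location using 2 slots */

   /* A variable needing 4 contiguous slots (e.g. a mat4 input) lands at 6. */
   int loc = find_free_slots(used_locations, 4);
   printf("assigned location %d\n", loc);
   used_locations |= ((1u << 4) - 1) << loc; /* mark its slots as used */
   return 0;
}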
diff --git a/src/glsl/lower_jumps.cpp b/src/glsl/lower_jumps.cpp
index dd2601d1aad..61874990a94 100644
--- a/src/glsl/lower_jumps.cpp
+++ b/src/glsl/lower_jumps.cpp
@@ -60,12 +60,76 @@
#include <string.h>
#include "ir.h"
+/**
+ * Enum recording the result of analyzing how control flow might exit
+ * an IR node.
+ *
+ * Each possible value of jump_strength indicates a strictly stronger
+ * guarantee on control flow than the previous value.
+ *
+ * The ordering of strengths roughly reflects the way jumps are
+ * lowered: jumps with higher strength tend to be lowered to jumps of
+ * lower strength. Accordingly, strength is used as a heuristic to
+ * determine which lowering to perform first.
+ *
+ * This enum is also used by get_jump_strength() to categorize
+ * instructions as either break, continue, return, or other. When
+ * used in this fashion, strength_always_clears_execute_flag is not
+ * used.
+ *
+ * The control flow analysis made by this optimization pass makes two
+ * simplifying assumptions:
+ *
+ * - It ignores discard instructions, since they are lowered by a
+ * separate pass (lower_discard.cpp).
+ *
+ * - It assumes it is always possible for control to flow from a loop
+ * to the instruction immediately following it. Technically, this
+ * is not true (since all execution paths through the loop might
+ * jump back to the top, or return from the function).
+ *
+ * Both of these simplifying assumptions are safe, since they can never
+ * cause reachable code to be incorrectly classified as unreachable;
+ * they can only do the opposite.
+ */
enum jump_strength
{
+ /**
+ * Analysis has produced no guarantee on how control flow might
+ * exit this IR node. It might fall out the bottom (with or
+ * without clearing the execute flag, if present), or it might
+ * continue to the top of the innermost enclosing loop, break out
+ * of it, or return from the function.
+ */
strength_none,
+
+ /**
+ * The only way control can fall out the bottom of this node is
+ * through a code path that clears the execute flag. It might also
+ * continue to the top of the innermost enclosing loop, break out
+ * of it, or return from the function.
+ */
strength_always_clears_execute_flag,
+
+ /**
+ * Control cannot fall out the bottom of this node. It might
+ * continue to the top of the innermost enclosing loop, break out
+ * of it, or return from the function.
+ */
strength_continue,
+
+ /**
+ * Control cannot fall out the bottom of this node, or continue to
+ * the top of the innermost enclosing loop. It can only break out of
+ * it or return from the function.
+ */
strength_break,
+
+ /**
+ * Control cannot fall out the bottom of this node, continue to the
+ * top of the innermost enclosing loop, or break out of it. It can
+ * only return from the function.
+ */
strength_return
};
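
A concrete example of how these strengths are used (a simplified sketch, not
the pass itself): when the two branches of an if statement end in jumps of
different strengths, visit(ir_if *) keeps only the weaker guarantee for the
statement as a whole, e.g. for "if (c) break; else return v;" the if node can
only promise strength_break. A minimal standalone C++ illustration of that
min-combination, with the enum redeclared locally for the example:

#include <algorithm>
#include <cassert>

/* Same ordering as the enum above. */
enum jump_strength {
   strength_none,
   strength_always_clears_execute_flag,
   strength_continue,
   strength_break,
   strength_return
};

int main()
{
   /* e.g. "if (c) break; else return v;" */
   jump_strength then_strength = strength_break;
   jump_strength else_strength = strength_return;

   /* Control may leave through either branch, so only the weaker
    * guarantee holds for the if statement as a whole. */
   jump_strength if_strength = std::min(then_strength, else_strength);
   assert(if_strength == strength_break);
   (void) if_strength;
   return 0;
}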
@@ -146,16 +210,17 @@ struct function_record
ir_function_signature* signature;
ir_variable* return_flag; /* used to break out of all loops and then jump to the return instruction */
ir_variable* return_value;
- bool is_main;
+ bool lower_return;
unsigned nesting_depth;
- function_record(ir_function_signature* p_signature = 0)
+ function_record(ir_function_signature* p_signature = 0,
+ bool lower_return = false)
{
this->signature = p_signature;
this->return_flag = 0;
this->return_value = 0;
this->nesting_depth = 0;
- this->is_main = this->signature && (strcmp(this->signature->function_name(), "main") == 0);
+ this->lower_return = lower_return;
}
ir_variable* get_return_flag()
@@ -180,6 +245,27 @@ struct function_record
};
struct ir_lower_jumps_visitor : public ir_control_flow_visitor {
+ /* Postconditions: on exit of any visit() function:
+ *
+ * ANALYSIS: this->block.min_strength,
+ * this->block.may_clear_execute_flag, and
+ * this->loop.may_set_return_flag are updated to reflect the
+ * characteristics of the visited statement.
+ *
+ * DEAD_CODE_ELIMINATION: If this->block.min_strength is not
+ * strength_none, the visited node is at the end of its exec_list.
+ * In other words, any unreachable statements that follow the
+ * visited statement in its exec_list have been removed.
+ *
+ * CONTAINED_JUMPS_LOWERED: If the visited statement contains other
+ * statements, then should_lower_jump() is false for all of the
+ * return, break, or continue statements it contains.
+ *
+ * Note that visiting a jump does not lower it. That is the
+ * responsibility of the statement (or function signature) that
+ * contains the jump.
+ */
+
bool progress;
struct function_record function;
@@ -218,20 +304,140 @@ struct ir_lower_jumps_visitor : public ir_control_flow_visitor {
}
}
+ /**
+ * Insert the instructions necessary to lower a return statement,
+ * before the given return instruction.
+ */
+ void insert_lowered_return(ir_return *ir)
+ {
+ ir_variable* return_flag = this->function.get_return_flag();
+ if(!this->function.signature->return_type->is_void()) {
+ ir_variable* return_value = this->function.get_return_value();
+ ir->insert_before(
+ new(ir) ir_assignment(
+ new (ir) ir_dereference_variable(return_value),
+ ir->value));
+ }
+ ir->insert_before(
+ new(ir) ir_assignment(
+ new (ir) ir_dereference_variable(return_flag),
+ new (ir) ir_constant(true)));
+ this->loop.may_set_return_flag = true;
+ }
+
+ /**
+ * If the given instruction is a return, lower it to instructions
+ * that store the return value (if there is one), set the return
+ * flag, and then break.
+ *
+ * It is safe to pass NULL to this function.
+ */
+ void lower_return_unconditionally(ir_instruction *ir)
+ {
+ if (get_jump_strength(ir) != strength_return) {
+ return;
+ }
+ insert_lowered_return((ir_return*)ir);
+ ir->replace_with(new(ir) ir_loop_jump(ir_loop_jump::jump_break));
+ }
+
+ /**
+ * Create the necessary instruction to replace a break instruction.
+ */
+ ir_instruction *create_lowered_break()
+ {
+ void *ctx = this->function.signature;
+ return new(ctx) ir_assignment(
+ new(ctx) ir_dereference_variable(this->loop.get_break_flag()),
+ new(ctx) ir_constant(true),
+ 0);
+ }
+
+ /**
+ * If the given instruction is a break, lower it to an instruction
+ * that sets the break flag, without consulting
+ * should_lower_jump().
+ *
+ * It is safe to pass NULL to this function.
+ */
+ void lower_break_unconditionally(ir_instruction *ir)
+ {
+ if (get_jump_strength(ir) != strength_break) {
+ return;
+ }
+ ir->replace_with(create_lowered_break());
+ }
+
+ /**
+ * If the block ends in a conditional or unconditional break, lower
+ * it, even though should_lower_jump() says it needn't be lowered.
+ */
+ void lower_final_breaks(exec_list *block)
+ {
+ ir_instruction *ir = (ir_instruction *) block->get_tail();
+ lower_break_unconditionally(ir);
+ ir_if *ir_if = ir->as_if();
+ if (ir_if) {
+ lower_break_unconditionally(
+ (ir_instruction *) ir_if->then_instructions.get_tail());
+ lower_break_unconditionally(
+ (ir_instruction *) ir_if->else_instructions.get_tail());
+ }
+ }
+
virtual void visit(class ir_loop_jump * ir)
{
+ /* Eliminate all instructions after each one, since they are
+ * unreachable. This satisfies the DEAD_CODE_ELIMINATION
+ * postcondition.
+ */
truncate_after_instruction(ir);
+
+ /* Set this->block.min_strength based on this instruction. This
+ * satisfies the ANALYSIS postcondition. It is not necessary to
+ * update this->block.may_clear_execute_flag or
+ * this->loop.may_set_return_flag, because an unlowered jump
+ * instruction can't change any flags.
+ */
this->block.min_strength = ir->is_break() ? strength_break : strength_continue;
+
+ /* The CONTAINED_JUMPS_LOWERED postcondition is already
+ * satisfied, because jump statements can't contain other
+ * statements.
+ */
}
virtual void visit(class ir_return * ir)
{
+ /* Eliminate all instructions after each one, since they are
+ * unreachable. This satisfies the DEAD_CODE_ELIMINATION
+ * postcondition.
+ */
truncate_after_instruction(ir);
+
+ /* Set this->block.min_strength based on this instruction. This
+ * satisfies the ANALYSIS postcondition. It is not necessary to
+ * update this->block.may_clear_execute_flag or
+ * this->loop.may_set_return_flag, because an unlowered return
+ * instruction can't change any flags.
+ */
this->block.min_strength = strength_return;
+
+ /* The CONTAINED_JUMPS_LOWERED postcondition is already
+ * satisfied, because jump statements can't contain other
+ * statements.
+ */
}
virtual void visit(class ir_discard * ir)
{
+ /* Nothing needs to be done. The ANALYSIS and
+ * DEAD_CODE_ELIMINATION postconditions are already satisfied,
+ * because discard statements are ignored by this optimization
+ * pass. The CONTAINED_JUMPS_LOWERED postcondition is already
+ * satisfied, because discard statements can't contain other
+ * statements.
+ */
}
enum jump_strength get_jump_strength(ir_instruction* ir)
@@ -274,10 +480,8 @@ struct ir_lower_jumps_visitor : public ir_control_flow_visitor {
/* never lower return at the end of the function */
if(this->function.nesting_depth == 0 && ir->get_next()->is_tail_sentinel())
lower = false;
- else if (this->function.is_main)
- lower = lower_main_return;
else
- lower = lower_sub_return;
+ lower = this->function.lower_return;
break;
}
return lower;
@@ -285,9 +489,20 @@ struct ir_lower_jumps_visitor : public ir_control_flow_visitor {
block_record visit_block(exec_list* list)
{
+ /* Note: since visiting a node may change that node's next
+ * pointer, we can't use visit_exec_list(), because
+ * visit_exec_list() caches the node's next pointer before
+ * visiting it. So we use foreach_list() instead.
+ *
+ * foreach_list() isn't safe if the node being visited gets
+ * removed, but fortunately this visitor doesn't do that.
+ */
+
block_record saved_block = this->block;
this->block = block_record();
- visit_exec_list(list, this);
+ foreach_list(node, list) {
+ ((ir_instruction *) node)->accept(this);
+ }
block_record ret = this->block;
this->block = saved_block;
return ret;
@@ -304,18 +519,34 @@ struct ir_lower_jumps_visitor : public ir_control_flow_visitor {
block_record block_records[2];
ir_jump* jumps[2];
+ /* Recursively lower nested jumps. This satisfies the
+ * CONTAINED_JUMPS_LOWERED postcondition, except in the case of
+ * unconditional jumps at the end of ir->then_instructions and
+ * ir->else_instructions, which are handled below.
+ */
block_records[0] = visit_block(&ir->then_instructions);
block_records[1] = visit_block(&ir->else_instructions);
retry: /* we get here if we put code after the if inside a branch */
- for(unsigned i = 0; i < 2; ++i) {
- exec_list& list = i ? ir->else_instructions : ir->then_instructions;
- jumps[i] = 0;
- if(!list.is_empty() && get_jump_strength((ir_instruction*)list.get_tail()))
- jumps[i] = (ir_jump*)list.get_tail();
- }
+ /* Determine which of ir->then_instructions and
+ * ir->else_instructions end with an unconditional jump.
+ */
+ for(unsigned i = 0; i < 2; ++i) {
+ exec_list& list = i ? ir->else_instructions : ir->then_instructions;
+ jumps[i] = 0;
+ if(!list.is_empty() && get_jump_strength((ir_instruction*)list.get_tail()))
+ jumps[i] = (ir_jump*)list.get_tail();
+ }
+
+ /* Loop until we have satisfied the CONTAINED_JUMPS_LOWERED
+ * postcondition by lowering jumps in both then_instructions and
+ * else_instructions.
+ */
for(;;) {
+ /* Determine the types of the jumps that terminate
+ * ir->then_instructions and ir->else_instructions.
+ */
jump_strength jump_strengths[2];
for(unsigned i = 0; i < 2; ++i) {
@@ -326,7 +557,12 @@ retry: /* we get here if we put code after the if inside a branch */
jump_strengths[i] = strength_none;
}
- /* move both jumps out if possible */
+ /* If both code paths end in a jump, and the jumps are the
+ * same, and we are pulling out jumps, replace them with a
+ * single jump that comes after the if instruction. The new
+ * jump will be visited next, and it will be lowered if
+ * necessary by the loop or conditional that encloses it.
+ */
if(pull_out_jumps && jump_strengths[0] == jump_strengths[1]) {
bool unify = true;
if(jump_strengths[0] == strength_continue)
@@ -344,10 +580,19 @@ retry: /* we get here if we put code after the if inside a branch */
jumps[1]->remove();
this->progress = true;
+ /* Update jumps[] to reflect the fact that the jumps
+ * are gone, and update block_records[] to reflect the
+ * fact that control can now flow to the next
+ * instruction.
+ */
jumps[0] = 0;
jumps[1] = 0;
block_records[0].min_strength = strength_none;
block_records[1].min_strength = strength_none;
+
+ /* The CONTAINED_JUMPS_LOWERED postcondition is now
+ * satisfied, so we can break out of the loop.
+ */
break;
}
}
@@ -367,50 +612,91 @@ retry: /* we get here if we put code after the if inside a branch */
else if(should_lower[1])
lower = 1;
else
+ /* Neither code path ends in a jump that needs to be
+ * lowered, so the CONTAINED_JUMPS_LOWERED postcondition
+ * is satisfied and we can break out of the loop.
+ */
break;
if(jump_strengths[lower] == strength_return) {
- ir_variable* return_flag = this->function.get_return_flag();
- if(!this->function.signature->return_type->is_void()) {
- ir_variable* return_value = this->function.get_return_value();
- jumps[lower]->insert_before(new(ir) ir_assignment(new (ir) ir_dereference_variable(return_value), ((ir_return*)jumps[lower])->value, NULL));
- }
- jumps[lower]->insert_before(new(ir) ir_assignment(new (ir) ir_dereference_variable(return_flag), new (ir) ir_constant(true), NULL));
- this->loop.may_set_return_flag = true;
+ /* To lower a return, we create a return flag (if the
+ * function doesn't have one already) and add instructions
+ * that: 1. store the return value (if this function has a
+ * non-void return) and 2. set the return flag
+ */
+ insert_lowered_return((ir_return*)jumps[lower]);
if(this->loop.loop) {
+ /* If we are in a loop, replace the return instruction
+ * with a break instruction, and then loop so that the
+ * break instruction can be lowered if necessary.
+ */
ir_loop_jump* lowered = 0;
lowered = new(ir) ir_loop_jump(ir_loop_jump::jump_break);
+ /* Note: we must update block_records and jumps to
+ * reflect the fact that the control path has been
+ * altered from a return to a break.
+ */
block_records[lower].min_strength = strength_break;
jumps[lower]->replace_with(lowered);
jumps[lower] = lowered;
- } else
+ } else {
+ /* If we are not in a loop, we then proceed as we would
+ * for a continue statement (set the execute flag to
+ * false to prevent the rest of the function from
+ * executing).
+ */
goto lower_continue;
+ }
this->progress = true;
} else if(jump_strengths[lower] == strength_break) {
- /* We can't lower to an actual continue because that would execute the increment.
+ /* To lower a break, we create a break flag (if the loop
+ * doesn't have one already) and add an instruction that
+ * sets it.
*
- * In the lowered code, we instead put the break check between the this->loop body and the increment,
- * which is impossible with a real continue as defined by the GLSL IR currently.
+ * Then we proceed as we would for a continue statement
+ * (set the execute flag to false to prevent the rest of
+ * the loop body from executing).
*
- * Smarter options (such as undoing the increment) are possible but it's not worth implementing them,
- * because if break is lowered, continue is almost surely lowered too.
+ * The visit() function for the loop will ensure that the
+ * break flag is checked after executing the loop body.
*/
- jumps[lower]->insert_before(new(ir) ir_assignment(new (ir) ir_dereference_variable(this->loop.get_break_flag()), new (ir) ir_constant(true), 0));
+ jumps[lower]->insert_before(create_lowered_break());
goto lower_continue;
} else if(jump_strengths[lower] == strength_continue) {
lower_continue:
+ /* To lower a continue, we create an execute flag (if the
+ * loop doesn't have one already) and replace the continue
+ * with an instruction that clears it.
+ *
+ * Note that this code path gets exercised when lowering
+ * return statements that are not inside a loop, so
+ * this->loop must be initialized even outside of loops.
+ */
ir_variable* execute_flag = this->loop.get_execute_flag();
jumps[lower]->replace_with(new(ir) ir_assignment(new (ir) ir_dereference_variable(execute_flag), new (ir) ir_constant(false), 0));
+ /* Note: we must update block_records and jumps to reflect
+ * the fact that the control path has been altered to an
+ * instruction that clears the execute flag.
+ */
jumps[lower] = 0;
block_records[lower].min_strength = strength_always_clears_execute_flag;
block_records[lower].may_clear_execute_flag = true;
this->progress = true;
- break;
+
+ /* Let the loop run again, in case the other branch of the
+ * if needs to be lowered too.
+ */
}
}
/* move out a jump out if possible */
if(pull_out_jumps) {
+ /* If one of the branches ends in a jump, and control cannot
+ * fall out the bottom of the other branch, then we can move
+ * the jump after the if.
+ *
+ * Set move_out to the branch we are moving a jump out of.
+ */
int move_out = -1;
if(jumps[0] && block_records[1].min_strength >= strength_continue)
move_out = 0;
@@ -421,22 +707,46 @@ lower_continue:
{
jumps[move_out]->remove();
ir->insert_after(jumps[move_out]);
+ /* Note: we must update block_records and jumps to reflect
+ * the fact that the jump has been moved out of the if.
+ */
jumps[move_out] = 0;
block_records[move_out].min_strength = strength_none;
this->progress = true;
}
}
+ /* Now satisfy the ANALYSIS postcondition by setting
+ * this->block.min_strength and
+ * this->block.may_clear_execute_flag based on the
+ * characteristics of the two branches.
+ */
if(block_records[0].min_strength < block_records[1].min_strength)
this->block.min_strength = block_records[0].min_strength;
else
this->block.min_strength = block_records[1].min_strength;
this->block.may_clear_execute_flag = this->block.may_clear_execute_flag || block_records[0].may_clear_execute_flag || block_records[1].may_clear_execute_flag;
+ /* Now we need to clean up the instructions that follow the
+ * if.
+ *
+ * If those instructions are unreachable, then satisfy the
+ * DEAD_CODE_ELIMINATION postcondition by eliminating them.
+ * Otherwise that postcondition is already satisfied.
+ */
if(this->block.min_strength)
truncate_after_instruction(ir);
else if(this->block.may_clear_execute_flag)
{
+ /* If the "if" instruction might clear the execute flag, then
+ * we need to guard any instructions that follow so that they
+ * are only executed if the execute flag is set.
+ *
+ * If one of the branches of the "if" always clears the
+ * execute flag, and the other branch never clears it, then
+ * this is easy: just move all the instructions following the
+ * "if" into the branch that never clears it.
+ */
int move_into = -1;
if(block_records[0].min_strength && !block_records[1].may_clear_execute_flag)
move_into = 1;
@@ -451,14 +761,34 @@ lower_continue:
if(!next->is_tail_sentinel()) {
move_outer_block_inside(ir, list);
+ /* If any instructions moved, then we need to visit
+ * them (since they are now inside the "if"). Since
+ * block_records[move_into] is in its default state
+ * (see assertion above), we can safely replace
+ * block_records[move_into] with the result of this
+ * analysis.
+ */
exec_list list;
list.head = next;
block_records[move_into] = visit_block(&list);
+ /*
+ * Then we need to re-start our jump lowering, since one
+ * of the instructions we moved might be a jump that
+ * needs to be lowered.
+ */
this->progress = true;
goto retry;
}
} else {
+ /* If we get here, then the simple case didn't apply; we
+ * need to actually guard the instructions that follow.
+ *
+ * To avoid creating unnecessarily-deep nesting, first
+ * look through the instructions that follow and unwrap
* any instructions that are already wrapped in the
+ * appropriate guard.
+ */
ir_instruction* ir_after;
for(ir_after = (ir_instruction*)ir->get_next(); !ir_after->is_tail_sentinel();)
{
@@ -479,6 +809,9 @@ lower_continue:
this->progress = true;
}
+ /* Then, wrap all the instructions that follow in a single
+ * guard.
+ */
if(!ir->get_next()->is_tail_sentinel()) {
assert(this->loop.execute_flag);
ir_if* if_execute = new(ir) ir_if(new(ir) ir_dereference_variable(this->loop.execute_flag));
@@ -493,29 +826,111 @@ lower_continue:
virtual void visit(ir_loop *ir)
{
+ /* Visit the body of the loop, with a fresh data structure in
+ * this->loop so that the analysis we do here won't bleed into
+ * enclosing loops.
+ *
+ * We assume that all code after a loop is reachable from the
+ * loop (see comments on enum jump_strength), so the
+ * DEAD_CODE_ELIMINATION postcondition is automatically
+ * satisfied, as is the block.min_strength portion of the
+ * ANALYSIS postcondition.
+ *
+ * The block.may_clear_execute_flag portion of the ANALYSIS
+ * postcondition is automatically satisfied because execute
+ * flags do not propagate outside of loops.
+ *
+ * The loop.may_set_return_flag portion of the ANALYSIS
+ * postcondition is handled below.
+ */
++this->function.nesting_depth;
loop_record saved_loop = this->loop;
this->loop = loop_record(this->function.signature, ir);
+ /* Recursively lower nested jumps. This satisfies the
+ * CONTAINED_JUMPS_LOWERED postcondition, except in the case of
+ * an unconditional continue or return at the bottom of the
+ * loop, which are handled below.
+ */
block_record body = visit_block(&ir->body_instructions);
+ /* If the loop ends in an unconditional continue, eliminate it
+ * because it is redundant.
+ */
+ ir_instruction *ir_last
+ = (ir_instruction *) ir->body_instructions.get_tail();
+ if (get_jump_strength(ir_last) == strength_continue) {
+ ir_last->remove();
+ }
+
+ /* If the loop ends in an unconditional return, and we are
+ * lowering returns, lower it.
+ */
+ if (this->function.lower_return)
+ lower_return_unconditionally(ir_last);
+
if(body.min_strength >= strength_break) {
- /* FINISHME: turn the this->loop into an if, or replace it with its body */
+ /* FINISHME: If the min_strength of the loop body is
+ * strength_break or strength_return, that means that it
+ * isn't a loop at all, since control flow always leaves the
+ * body of the loop via break or return. In principle the
+ * loop could be eliminated in this case. This optimization
+ * is not implemented yet.
+ */
}
if(this->loop.break_flag) {
+ /* We only get here if we are lowering breaks */
+ assert (lower_break);
+
+ /* If a break flag was generated while visiting the body of
+ * the loop, then at least one break was lowered, so we need
+ * to generate an if statement at the end of the loop that
+ * does a "break" if the break flag is set. The break we
+ * generate won't violate the CONTAINED_JUMPS_LOWERED
+ * postcondition, because should_lower_jump() always returns
+ * false for a break that happens at the end of a loop.
+ *
+ * However, if the loop already ends in a conditional or
+ * unconditional break, then we need to lower that break,
+ * because it won't be at the end of the loop anymore.
+ */
+ lower_final_breaks(&ir->body_instructions);
+
ir_if* break_if = new(ir) ir_if(new(ir) ir_dereference_variable(this->loop.break_flag));
break_if->then_instructions.push_tail(new(ir) ir_loop_jump(ir_loop_jump::jump_break));
ir->body_instructions.push_tail(break_if);
}
+ /* If the body of the loop may set the return flag, then at
+ * least one return was lowered to a break, so we need to ensure
+ * that the return flag is checked after the body of the loop is
+ * executed.
+ */
if(this->loop.may_set_return_flag) {
assert(this->function.return_flag);
+ /* Generate the if statement to check the return flag */
ir_if* return_if = new(ir) ir_if(new(ir) ir_dereference_variable(this->function.return_flag));
+ /* Note: we also need to propagate the knowledge that the
+ * return flag may get set to the outer context. This
+ * satisfies the loop.may_set_return_flag part of the
+ * ANALYSIS postcondition.
+ */
saved_loop.may_set_return_flag = true;
if(saved_loop.loop)
+ /* If this loop is nested inside another one, then the if
+ * statement that we generated should break out of that
+ * loop if the return flag is set. Caller will lower that
+ * break statement if necessary.
+ */
return_if->then_instructions.push_tail(new(ir) ir_loop_jump(ir_loop_jump::jump_break));
else
+ /* Otherwise, all we need to do is ensure that the
+ * instructions that follow are only executed if the
+ * return flag is clear. We can do that by moving those
+ * instructions into the else clause of the generated if
+ * statement.
+ */
move_outer_block_inside(ir, &return_if->else_instructions);
ir->insert_after(return_if);
}
@@ -530,14 +945,39 @@ lower_continue:
assert(!this->function.signature);
assert(!this->loop.loop);
+ bool lower_return;
+ if (strcmp(ir->function_name(), "main") == 0)
+ lower_return = lower_main_return;
+ else
+ lower_return = lower_sub_return;
+
function_record saved_function = this->function;
loop_record saved_loop = this->loop;
- this->function = function_record(ir);
+ this->function = function_record(ir, lower_return);
this->loop = loop_record(ir);
assert(!this->loop.loop);
+
+ /* Visit the body of the function to lower any jumps that occur
+ * in it, except possibly an unconditional return statement at
+ * the end of it.
+ */
visit_block(&ir->body);
+ /* If the body ended in an unconditional return of non-void,
+ * then we don't need to lower it because it's the one canonical
+ * return.
+ *
+ * If the body ended in a return of void, eliminate it because
+ * it is redundant.
+ */
+ if (ir->return_type->is_void() &&
+ get_jump_strength((ir_instruction *) ir->body.get_tail())) {
+ ir_jump *jump = (ir_jump *) ir->body.get_tail();
+ assert (jump->ir_type == ir_type_return);
+ jump->remove();
+ }
+
if(this->function.return_value)
ir->body.push_tail(new(ir) ir_return(new (ir) ir_dereference_variable(this->function.return_value)));
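
A reader-oriented aside: the overall effect of the return lowering implemented
above is easier to see on a small example. The following is a hand-written C++
analogue rather than actual pass output (the pass rewrites GLSL IR, and with
lower_break set the break itself would be further lowered to a break_flag
store), but it shows the shape of the transformation using the return_flag /
return_value naming from function_record:

#include <cstdio>

/* Before: an early return from inside a loop -- the kind of jump this
 * pass lowers when lower_sub_return / lower_main_return applies. */
static float first_negative(const float *v, int n)
{
   for (int i = 0; i < n; i++) {
      if (v[i] < 0.0f)
         return v[i];
   }
   return 0.0f;
}

/* After (sketch): the return becomes "store the value, set the flag,
 * break", and the code after the loop only runs while the flag is clear. */
static float first_negative_lowered(const float *v, int n)
{
   bool return_flag = false;
   float return_value = 0.0f;

   for (int i = 0; i < n; i++) {
      if (v[i] < 0.0f) {
         return_value = v[i];
         return_flag = true;
         break;                    /* lowered from the original return */
      }
   }

   if (!return_flag)
      return_value = 0.0f;         /* the fall-through return value */

   return return_value;            /* the one canonical return */
}

int main()
{
   const float v[] = { 1.0f, -2.5f, 3.0f };
   printf("%f %f\n", first_negative(v, 3), first_negative_lowered(v, 3));
   return 0;
}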
diff --git a/src/glsl/opt_constant_propagation.cpp b/src/glsl/opt_constant_propagation.cpp
index 4425f421193..af77e490689 100644
--- a/src/glsl/opt_constant_propagation.cpp
+++ b/src/glsl/opt_constant_propagation.cpp
@@ -51,11 +51,23 @@ public:
this->var = var;
this->write_mask = write_mask;
this->constant = constant;
+ this->initial_values = write_mask;
+ }
+
+ acp_entry(const acp_entry *src)
+ {
+ this->var = src->var;
+ this->write_mask = src->write_mask;
+ this->constant = src->constant;
+ this->initial_values = src->initial_values;
}
ir_variable *var;
ir_constant *constant;
unsigned write_mask;
+
+ /** Mask of values initially available in the constant. */
+ unsigned initial_values;
};
@@ -172,7 +184,7 @@ ir_constant_propagation_visitor::handle_rvalue(ir_rvalue **rvalue)
for (int j = 0; j < 4; j++) {
if (j == channel)
break;
- if (found->write_mask & (1 << j))
+ if (found->initial_values & (1 << j))
rhs_channel++;
}
@@ -285,8 +297,7 @@ ir_constant_propagation_visitor::handle_if_block(exec_list *instructions)
/* Populate the initial acp with a constant of the original */
foreach_iter(exec_list_iterator, iter, *orig_acp) {
acp_entry *a = (acp_entry *)iter.get();
- this->acp->push_tail(new(this->mem_ctx) acp_entry(a->var, a->write_mask,
- a->constant));
+ this->acp->push_tail(new(this->mem_ctx) acp_entry(a));
}
visit_list_elements(this, instructions);
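
The initial_values / write_mask distinction above is easiest to see with a
small standalone model (not Mesa code; the struct and helper below are
assumptions made purely for illustration). The stored constant packs one
component per channel that was constant when the entry was created, so channel
lookups must index by that original mask even after some channels have been
killed:

#include <cassert>

struct entry {
   unsigned write_mask;      /* channels still known to be constant */
   unsigned initial_values;  /* channels packed into values[] at creation */
   float values[4];          /* packed: one slot per bit in initial_values */
};

/* Index of 'channel' within the packed values[] array. */
static int packed_index(unsigned mask, int channel)
{
   int idx = 0;
   for (int j = 0; j < channel; j++)
      if (mask & (1u << j))
         idx++;
   return idx;
}

int main()
{
   /* v.xy = vec2(1.0, 2.0);  ->  entry covering channels x and y */
   entry e = { 0x3, 0x3, { 1.0f, 2.0f } };

   /* v.x = foo();  ->  channel x is no longer a known constant */
   e.write_mask &= ~0x1u;

   /* Propagating v.y must still read 2.0: indexing by the original mask
    * is correct, while indexing by the current write_mask would return
    * values[0] == 1.0 (the old behavior this patch fixes). */
   assert(e.values[packed_index(e.initial_values, 1)] == 2.0f);
   assert(e.values[packed_index(e.write_mask, 1)] == 1.0f);
   return 0;
}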