author    Kenneth Graunke <[email protected]>    2015-04-07 16:28:10 -0700
committer Kenneth Graunke <[email protected]>    2015-04-10 02:16:37 -0700
commit    ae17f348502c0665fcead8daf8abaef8aa152b03 (patch)
tree      704f570673bfc343dbd8934132bd0e45d3fc6704 /src
parent    10d85ffc5a74bbce4decd7fe2aedd856ce7d39cb (diff)
i965: Move brw_link_shader's GLSL IR transformations into a helper.
This function was getting a bit large and unwieldy.

Signed-off-by: Kenneth Graunke <[email protected]>
Reviewed-by: Jason Ekstrand <[email protected]>
Reviewed-by: Ian Romanick <[email protected]>
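At a glance, the change extracts the per-stage GLSL IR lowering and optimization pipeline into a new process_glsl_ir() helper, leaving brw_link_shader() with a single call per shader stage. A rough sketch of the resulting loop shape, abridged from the diff below (the elided steps are placeholders, not the complete function):

   /* Sketch only: surrounding setup and uniform handling are elided. */
   for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
      struct gl_shader *shader = shProg->_LinkedShaders[stage];

      if (!shader)
         continue;

      /* ...per-stage gl_program setup elided... */

      /* Formerly ~70 lines of inline lowering/optimization passes. */
      process_glsl_ir(brw, shProg, shader);

      /* ...built-in uniform state references, do_set_program_inouts(),
       * sampler bookkeeping, etc. elided...
       */
   }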
Diffstat (limited to 'src')
-rw-r--r--    src/mesa/drivers/dri/i965/brw_shader.cpp    192
1 file changed, 99 insertions(+), 93 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/brw_shader.cpp b/src/mesa/drivers/dri/i965/brw_shader.cpp
index 9fad02cbbee..bf9aceb9923 100644
--- a/src/mesa/drivers/dri/i965/brw_shader.cpp
+++ b/src/mesa/drivers/dri/i965/brw_shader.cpp
@@ -120,6 +120,104 @@ brw_lower_packing_builtins(struct brw_context *brw,
    lower_packing_builtins(ir, ops);
 }
 
+static void
+process_glsl_ir(struct brw_context *brw,
+                struct gl_shader_program *shader_prog,
+                struct gl_shader *shader)
+{
+   struct gl_context *ctx = &brw->ctx;
+   const struct gl_shader_compiler_options *options =
+      &ctx->Const.ShaderCompilerOptions[shader->Stage];
+
+   /* Temporary memory context for any new IR. */
+   void *mem_ctx = ralloc_context(NULL);
+
+   ralloc_adopt(mem_ctx, shader->ir);
+
+   /* lower_packing_builtins() inserts arithmetic instructions, so it
+    * must precede lower_instructions().
+    */
+   brw_lower_packing_builtins(brw, shader->Stage, shader->ir);
+   do_mat_op_to_vec(shader->ir);
+   const int bitfield_insert = brw->gen >= 7 ? BITFIELD_INSERT_TO_BFM_BFI : 0;
+   lower_instructions(shader->ir,
+                      MOD_TO_FLOOR |
+                      DIV_TO_MUL_RCP |
+                      SUB_TO_ADD_NEG |
+                      EXP_TO_EXP2 |
+                      LOG_TO_LOG2 |
+                      bitfield_insert |
+                      LDEXP_TO_ARITH);
+
+   /* Pre-gen6 HW can only nest if-statements 16 deep. Beyond this,
+    * if-statements need to be flattened.
+    */
+   if (brw->gen < 6)
+      lower_if_to_cond_assign(shader->ir, 16);
+
+   do_lower_texture_projection(shader->ir);
+   brw_lower_texture_gradients(brw, shader->ir);
+   do_vec_index_to_cond_assign(shader->ir);
+   lower_vector_insert(shader->ir, true);
+   if (options->NirOptions == NULL)
+      brw_do_cubemap_normalize(shader->ir);
+   lower_offset_arrays(shader->ir);
+   brw_do_lower_unnormalized_offset(shader->ir);
+   lower_noise(shader->ir);
+   lower_quadop_vector(shader->ir, false);
+
+   bool lowered_variable_indexing =
+      lower_variable_index_to_cond_assign(shader->ir,
+                                          options->EmitNoIndirectInput,
+                                          options->EmitNoIndirectOutput,
+                                          options->EmitNoIndirectTemp,
+                                          options->EmitNoIndirectUniform);
+
+   if (unlikely(brw->perf_debug && lowered_variable_indexing)) {
+      perf_debug("Unsupported form of variable indexing in FS; falling "
+                 "back to very inefficient code generation\n");
+   }
+
+   lower_ubo_reference(shader, shader->ir);
+
+   bool progress;
+   do {
+      progress = false;
+
+      if (is_scalar_shader_stage(brw, shader->Stage)) {
+         brw_do_channel_expressions(shader->ir);
+         brw_do_vector_splitting(shader->ir);
+      }
+
+      progress = do_lower_jumps(shader->ir, true, true,
+                                true, /* main return */
+                                false, /* continue */
+                                false /* loops */
+                                ) || progress;
+
+      progress = do_common_optimization(shader->ir, true, true,
+                                        options, ctx->Const.NativeIntegers) || progress;
+   } while (progress);
+
+   validate_ir_tree(shader->ir);
+
+   /* Now that we've finished altering the linked IR, reparent any live IR back
+    * to the permanent memory context, and free the temporary one (discarding any
+    * junk we optimized away).
+    */
+   reparent_ir(shader->ir, shader->ir);
+   ralloc_free(mem_ctx);
+
+   if (ctx->_Shader->Flags & GLSL_DUMP) {
+      fprintf(stderr, "\n");
+      fprintf(stderr, "GLSL IR for linked %s program %d:\n",
+              _mesa_shader_stage_to_string(shader->Stage),
+              shader_prog->Name);
+      _mesa_print_ir(stderr, shader->ir, NULL);
+      fprintf(stderr, "\n");
+   }
+}
+
 GLboolean
 brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
 {
@@ -127,8 +225,6 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
    unsigned int stage;
 
    for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
-      const struct gl_shader_compiler_options *options =
-         &ctx->Const.ShaderCompilerOptions[stage];
       struct gl_shader *shader = shProg->_LinkedShaders[stage];
 
       if (!shader)
@@ -143,79 +239,7 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
 
       _mesa_copy_linked_program_data((gl_shader_stage) stage, shProg, prog);
 
-      /* Temporary memory context for any new IR. */
-      void *mem_ctx = ralloc_context(NULL);
-
-      ralloc_adopt(mem_ctx, shader->ir);
-
-      bool progress;
-
-      /* lower_packing_builtins() inserts arithmetic instructions, so it
-       * must precede lower_instructions().
-       */
-      brw_lower_packing_builtins(brw, (gl_shader_stage) stage, shader->ir);
-      do_mat_op_to_vec(shader->ir);
-      const int bitfield_insert = brw->gen >= 7
-                                  ? BITFIELD_INSERT_TO_BFM_BFI
-                                  : 0;
-      lower_instructions(shader->ir,
-                         MOD_TO_FLOOR |
-                         DIV_TO_MUL_RCP |
-                         SUB_TO_ADD_NEG |
-                         EXP_TO_EXP2 |
-                         LOG_TO_LOG2 |
-                         bitfield_insert |
-                         LDEXP_TO_ARITH);
-
-      /* Pre-gen6 HW can only nest if-statements 16 deep. Beyond this,
-       * if-statements need to be flattened.
-       */
-      if (brw->gen < 6)
-         lower_if_to_cond_assign(shader->ir, 16);
-
-      do_lower_texture_projection(shader->ir);
-      brw_lower_texture_gradients(brw, shader->ir);
-      do_vec_index_to_cond_assign(shader->ir);
-      lower_vector_insert(shader->ir, true);
-      if (options->NirOptions == NULL)
-         brw_do_cubemap_normalize(shader->ir);
-      lower_offset_arrays(shader->ir);
-      brw_do_lower_unnormalized_offset(shader->ir);
-      lower_noise(shader->ir);
-      lower_quadop_vector(shader->ir, false);
-
-      bool lowered_variable_indexing =
-         lower_variable_index_to_cond_assign(shader->ir,
-                                             options->EmitNoIndirectInput,
-                                             options->EmitNoIndirectOutput,
-                                             options->EmitNoIndirectTemp,
-                                             options->EmitNoIndirectUniform);
-
-      if (unlikely(brw->perf_debug && lowered_variable_indexing)) {
-         perf_debug("Unsupported form of variable indexing in FS; falling "
-                    "back to very inefficient code generation\n");
-      }
-
-      lower_ubo_reference(shader, shader->ir);
-
-      do {
-         progress = false;
-
-         if (is_scalar_shader_stage(brw, stage)) {
-            brw_do_channel_expressions(shader->ir);
-            brw_do_vector_splitting(shader->ir);
-         }
-
-         progress = do_lower_jumps(shader->ir, true, true,
-                                   true, /* main return */
-                                   false, /* continue */
-                                   false /* loops */
-                                   ) || progress;
-
-         progress = do_common_optimization(shader->ir, true, true,
-                                           options, ctx->Const.NativeIntegers)
-            || progress;
-      } while (progress);
+      process_glsl_ir(brw, shProg, shader);
 
       /* Make a pass over the IR to add state references for any built-in
        * uniforms that are used. This has to be done now (during linking).
@@ -240,8 +264,6 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
          }
       }
 
-      validate_ir_tree(shader->ir);
-
       do_set_program_inouts(shader->ir, prog, shader->Stage);
 
       prog->SamplersUsed = shader->active_samplers;
@@ -253,22 +275,6 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
       brw_add_texrect_params(prog);
 
       _mesa_reference_program(ctx, &prog, NULL);
-
-      /* Now that we've finished altering the linked IR, reparent any live IR back
-       * to the permanent memory context, and free the temporary one (discarding any
-       * junk we optimized away).
-       */
-      reparent_ir(shader->ir, shader->ir);
-      ralloc_free(mem_ctx);
-
-      if (ctx->_Shader->Flags & GLSL_DUMP) {
-         fprintf(stderr, "\n");
-         fprintf(stderr, "GLSL IR for linked %s program %d:\n",
-                 _mesa_shader_stage_to_string(shader->Stage),
-                 shProg->Name);
-         _mesa_print_ir(stderr, shader->ir, NULL);
-         fprintf(stderr, "\n");
-      }
    }
 
    if ((ctx->_Shader->Flags & GLSL_DUMP) && shProg->Name != 0) {