author     Christian Gmeiner <[email protected]>   2018-06-15 12:29:59 +0200
committer  Christian Gmeiner <[email protected]>   2018-06-20 17:56:37 +0200
commit     8b3099353e2511c2b2fa0e979f09129f493bd77c (patch)
tree       621cb88074da0f98b739ef9e6ea8284189b65ac9 /src/mesa/state_tracker
parent     f485e5671c97468fc445da45a9ecc9d9c7dd8243 (diff)
mesa/st: only do scalar lowerings if driver benefits
As not every (upcoming) backend compiler is happy with the
nir_lower_xxx_to_scalar lowerings, do them only if the backend
is scalar (and not vec4) based.
Signed-off-by: Christian Gmeiner <[email protected]>
Reviewed-by: Eric Anholt <[email protected]>
Diffstat (limited to 'src/mesa/state_tracker')
-rw-r--r--   src/mesa/state_tracker/st_glsl_to_nir.cpp   39
1 file changed, 30 insertions, 9 deletions
diff --git a/src/mesa/state_tracker/st_glsl_to_nir.cpp b/src/mesa/state_tracker/st_glsl_to_nir.cpp
index 3fc04c7b8b7..d24944c6af5 100644
--- a/src/mesa/state_tracker/st_glsl_to_nir.cpp
+++ b/src/mesa/state_tracker/st_glsl_to_nir.cpp
@@ -317,15 +317,18 @@ st_nir_assign_uniform_locations(struct gl_context *ctx,
 }
 
 void
-st_nir_opts(nir_shader *nir)
+st_nir_opts(nir_shader *nir, bool scalar)
 {
    bool progress;
    do {
       progress = false;
 
       NIR_PASS_V(nir, nir_lower_vars_to_ssa);
-      NIR_PASS_V(nir, nir_lower_alu_to_scalar);
-      NIR_PASS_V(nir, nir_lower_phis_to_scalar);
+
+      if (scalar) {
+         NIR_PASS_V(nir, nir_lower_alu_to_scalar);
+         NIR_PASS_V(nir, nir_lower_phis_to_scalar);
+      }
 
       NIR_PASS_V(nir, nir_lower_alu);
       NIR_PASS_V(nir, nir_lower_pack);
@@ -364,6 +367,9 @@ st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
 {
    const nir_shader_compiler_options *options =
       st->ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;
+   enum pipe_shader_type type = pipe_shader_type_from_mesa(stage);
+   struct pipe_screen *screen = st->pipe->screen;
+   bool is_scalar = screen->get_shader_param(screen, type, PIPE_SHADER_CAP_SCALAR_ISA);
    assert(options);
 
    if (prog->nir)
@@ -406,7 +412,7 @@ st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
    NIR_PASS_V(nir, nir_split_var_copies);
    NIR_PASS_V(nir, nir_lower_var_copies);
 
-   st_nir_opts(nir);
+   st_nir_opts(nir, is_scalar);
 
    return nir;
 }
@@ -588,7 +594,7 @@ st_nir_get_mesa_program(struct gl_context *ctx,
 }
 
 static void
-st_nir_link_shaders(nir_shader **producer, nir_shader **consumer)
+st_nir_link_shaders(nir_shader **producer, nir_shader **consumer, bool scalar)
 {
    nir_lower_io_arrays_to_elements(*producer, *consumer);
 
@@ -615,8 +621,8 @@ st_nir_link_shaders(nir_shader **producer, nir_shader **consumer)
       NIR_PASS_V(*producer, nir_lower_indirect_derefs, indirect_mask);
       NIR_PASS_V(*consumer, nir_lower_indirect_derefs, indirect_mask);
 
-      st_nir_opts(*producer);
-      st_nir_opts(*consumer);
+      st_nir_opts(*producer, scalar);
+      st_nir_opts(*consumer, scalar);
    }
 }
 
@@ -627,6 +633,20 @@ st_link_nir(struct gl_context *ctx,
             struct gl_shader_program *shader_program)
 {
    struct st_context *st = st_context(ctx);
+   struct pipe_screen *screen = st->pipe->screen;
+   bool is_scalar[MESA_SHADER_STAGES];
+
+   /* Determine scalar property of each shader stage */
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
+      enum pipe_shader_type type;
+
+      if (shader == NULL)
+         continue;
+
+      type = pipe_shader_type_from_mesa(shader->Stage);
+      is_scalar[i] = screen->get_shader_param(screen, type, PIPE_SHADER_CAP_SCALAR_ISA);
+   }
 
    /* Determine first and last stage. */
    unsigned first = MESA_SHADER_STAGES;
@@ -655,7 +675,7 @@ st_link_nir(struct gl_context *ctx,
 
       nir_shader *nir = shader->Program->nir;
       NIR_PASS_V(nir, nir_lower_io_to_scalar_early, mask);
-      st_nir_opts(nir);
+      st_nir_opts(nir, is_scalar[i]);
    }
 
    /* Linking the stages in the opposite order (from fragment to vertex)
@@ -670,7 +690,8 @@ st_link_nir(struct gl_context *ctx,
          continue;
 
       st_nir_link_shaders(&shader->Program->nir,
-                          &shader_program->_LinkedShaders[next]->Program->nir);
+                          &shader_program->_LinkedShaders[next]->Program->nir,
+                          is_scalar[i]);
       next = i;
    }
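
For context, the cap queried above is answered on the driver side through the
pipe_screen::get_shader_param hook. Below is a minimal sketch of that side; the
function name is hypothetical and not part of this commit, and the sketch only
assumes the gallium get_shader_param signature and the PIPE_SHADER_CAP_SCALAR_ISA
cap used in the hunks above.

/* Hypothetical driver-side sketch (not part of this commit): how a gallium
 * driver reports the cap that st_glsl_to_nir.cpp queries above. */
static int
example_get_shader_param(struct pipe_screen *pscreen,
                         enum pipe_shader_type shader,
                         enum pipe_shader_cap param)
{
   switch (param) {
   case PIPE_SHADER_CAP_SCALAR_ISA:
      /* 1 => scalar backend: st_nir_opts() runs nir_lower_alu_to_scalar and
       * nir_lower_phis_to_scalar; 0 => vec4 backend: those passes are skipped. */
      return 1;
   /* ... other shader caps ... */
   default:
      return 0;
   }
}

A driver would install this callback as screen->get_shader_param, which is the
hook st_glsl_to_nir() and st_link_nir() call in the hunks above to decide
whether the scalar lowerings are worthwhile.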