author     Kenneth Graunke <[email protected]>   2016-02-24 22:34:51 -0800
committer  Kenneth Graunke <[email protected]>   2016-02-26 15:55:59 -0800
commit     15b3639bf1b0676e74b107d74653185eedbc6688 (patch)
tree       0108605c5783517779ba98f99e21385895a3cb15 /src
parent     cfbd9831f89ef165e7998d0b8524a1aefedec404 (diff)
i965: Avoid recalculating the tessellation VUE map for IO lowering.
The caller already computes it.  Now that we have stage specific
functions, it's really easy to pass this in.

Signed-off-by: Kenneth Graunke <[email protected]>
Reviewed-by: Iago Toral Quiroga <[email protected]>
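In essence, the lowering passes now borrow the VUE map the caller has already
built instead of recomputing it internally. Below is a minimal sketch of the
new call pattern for the TES input path, paraphrased from the hunks that
follow (not a literal excerpt; the surrounding brw_compile_tes() context in
brw_shader.cpp, with nir, devinfo, and key in scope, is assumed):

    /* The caller computes the tessellation input VUE map once... */
    struct brw_vue_map input_vue_map;
    brw_compute_tess_vue_map(&input_vue_map,
                             nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID,
                             nir->info.patch_inputs_read);

    /* ...and the IO lowering pass just takes a pointer to it instead of
     * calling brw_compute_tess_vue_map() itself. */
    brw_nir_lower_tes_inputs(nir, &input_vue_map);

The TCS output path follows the same shape: brw_compile_tcs() fills
vue_prog_data->vue_map first and then passes &vue_prog_data->vue_map to
brw_nir_lower_tcs_outputs().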
Diffstat (limited to 'src')
-rw-r--r--  src/mesa/drivers/dri/i965/brw_nir.c        | 19
-rw-r--r--  src/mesa/drivers/dri/i965/brw_nir.h        |  4
-rw-r--r--  src/mesa/drivers/dri/i965/brw_shader.cpp   | 15
-rw-r--r--  src/mesa/drivers/dri/i965/brw_vec4_tcs.cpp | 13
4 files changed, 25 insertions, 26 deletions
diff --git a/src/mesa/drivers/dri/i965/brw_nir.c b/src/mesa/drivers/dri/i965/brw_nir.c
index 2bd6c4ed57c..90c4f668767 100644
--- a/src/mesa/drivers/dri/i965/brw_nir.c
+++ b/src/mesa/drivers/dri/i965/brw_nir.c
@@ -149,7 +149,7 @@ remap_inputs_with_vue_map(nir_block *block, void *closure)
struct remap_patch_urb_offsets_state {
nir_builder b;
- struct brw_vue_map vue_map;
+ const struct brw_vue_map *vue_map;
};
static bool
@@ -167,7 +167,7 @@ remap_patch_urb_offsets(nir_block *block, void *closure)
if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
(stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
- int vue_slot = state->vue_map.varying_to_slot[intrin->const_index[0]];
+ int vue_slot = state->vue_map->varying_to_slot[intrin->const_index[0]];
assert(vue_slot != -1);
intrin->const_index[0] = vue_slot;
@@ -176,7 +176,7 @@ remap_patch_urb_offsets(nir_block *block, void *closure)
nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
if (const_vertex) {
intrin->const_index[0] += const_vertex->u[0] *
- state->vue_map.num_per_vertex_slots;
+ state->vue_map->num_per_vertex_slots;
} else {
state->b.cursor = nir_before_instr(&intrin->instr);
@@ -185,7 +185,7 @@ remap_patch_urb_offsets(nir_block *block, void *closure)
nir_imul(&state->b,
nir_ssa_for_src(&state->b, *vertex, 1),
nir_imm_int(&state->b,
- state->vue_map.num_per_vertex_slots));
+ state->vue_map->num_per_vertex_slots));
/* Add it to the existing offset */
nir_src *offset = nir_get_io_offset_src(intrin);
@@ -298,12 +298,10 @@ brw_nir_lower_vue_inputs(nir_shader *nir,
}
void
-brw_nir_lower_tes_inputs(nir_shader *nir)
+brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
{
struct remap_patch_urb_offsets_state state;
- brw_compute_tess_vue_map(&state.vue_map,
- nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID,
- nir->info.patch_inputs_read);
+ state.vue_map = vue_map;
foreach_list_typed(nir_variable, var, node, &nir->inputs) {
var->data.driver_location = var->data.location;
@@ -347,11 +345,10 @@ brw_nir_lower_vue_outputs(nir_shader *nir,
}
void
-brw_nir_lower_tcs_outputs(nir_shader *nir)
+brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map)
{
struct remap_patch_urb_offsets_state state;
- brw_compute_tess_vue_map(&state.vue_map, nir->info.outputs_written,
- nir->info.patch_outputs_written);
+ state.vue_map = vue_map;
nir_foreach_variable(var, &nir->outputs) {
var->data.driver_location = var->data.location;
diff --git a/src/mesa/drivers/dri/i965/brw_nir.h b/src/mesa/drivers/dri/i965/brw_nir.h
index 0140f3a80be..0fbdc5fa625 100644
--- a/src/mesa/drivers/dri/i965/brw_nir.h
+++ b/src/mesa/drivers/dri/i965/brw_nir.h
@@ -91,10 +91,10 @@ void brw_nir_lower_vs_inputs(nir_shader *nir,
void brw_nir_lower_vue_inputs(nir_shader *nir,
const struct brw_device_info *devinfo,
bool is_scalar);
-void brw_nir_lower_tes_inputs(nir_shader *nir);
+void brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue);
void brw_nir_lower_fs_inputs(nir_shader *nir);
void brw_nir_lower_vue_outputs(nir_shader *nir, bool is_scalar);
-void brw_nir_lower_tcs_outputs(nir_shader *nir);
+void brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue);
void brw_nir_lower_fs_outputs(nir_shader *nir);
nir_shader *brw_postprocess_nir(nir_shader *nir,
diff --git a/src/mesa/drivers/dri/i965/brw_shader.cpp b/src/mesa/drivers/dri/i965/brw_shader.cpp
index 857a079c67b..dfe6afcf6d0 100644
--- a/src/mesa/drivers/dri/i965/brw_shader.cpp
+++ b/src/mesa/drivers/dri/i965/brw_shader.cpp
@@ -1227,10 +1227,16 @@ brw_compile_tes(const struct brw_compiler *compiler,
const bool is_scalar = compiler->scalar_stage[MESA_SHADER_TESS_EVAL];
nir_shader *nir = nir_shader_clone(mem_ctx, src_shader);
- nir = brw_nir_apply_sampler_key(nir, devinfo, &key->tex, is_scalar);
nir->info.inputs_read = key->inputs_read;
nir->info.patch_inputs_read = key->patch_inputs_read;
- brw_nir_lower_tes_inputs(nir);
+
+ struct brw_vue_map input_vue_map;
+ brw_compute_tess_vue_map(&input_vue_map,
+ nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID,
+ nir->info.patch_inputs_read);
+
+ nir = brw_nir_apply_sampler_key(nir, devinfo, &key->tex, is_scalar);
+ brw_nir_lower_tes_inputs(nir, &input_vue_map);
brw_nir_lower_vue_outputs(nir, is_scalar);
nir = brw_postprocess_nir(nir, compiler->devinfo, is_scalar);
@@ -1250,11 +1256,6 @@ brw_compile_tes(const struct brw_compiler *compiler,
/* URB entry sizes are stored as a multiple of 64 bytes. */
prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 64) / 64;
- struct brw_vue_map input_vue_map;
- brw_compute_tess_vue_map(&input_vue_map,
- nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID,
- nir->info.patch_inputs_read);
-
bool need_patch_header = nir->info.system_values_read &
(BITFIELD64_BIT(SYSTEM_VALUE_TESS_LEVEL_OUTER) |
BITFIELD64_BIT(SYSTEM_VALUE_TESS_LEVEL_INNER));
diff --git a/src/mesa/drivers/dri/i965/brw_vec4_tcs.cpp b/src/mesa/drivers/dri/i965/brw_vec4_tcs.cpp
index b6a759b73d1..53e7aef37f2 100644
--- a/src/mesa/drivers/dri/i965/brw_vec4_tcs.cpp
+++ b/src/mesa/drivers/dri/i965/brw_vec4_tcs.cpp
@@ -513,19 +513,20 @@ brw_compile_tcs(const struct brw_compiler *compiler,
const bool is_scalar = compiler->scalar_stage[MESA_SHADER_TESS_CTRL];
nir_shader *nir = nir_shader_clone(mem_ctx, src_shader);
- nir = brw_nir_apply_sampler_key(nir, devinfo, &key->tex, is_scalar);
nir->info.outputs_written = key->outputs_written;
nir->info.patch_outputs_written = key->patch_outputs_written;
- brw_nir_lower_vue_inputs(nir, compiler->devinfo, is_scalar);
- brw_nir_lower_tcs_outputs(nir);
- nir = brw_postprocess_nir(nir, compiler->devinfo, is_scalar);
-
- prog_data->instances = DIV_ROUND_UP(nir->info.tcs.vertices_out, 2);
brw_compute_tess_vue_map(&vue_prog_data->vue_map,
nir->info.outputs_written,
nir->info.patch_outputs_written);
+ nir = brw_nir_apply_sampler_key(nir, devinfo, &key->tex, is_scalar);
+ brw_nir_lower_vue_inputs(nir, compiler->devinfo, is_scalar);
+ brw_nir_lower_tcs_outputs(nir, &vue_prog_data->vue_map);
+ nir = brw_postprocess_nir(nir, compiler->devinfo, is_scalar);
+
+ prog_data->instances = DIV_ROUND_UP(nir->info.tcs.vertices_out, 2);
+
/* Compute URB entry size. The maximum allowed URB entry size is 32k.
* That divides up as follows:
*