author	Jason Ekstrand <[email protected]>	2018-10-20 10:05:33 -0500
committer	Jason Ekstrand <[email protected]>	2018-11-08 10:09:06 -0600
commit	6b2918709ab785be53607ff65ae203b2e88594b4 (patch)
tree	fb3369734746b7c2e2939997fa47f193f843df79 /src/intel
parent	c472ad82e48e139e03ed28a7a98481814260d08e (diff)
intel/fs,vec4: Clean up a repeated pattern with SSBOs
Everywhere we handle SSBO intrinsics, we have exactly the same pattern for
computing the index, so we may as well make a helper for it.  We also add a
get_nir_src_imm helper to vec4 and use it for SSBO offsets.

Reviewed-by: Kenneth Graunke <[email protected]>
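For context, here is a minimal, self-contained sketch of the shape of this refactor. The names in it (Reg, Shader, get_ssbo_index, emit_add_ssbo_start, mark_surface_used) are stand-ins for illustration only, not Mesa's API; the real helper added by this patch is get_nir_ssbo_intrinsic_index in the diff below. The idea is that the "constant block index vs. dynamic index biased by ssbo_start" decision, previously open-coded at every load/store/atomic call site, is hoisted into one function.

    // Illustrative sketch (stand-in types, not Mesa code). Requires C++17.
    #include <optional>

    struct Reg {
       bool immediate;   // binding-table slot known at compile time?
       unsigned value;   // the slot, or an id for a register computed at run time
    };

    struct Shader {
       unsigned ssbo_start = 8;     // first SSBO slot in the binding table
       unsigned surfaces_used = 0;  // high-water mark of surfaces the shader may touch
       unsigned next_vreg = 100;

       void mark_surface_used(unsigned idx) {
          if (idx + 1 > surfaces_used)
             surfaces_used = idx + 1;
       }

       // Stand-in for emitting an ADD that biases a dynamic index by ssbo_start.
       Reg emit_add_ssbo_start(unsigned /*src*/) { return { false, next_vreg++ }; }
    };

    // The factored-out helper: every call site asks this one function for the
    // surface index instead of repeating the same if/else.
    Reg get_ssbo_index(Shader &s, std::optional<unsigned> const_block,
                       unsigned dynamic_src, unsigned num_ssbos) {
       if (const_block) {
          unsigned idx = s.ssbo_start + *const_block;
          s.mark_surface_used(idx);          // exact surface is known
          return { true, idx };
       }
       Reg r = s.emit_add_ssbo_start(dynamic_src);
       s.mark_surface_used(s.ssbo_start + num_ssbos - 1);  // could be any SSBO
       return r;
    }

    int main() {
       Shader s;
       Reg a = get_ssbo_index(s, 2u, /*dynamic_src=*/0, /*num_ssbos=*/4);  // constant block
       Reg b = get_ssbo_index(s, std::nullopt, /*dynamic_src=*/7, 4);      // dynamic block
       return (a.immediate && !b.immediate) ? 0 : 1;
    }

The actual patch does the same thing twice, once for the scalar (fs) backend and once for vec4, with the additional wrinkle that nir_intrinsic_store_ssbo keeps its block index in src[1] rather than src[0].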
Diffstat (limited to 'src/intel')
-rw-r--r--	src/intel/compiler/brw_fs.h	2
-rw-r--r--	src/intel/compiler/brw_fs_nir.cpp	123
-rw-r--r--	src/intel/compiler/brw_vec4.h	2
-rw-r--r--	src/intel/compiler/brw_vec4_nir.cpp	124
4 files changed, 85 insertions, 166 deletions
diff --git a/src/intel/compiler/brw_fs.h b/src/intel/compiler/brw_fs.h
index aba19d5ab2c..163c0008820 100644
--- a/src/intel/compiler/brw_fs.h
+++ b/src/intel/compiler/brw_fs.h
@@ -218,6 +218,8 @@ public:
nir_intrinsic_instr *instr);
fs_reg get_nir_image_intrinsic_image(const brw::fs_builder &bld,
nir_intrinsic_instr *instr);
+ fs_reg get_nir_ssbo_intrinsic_index(const brw::fs_builder &bld,
+ nir_intrinsic_instr *instr);
void nir_emit_intrinsic(const brw::fs_builder &bld,
nir_intrinsic_instr *instr);
void nir_emit_tes_intrinsic(const brw::fs_builder &bld,
diff --git a/src/intel/compiler/brw_fs_nir.cpp b/src/intel/compiler/brw_fs_nir.cpp
index c845d87d59b..a016977c509 100644
--- a/src/intel/compiler/brw_fs_nir.cpp
+++ b/src/intel/compiler/brw_fs_nir.cpp
@@ -3762,6 +3762,37 @@ fs_visitor::get_nir_image_intrinsic_image(const brw::fs_builder &bld,
return bld.emit_uniformize(image);
}
+fs_reg
+fs_visitor::get_nir_ssbo_intrinsic_index(const brw::fs_builder &bld,
+ nir_intrinsic_instr *instr)
+{
+ /* SSBO stores are weird in that their index is in src[1] */
+ const unsigned src = instr->intrinsic == nir_intrinsic_store_ssbo ? 1 : 0;
+ nir_const_value *const_uniform_block =
+ nir_src_as_const_value(instr->src[src]);
+
+ fs_reg surf_index;
+ if (const_uniform_block) {
+ unsigned index = stage_prog_data->binding_table.ssbo_start +
+ const_uniform_block->u32[0];
+ surf_index = brw_imm_ud(index);
+ brw_mark_surface_used(prog_data, index);
+ } else {
+ surf_index = vgrf(glsl_type::uint_type);
+ bld.ADD(surf_index, get_nir_src(instr->src[src]),
+ brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
+
+ /* Assume this may touch any UBO. It would be nice to provide
+ * a tighter bound, but the array information is already lowered away.
+ */
+ brw_mark_surface_used(prog_data,
+ stage_prog_data->binding_table.ssbo_start +
+ nir->info.num_ssbos - 1);
+ }
+
+ return surf_index;
+}
+
static unsigned
image_intrinsic_coord_components(nir_intrinsic_instr *instr)
{
@@ -4139,35 +4170,8 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
case nir_intrinsic_load_ssbo: {
assert(devinfo->gen >= 7);
- nir_const_value *const_uniform_block =
- nir_src_as_const_value(instr->src[0]);
-
- fs_reg surf_index;
- if (const_uniform_block) {
- unsigned index = stage_prog_data->binding_table.ssbo_start +
- const_uniform_block->u32[0];
- surf_index = brw_imm_ud(index);
- brw_mark_surface_used(prog_data, index);
- } else {
- surf_index = vgrf(glsl_type::uint_type);
- bld.ADD(surf_index, get_nir_src(instr->src[0]),
- brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
-
- /* Assume this may touch any UBO. It would be nice to provide
- * a tighter bound, but the array information is already lowered away.
- */
- brw_mark_surface_used(prog_data,
- stage_prog_data->binding_table.ssbo_start +
- nir->info.num_ssbos - 1);
- }
-
- fs_reg offset_reg;
- nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
- if (const_offset) {
- offset_reg = brw_imm_ud(const_offset->u32[0]);
- } else {
- offset_reg = retype(get_nir_src(instr->src[1]), BRW_REGISTER_TYPE_UD);
- }
+ fs_reg surf_index = get_nir_ssbo_intrinsic_index(bld, instr);
+ fs_reg offset_reg = get_nir_src_imm(instr->src[1]);
/* Read the vector */
do_untyped_vector_read(bld, dest, surf_index, offset_reg,
@@ -4182,24 +4186,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
if (stage == MESA_SHADER_FRAGMENT)
brw_wm_prog_data(prog_data)->has_side_effects = true;
- /* Block index */
- fs_reg surf_index;
- nir_const_value *const_uniform_block =
- nir_src_as_const_value(instr->src[1]);
- if (const_uniform_block) {
- unsigned index = stage_prog_data->binding_table.ssbo_start +
- const_uniform_block->u32[0];
- surf_index = brw_imm_ud(index);
- brw_mark_surface_used(prog_data, index);
- } else {
- surf_index = vgrf(glsl_type::uint_type);
- bld.ADD(surf_index, get_nir_src(instr->src[1]),
- brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
-
- brw_mark_surface_used(prog_data,
- stage_prog_data->binding_table.ssbo_start +
- nir->info.num_ssbos - 1);
- }
+ fs_reg surf_index = get_nir_ssbo_intrinsic_index(bld, instr);
/* Value */
fs_reg val_reg = get_nir_src(instr->src[0]);
@@ -4836,26 +4823,7 @@ fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld,
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
dest = get_nir_dest(instr->dest);
- fs_reg surface;
- nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
- if (const_surface) {
- unsigned surf_index = stage_prog_data->binding_table.ssbo_start +
- const_surface->u32[0];
- surface = brw_imm_ud(surf_index);
- brw_mark_surface_used(prog_data, surf_index);
- } else {
- surface = vgrf(glsl_type::uint_type);
- bld.ADD(surface, get_nir_src(instr->src[0]),
- brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
-
- /* Assume this may touch any SSBO. This is the same we do for other
- * UBO/SSBO accesses with non-constant surface.
- */
- brw_mark_surface_used(prog_data,
- stage_prog_data->binding_table.ssbo_start +
- nir->info.num_ssbos - 1);
- }
-
+ fs_reg surface = get_nir_ssbo_intrinsic_index(bld, instr);
fs_reg offset = get_nir_src(instr->src[1]);
fs_reg data1;
if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
@@ -4886,26 +4854,7 @@ fs_visitor::nir_emit_ssbo_atomic_float(const fs_builder &bld,
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
dest = get_nir_dest(instr->dest);
- fs_reg surface;
- nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
- if (const_surface) {
- unsigned surf_index = stage_prog_data->binding_table.ssbo_start +
- const_surface->u32[0];
- surface = brw_imm_ud(surf_index);
- brw_mark_surface_used(prog_data, surf_index);
- } else {
- surface = vgrf(glsl_type::uint_type);
- bld.ADD(surface, get_nir_src(instr->src[0]),
- brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
-
- /* Assume this may touch any SSBO. This is the same we do for other
- * UBO/SSBO accesses with non-constant surface.
- */
- brw_mark_surface_used(prog_data,
- stage_prog_data->binding_table.ssbo_start +
- nir->info.num_ssbos - 1);
- }
-
+ fs_reg surface = get_nir_ssbo_intrinsic_index(bld, instr);
fs_reg offset = get_nir_src(instr->src[1]);
fs_reg data1 = get_nir_src(instr->src[2]);
fs_reg data2;
diff --git a/src/intel/compiler/brw_vec4.h b/src/intel/compiler/brw_vec4.h
index 71880db969e..8ef0b5319b0 100644
--- a/src/intel/compiler/brw_vec4.h
+++ b/src/intel/compiler/brw_vec4.h
@@ -338,6 +338,7 @@ public:
virtual void nir_emit_block(nir_block *block);
virtual void nir_emit_instr(nir_instr *instr);
virtual void nir_emit_load_const(nir_load_const_instr *instr);
+ src_reg get_nir_ssbo_intrinsic_index(nir_intrinsic_instr *instr);
virtual void nir_emit_intrinsic(nir_intrinsic_instr *instr);
virtual void nir_emit_alu(nir_alu_instr *instr);
virtual void nir_emit_jump(nir_jump_instr *instr);
@@ -354,6 +355,7 @@ public:
unsigned num_components = 4);
src_reg get_nir_src(const nir_src &src,
unsigned num_components = 4);
+ src_reg get_nir_src_imm(const nir_src &src);
src_reg get_indirect_offset(nir_intrinsic_instr *instr);
dst_reg *nir_locals;
diff --git a/src/intel/compiler/brw_vec4_nir.cpp b/src/intel/compiler/brw_vec4_nir.cpp
index 19ee79367c8..f59574a3f3c 100644
--- a/src/intel/compiler/brw_vec4_nir.cpp
+++ b/src/intel/compiler/brw_vec4_nir.cpp
@@ -253,6 +253,16 @@ vec4_visitor::get_nir_src(const nir_src &src, unsigned num_components)
}
src_reg
+vec4_visitor::get_nir_src_imm(const nir_src &src)
+{
+ assert(nir_src_num_components(src) == 1);
+ assert(nir_src_bit_size(src) == 32);
+ nir_const_value *const_val = nir_src_as_const_value(src);
+ return const_val ? src_reg(brw_imm_d(const_val->i32[0])) :
+ get_nir_src(src, 1);
+}
+
+src_reg
vec4_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
{
nir_src *offset_src = nir_get_io_offset_src(instr);
@@ -368,6 +378,34 @@ vec4_visitor::nir_emit_load_const(nir_load_const_instr *instr)
nir_ssa_values[instr->def.index] = reg;
}
+src_reg
+vec4_visitor::get_nir_ssbo_intrinsic_index(nir_intrinsic_instr *instr)
+{
+ /* SSBO stores are weird in that their index is in src[1] */
+ const unsigned src = instr->intrinsic == nir_intrinsic_store_ssbo ? 1 : 0;
+
+ src_reg surf_index;
+ nir_const_value *const_uniform_block =
+ nir_src_as_const_value(instr->src[src]);
+ if (const_uniform_block) {
+ unsigned index = prog_data->base.binding_table.ssbo_start +
+ const_uniform_block->u32[0];
+ surf_index = brw_imm_ud(index);
+ brw_mark_surface_used(&prog_data->base, index);
+ } else {
+ surf_index = src_reg(this, glsl_type::uint_type);
+ emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[src], 1),
+ brw_imm_ud(prog_data->base.binding_table.ssbo_start)));
+ surf_index = emit_uniformize(surf_index);
+
+ brw_mark_surface_used(&prog_data->base,
+ prog_data->base.binding_table.ssbo_start +
+ nir->info.num_ssbos - 1);
+ }
+
+ return surf_index;
+}
+
void
vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
{
@@ -470,34 +508,9 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
case nir_intrinsic_store_ssbo: {
assert(devinfo->gen >= 7);
- /* Block index */
- src_reg surf_index;
- nir_const_value *const_uniform_block =
- nir_src_as_const_value(instr->src[1]);
- if (const_uniform_block) {
- unsigned index = prog_data->base.binding_table.ssbo_start +
- const_uniform_block->u32[0];
- surf_index = brw_imm_ud(index);
- brw_mark_surface_used(&prog_data->base, index);
- } else {
- surf_index = src_reg(this, glsl_type::uint_type);
- emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[1], 1),
- brw_imm_ud(prog_data->base.binding_table.ssbo_start)));
- surf_index = emit_uniformize(surf_index);
-
- brw_mark_surface_used(&prog_data->base,
- prog_data->base.binding_table.ssbo_start +
- nir->info.num_ssbos - 1);
- }
-
- /* Offset */
- src_reg offset_reg;
- nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]);
- if (const_offset) {
- offset_reg = brw_imm_ud(const_offset->u32[0]);
- } else {
- offset_reg = get_nir_src(instr->src[2], 1);
- }
+ src_reg surf_index = get_nir_ssbo_intrinsic_index(instr);
+ src_reg offset_reg = retype(get_nir_src_imm(instr->src[2]),
+ BRW_REGISTER_TYPE_UD);
/* Value */
src_reg val_reg = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F, 4);
@@ -632,37 +645,9 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
case nir_intrinsic_load_ssbo: {
assert(devinfo->gen >= 7);
- nir_const_value *const_uniform_block =
- nir_src_as_const_value(instr->src[0]);
-
- src_reg surf_index;
- if (const_uniform_block) {
- unsigned index = prog_data->base.binding_table.ssbo_start +
- const_uniform_block->u32[0];
- surf_index = brw_imm_ud(index);
-
- brw_mark_surface_used(&prog_data->base, index);
- } else {
- surf_index = src_reg(this, glsl_type::uint_type);
- emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[0], 1),
- brw_imm_ud(prog_data->base.binding_table.ssbo_start)));
- surf_index = emit_uniformize(surf_index);
-
- /* Assume this may touch any UBO. It would be nice to provide
- * a tighter bound, but the array information is already lowered away.
- */
- brw_mark_surface_used(&prog_data->base,
- prog_data->base.binding_table.ssbo_start +
- nir->info.num_ssbos - 1);
- }
-
- src_reg offset_reg;
- nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
- if (const_offset) {
- offset_reg = brw_imm_ud(const_offset->u32[0]);
- } else {
- offset_reg = get_nir_src(instr->src[1], 1);
- }
+ src_reg surf_index = get_nir_ssbo_intrinsic_index(instr);
+ src_reg offset_reg = retype(get_nir_src_imm(instr->src[1]),
+ BRW_REGISTER_TYPE_UD);
/* Read the vector */
const vec4_builder bld = vec4_builder(this).at_end()
@@ -922,26 +907,7 @@ vec4_visitor::nir_emit_ssbo_atomic(int op, nir_intrinsic_instr *instr)
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
dest = get_nir_dest(instr->dest);
- src_reg surface;
- nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
- if (const_surface) {
- unsigned surf_index = prog_data->base.binding_table.ssbo_start +
- const_surface->u32[0];
- surface = brw_imm_ud(surf_index);
- brw_mark_surface_used(&prog_data->base, surf_index);
- } else {
- surface = src_reg(this, glsl_type::uint_type);
- emit(ADD(dst_reg(surface), get_nir_src(instr->src[0]),
- brw_imm_ud(prog_data->base.binding_table.ssbo_start)));
-
- /* Assume this may touch any UBO. This is the same we do for other
- * UBO/SSBO accesses with non-constant surface.
- */
- brw_mark_surface_used(&prog_data->base,
- prog_data->base.binding_table.ssbo_start +
- nir->info.num_ssbos - 1);
- }
-
+ src_reg surface = get_nir_ssbo_intrinsic_index(instr);
src_reg offset = get_nir_src(instr->src[1], 1);
src_reg data1;
if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)