author    Jason Ekstrand <[email protected]>  2016-05-18 20:28:07 -0700
committer Jason Ekstrand <[email protected]>  2016-05-23 19:12:34 -0700
commit    27b9481d03959a7bee6d906c62b4a519b6b1dc38 (patch)
tree      4578aca510eef5c8e2e27a00f83d7791377e3dd4 /src/compiler/glsl
parent    ac242aac3d2c38fd6843d85363a455271b1348c2 (diff)
glsl: Add an option to clamp block indices when lowering UBO/SSBOs
This prevents array overflow when the block is actually an array of UBOs or
SSBOs.  On some hardware such as i965, such overflows can cause GPU hangs.

Reviewed-by: Ian Romanick <[email protected]>
Reviewed-by: Kenneth Graunke <[email protected]>
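For illustration only (this shader is not part of the commit), here is a minimal
GLSL sketch of the situation the new clamping guards against: an instance array
of UBOs indexed with a non-constant, dynamically uniform expression. All names
below (LightBlock, lights, light_index) are hypothetical.

    #version 430

    layout(std140) uniform LightBlock {   // hypothetical block, for illustration
        vec4 color;
    } lights[4];                          // an array of 4 UBOs

    uniform int light_index;              // dynamically uniform, may be out of range

    out vec4 frag_color;

    void main()
    {
        // With ClampBlockIndicesToArrayBounds set, the lowering pass now
        // effectively rewrites this access as
        // lights[min(max(light_index, 0), 3)].color, so a bad index can no
        // longer address a block outside the array.
        frag_color = lights[light_index].color;
    }

Note that only a signed index needs the lower bound of 0; this matches the
GLSL_TYPE_INT check in clamp_to_array_bounds below, where unsigned indices get
only the min() against the last array element.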
Diffstat (limited to 'src/compiler/glsl')
-rw-r--r--  src/compiler/glsl/ir_optimization.h       |  2
-rw-r--r--  src/compiler/glsl/linker.cpp              |  3
-rw-r--r--  src/compiler/glsl/lower_ubo_reference.cpp | 36
3 files changed, 35 insertions(+), 6 deletions(-)
diff --git a/src/compiler/glsl/ir_optimization.h b/src/compiler/glsl/ir_optimization.h
index 71b10e4d643..ba14e34b71e 100644
--- a/src/compiler/glsl/ir_optimization.h
+++ b/src/compiler/glsl/ir_optimization.h
@@ -123,7 +123,7 @@ bool lower_clip_cull_distance(struct gl_shader_program *prog, gl_shader *shader)
void lower_output_reads(unsigned stage, exec_list *instructions);
bool lower_packing_builtins(exec_list *instructions, int op_mask);
void lower_shared_reference(struct gl_shader *shader, unsigned *shared_size);
-void lower_ubo_reference(struct gl_shader *shader);
+void lower_ubo_reference(struct gl_shader *shader, bool clamp_block_indices);
void lower_packed_varyings(void *mem_ctx,
unsigned locations_used, ir_variable_mode mode,
unsigned gs_input_vertices, gl_shader *shader,
diff --git a/src/compiler/glsl/linker.cpp b/src/compiler/glsl/linker.cpp
index 89cd9f795cc..3d95540256c 100644
--- a/src/compiler/glsl/linker.cpp
+++ b/src/compiler/glsl/linker.cpp
@@ -4906,7 +4906,8 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
&ctx->Const.ShaderCompilerOptions[i];
if (options->LowerBufferInterfaceBlocks)
- lower_ubo_reference(prog->_LinkedShaders[i]);
+ lower_ubo_reference(prog->_LinkedShaders[i],
+ options->ClampBlockIndicesToArrayBounds);
if (options->LowerShaderSharedVariables)
lower_shared_reference(prog->_LinkedShaders[i],
diff --git a/src/compiler/glsl/lower_ubo_reference.cpp b/src/compiler/glsl/lower_ubo_reference.cpp
index 1a0140fad15..749deedcd1c 100644
--- a/src/compiler/glsl/lower_ubo_reference.cpp
+++ b/src/compiler/glsl/lower_ubo_reference.cpp
@@ -44,8 +44,10 @@ namespace {
class lower_ubo_reference_visitor :
public lower_buffer_access::lower_buffer_access {
public:
- lower_ubo_reference_visitor(struct gl_shader *shader)
- : shader(shader), struct_field(NULL), variable(NULL)
+ lower_ubo_reference_visitor(struct gl_shader *shader,
+ bool clamp_block_indices)
+ : shader(shader), clamp_block_indices(clamp_block_indices),
+ struct_field(NULL), variable(NULL)
{
}
@@ -104,6 +106,7 @@ public:
ir_visitor_status visit_enter(ir_call *ir);
struct gl_shader *shader;
+ bool clamp_block_indices;
struct gl_uniform_buffer_variable *ubo_var;
const struct glsl_struct_field *struct_field;
ir_variable *variable;
@@ -242,6 +245,26 @@ interface_field_name(void *mem_ctx, char *base_name, ir_rvalue *d,
return NULL;
}
+static ir_rvalue *
+clamp_to_array_bounds(void *mem_ctx, ir_rvalue *index, const glsl_type *type)
+{
+ assert(type->is_array());
+
+ const unsigned array_size = type->arrays_of_arrays_size();
+
+ ir_constant *max_index = new(mem_ctx) ir_constant(array_size - 1);
+ max_index->type = index->type;
+
+ ir_constant *zero = new(mem_ctx) ir_constant(0);
+ zero->type = index->type;
+
+ if (index->type->base_type == GLSL_TYPE_INT)
+ index = max2(index, zero);
+ index = min2(index, max_index);
+
+ return index;
+}
+
void
lower_ubo_reference_visitor::setup_for_load_or_store(void *mem_ctx,
ir_variable *var,
@@ -258,6 +281,11 @@ lower_ubo_reference_visitor::setup_for_load_or_store(void *mem_ctx,
interface_field_name(mem_ctx, (char *) var->get_interface_type()->name,
deref, &nonconst_block_index);
+ if (nonconst_block_index && clamp_block_indices) {
+ nonconst_block_index =
+ clamp_to_array_bounds(mem_ctx, nonconst_block_index, var->type);
+ }
+
/* Locate the block by interface name */
unsigned num_blocks;
struct gl_uniform_block **blocks;
@@ -1062,9 +1090,9 @@ lower_ubo_reference_visitor::visit_enter(ir_call *ir)
} /* unnamed namespace */
void
-lower_ubo_reference(struct gl_shader *shader)
+lower_ubo_reference(struct gl_shader *shader, bool clamp_block_indices)
{
- lower_ubo_reference_visitor v(shader);
+ lower_ubo_reference_visitor v(shader, clamp_block_indices);
/* Loop over the instructions lowering references, because we take
* a deref of a UBO array using a UBO dereference as the index will