Diffstat (limited to 'src/glsl')
-rw-r--r--   src/glsl/link_atomics.cpp        | 43
-rw-r--r--   src/glsl/nir/glsl_to_nir.cpp     |  2
-rw-r--r--   src/glsl/nir/nir.h               |  4
-rw-r--r--   src/glsl/nir/nir_lower_atomics.c | 25
4 files changed, 62 insertions(+), 12 deletions(-)
diff --git a/src/glsl/link_atomics.cpp b/src/glsl/link_atomics.cpp
index 70ef0e1c891..cdcc06d53e2 100644
--- a/src/glsl/link_atomics.cpp
+++ b/src/glsl/link_atomics.cpp
@@ -198,6 +198,7 @@ link_assign_atomic_counter_resources(struct gl_context *ctx,
struct gl_shader_program *prog)
{
unsigned num_buffers;
+ unsigned num_atomic_buffers[MESA_SHADER_STAGES] = {};
active_atomic_buffer *abs =
find_active_atomic_counters(ctx, prog, &num_buffers);
@@ -242,13 +243,49 @@ link_assign_atomic_counter_resources(struct gl_context *ctx,
}
/* Assign stage-specific fields. */
- for (unsigned j = 0; j < MESA_SHADER_STAGES; ++j)
- mab.StageReferences[j] =
- (ab.stage_references[j] ? GL_TRUE : GL_FALSE);
+ for (unsigned j = 0; j < MESA_SHADER_STAGES; ++j) {
+ if (ab.stage_references[j]) {
+ mab.StageReferences[j] = GL_TRUE;
+ num_atomic_buffers[j]++;
+ } else {
+ mab.StageReferences[j] = GL_FALSE;
+ }
+ }
i++;
}
+ /* Store a list of pointers to atomic buffers per stage, and store the
+ * index into the intra-stage buffer list in uniform storage.
+ */
+ for (unsigned j = 0; j < MESA_SHADER_STAGES; ++j) {
+ if (prog->_LinkedShaders[j] && num_atomic_buffers[j] > 0) {
+ prog->_LinkedShaders[j]->NumAtomicBuffers = num_atomic_buffers[j];
+ prog->_LinkedShaders[j]->AtomicBuffers =
+ rzalloc_array(prog, gl_active_atomic_buffer *,
+ num_atomic_buffers[j]);
+
+ unsigned intra_stage_idx = 0;
+ for (unsigned i = 0; i < num_buffers; i++) {
+ struct gl_active_atomic_buffer *atomic_buffer =
+ &prog->AtomicBuffers[i];
+ if (atomic_buffer->StageReferences[j]) {
+ prog->_LinkedShaders[j]->AtomicBuffers[intra_stage_idx] =
+ atomic_buffer;
+
+ for (unsigned u = 0; u < atomic_buffer->NumUniforms; u++) {
+ prog->UniformStorage[atomic_buffer->Uniforms[u]].opaque[j].index =
+ intra_stage_idx;
+ prog->UniformStorage[atomic_buffer->Uniforms[u]].opaque[j].active =
+ true;
+ }
+
+ intra_stage_idx++;
+ }
+ }
+ }
+ }
+
delete [] abs;
assert(i == num_buffers);
}
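
Note on the hunk above: each linked stage now owns a compact list of just the atomic buffers it references, with each buffer's list position mirrored into UniformStorage so the NIR pass below can recover it. A sketch of how a backend might walk that list, assuming a hypothetical bind_atomic_buffer() hook (not part of this patch):

#include "main/mtypes.h"   /* struct gl_shader, gl_active_atomic_buffer */

/* Hypothetical driver hook; illustrative only. */
extern void bind_atomic_buffer(unsigned index,
                               const struct gl_active_atomic_buffer *ab);

static void
bind_stage_atomic_buffers(struct gl_shader *sh)
{
   for (unsigned i = 0; i < sh->NumAtomicBuffers; i++) {
      struct gl_active_atomic_buffer *ab = sh->AtomicBuffers[i];

      /* 'i' equals the intra-stage index the linker stored in
       * UniformStorage[...].opaque[stage].index, which is also what the
       * lowered NIR intrinsics carry in const_index[0]. */
      bind_atomic_buffer(i, ab);
   }
}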
diff --git a/src/glsl/nir/glsl_to_nir.cpp b/src/glsl/nir/glsl_to_nir.cpp
index 9b50a93e7f6..01f16d70eb1 100644
--- a/src/glsl/nir/glsl_to_nir.cpp
+++ b/src/glsl/nir/glsl_to_nir.cpp
@@ -392,8 +392,6 @@ nir_visitor::visit(ir_variable *ir)
var->data.index = ir->data.index;
var->data.binding = ir->data.binding;
- /* XXX Get rid of buffer_index */
- var->data.atomic.buffer_index = ir->data.binding;
var->data.atomic.offset = ir->data.atomic.offset;
var->data.image.read_only = ir->data.image_read_only;
var->data.image.write_only = ir->data.image_write_only;
diff --git a/src/glsl/nir/nir.h b/src/glsl/nir/nir.h
index e3777f926e2..04a21a7ead6 100644
--- a/src/glsl/nir/nir.h
+++ b/src/glsl/nir/nir.h
@@ -308,7 +308,6 @@ typedef struct {
* Location an atomic counter is stored at.
*/
struct {
- unsigned buffer_index;
unsigned offset;
} atomic;
@@ -1978,7 +1977,8 @@ void nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables);
void nir_lower_two_sided_color(nir_shader *shader);
-void nir_lower_atomics(nir_shader *shader);
+void nir_lower_atomics(nir_shader *shader,
+ const struct gl_shader_program *shader_program);
void nir_lower_to_source_mods(nir_shader *shader);
bool nir_lower_gs_intrinsics(nir_shader *shader);
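
The signature change means every caller of nir_lower_atomics() must now pass the linked gl_shader_program. A minimal call-site sketch (variable names illustrative):

/* Before this patch:  nir_lower_atomics(nir);
 * After, the linked program is needed so the pass can read
 * UniformStorage[...].opaque[stage].index: */
nir_lower_atomics(nir, shader_program);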
diff --git a/src/glsl/nir/nir_lower_atomics.c b/src/glsl/nir/nir_lower_atomics.c
index 46e137652a1..40ca3de96cf 100644
--- a/src/glsl/nir/nir_lower_atomics.c
+++ b/src/glsl/nir/nir_lower_atomics.c
@@ -25,17 +25,24 @@
*
*/
+#include "ir_uniform.h"
#include "nir.h"
#include "main/config.h"
#include <assert.h>
+typedef struct {
+ const struct gl_shader_program *shader_program;
+ nir_shader *shader;
+} lower_atomic_state;
+
/*
* replace atomic counter intrinsics that use a variable with intrinsics
* that directly store the buffer index and byte offset
*/
static void
-lower_instr(nir_intrinsic_instr *instr, nir_function_impl *impl)
+lower_instr(nir_intrinsic_instr *instr,
+ lower_atomic_state *state)
{
nir_intrinsic_op op;
switch (instr->intrinsic) {
@@ -60,10 +67,11 @@ lower_instr(nir_intrinsic_instr *instr, nir_function_impl *impl)
return; /* atomics passed as function arguments can't be lowered */
void *mem_ctx = ralloc_parent(instr);
+ unsigned uniform_loc = instr->variables[0]->var->data.location;
nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(mem_ctx, op);
new_instr->const_index[0] =
- (int) instr->variables[0]->var->data.atomic.buffer_index;
+ state->shader_program->UniformStorage[uniform_loc].opaque[state->shader->stage].index;
nir_load_const_instr *offset_const = nir_load_const_instr_create(mem_ctx, 1);
offset_const->value.u[0] = instr->variables[0]->var->data.atomic.offset;
@@ -132,18 +140,25 @@ lower_block(nir_block *block, void *state)
{
nir_foreach_instr_safe(block, instr) {
if (instr->type == nir_instr_type_intrinsic)
- lower_instr(nir_instr_as_intrinsic(instr), state);
+ lower_instr(nir_instr_as_intrinsic(instr),
+ (lower_atomic_state *) state);
}
return true;
}
void
-nir_lower_atomics(nir_shader *shader)
+nir_lower_atomics(nir_shader *shader,
+ const struct gl_shader_program *shader_program)
{
+ lower_atomic_state state = {
+ .shader = shader,
+ .shader_program = shader_program,
+ };
+
nir_foreach_overload(shader, overload) {
if (overload->impl) {
- nir_foreach_block(overload->impl, lower_block, overload->impl);
+ nir_foreach_block(overload->impl, lower_block, (void *) &state);
nir_metadata_preserve(overload->impl, nir_metadata_block_index |
nir_metadata_dominance);
}
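
Pulled out of lower_instr() for clarity, the lookup the pass now performs is: take the uniform's storage location, index into the program's UniformStorage, and read the per-stage opaque index the linker stored above. A self-contained sketch, with illustrative names not taken from the patch:

#include "ir_uniform.h"   /* struct gl_uniform_storage */
#include "nir.h"

/* Resolve an atomic counter variable to its buffer's intra-stage index,
 * i.e. the value the lowered intrinsic bakes into const_index[0]. */
static unsigned
atomic_buffer_index(const struct gl_shader_program *prog,
                    const nir_shader *shader,
                    const nir_variable *var)
{
   /* data.location indexes the program's UniformStorage array... */
   unsigned loc = var->data.location;

   /* ...and opaque[stage].index is the position within this stage's
    * AtomicBuffers list, assigned by
    * link_assign_atomic_counter_resources() above. */
   return prog->UniformStorage[loc].opaque[shader->stage].index;
}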