Diffstat (limited to 'src/glsl')
-rw-r--r--  src/glsl/nir/glsl_to_nir.cpp  | 29
-rw-r--r--  src/glsl/nir/nir_intrinsics.h |  1
2 files changed, 30 insertions, 0 deletions
diff --git a/src/glsl/nir/glsl_to_nir.cpp b/src/glsl/nir/glsl_to_nir.cpp
index 45d045cd4d6..38f980465b6 100644
--- a/src/glsl/nir/glsl_to_nir.cpp
+++ b/src/glsl/nir/glsl_to_nir.cpp
@@ -727,6 +727,8 @@ nir_visitor::visit(ir_call *ir)
         op = nir_intrinsic_memory_barrier_image;
      } else if (strcmp(ir->callee_name(), "__intrinsic_memory_barrier_shared") == 0) {
         op = nir_intrinsic_memory_barrier_shared;
+      } else if (strcmp(ir->callee_name(), "__intrinsic_load_shared") == 0) {
+         op = nir_intrinsic_load_shared;
      } else {
         unreachable("not reached");
      }
@@ -974,6 +976,33 @@ nir_visitor::visit(ir_call *ir)
         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
+      case nir_intrinsic_load_shared: {
+         exec_node *param = ir->actual_parameters.get_head();
+         ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();
+
+         /* Check if we need the indirect version */
+         ir_constant *const_offset = offset->as_constant();
+         if (!const_offset) {
+            op = nir_intrinsic_load_shared_indirect;
+            ralloc_free(instr);
+            instr = nir_intrinsic_instr_create(shader, op);
+            instr->src[0] = nir_src_for_ssa(evaluate_rvalue(offset));
+            instr->const_index[0] = 0;
+            dest = &instr->dest;
+         } else {
+            instr->const_index[0] = const_offset->value.u[0];
+         }
+
+         const glsl_type *type = ir->return_deref->var->type;
+         instr->num_components = type->vector_elements;
+
+         /* Setup destination register */
+         nir_ssa_dest_init(&instr->instr, &instr->dest,
+                           type->vector_elements, NULL);
+
+         nir_builder_instr_insert(&b, &instr->instr);
+         break;
+      }
      default:
         unreachable("not reached");
      }
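
For context, a minimal GLSL compute-shader sketch (not part of this patch; variable names are illustrative) of the two access patterns the new case distinguishes: a shared-memory read at a compile-time-constant offset, which can use the direct load_shared form with the offset in const_index[0], and a read at a dynamic offset, which falls back to load_shared_indirect with the offset passed in src[0]. The __intrinsic_load_shared call that reaches this visitor is assumed to be produced by a separate GLSL IR lowering pass.

#version 430
layout(local_size_x = 64) in;

/* "scratch" and "Out" are hypothetical names for illustration only. */
shared float scratch[64];

layout(std430, binding = 0) buffer Out {
   float result[];
};

void main()
{
   uint id = gl_LocalInvocationID.x;
   scratch[id] = float(id);

   barrier();
   memoryBarrierShared();

   /* Constant index: if the lowered offset is an ir_constant, the direct
    * nir_intrinsic_load_shared form is used and the offset lands in
    * const_index[0]. */
   float a = scratch[0];

   /* Dynamic index: the lowered offset is not an ir_constant, so the
    * nir_intrinsic_load_shared_indirect form is used and the offset is
    * supplied through src[0]. */
   float b = scratch[(id + 1u) & 63u];

   result[id] = a + b;
}
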
diff --git a/src/glsl/nir/nir_intrinsics.h b/src/glsl/nir/nir_intrinsics.h
index b2565c54b20..011b95db9d6 100644
--- a/src/glsl/nir/nir_intrinsics.h
+++ b/src/glsl/nir/nir_intrinsics.h
@@ -258,6 +258,7 @@ LOAD(per_vertex_input, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REO
LOAD(ssbo, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE)
LOAD(output, 0, 1, NIR_INTRINSIC_CAN_ELIMINATE)
LOAD(per_vertex_output, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE)
+LOAD(shared, 0, 1, NIR_INTRINSIC_CAN_ELIMINATE)
/*
* Stores work the same way as loads, except now the first register input is