author     Eric Anholt <[email protected]>  2017-12-11 12:52:27 -0800
committer  Eric Anholt <[email protected]>  2019-01-14 15:40:55 -0800
commit     6281f26f064ada36b57d45feb68d8e7d783198c9
tree       d06588f36d6e2d273ae33cd9aaca640b3d027039 /src/broadcom
parent     5932c2f0b9b56e6eeee87baa7b0b493227850f69
v3d: Add support for shader_image_load_store.
This is only exposed on V3D 4.1+, because we didn't have the TMU write operations for images on 3.3. (To do GLES 3.1 there, you have to lower image access to SSBO load/stores, which is a problem to solve later.)
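
In practice the gate is just the compiler's V3D version. A minimal sketch of such a check, assuming a hypothetical helper (the real capability reporting lives in the gallium driver and is not part of this patch):

/* Hypothetical helper, for illustration only: the TMU gained write
 * operations for images on V3D 4.1, so earlier hardware can't take
 * this path.
 */
static bool
v3d_can_use_tmu_image_ops(int v3d_version)
{
        /* v3d_version is e.g. 33 for V3D 3.3, 41 for V3D 4.1. */
        return v3d_version >= 41;
}
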
Diffstat (limited to 'src/broadcom')
-rw-r--r--   src/broadcom/Makefile.sources                              1
-rw-r--r--   src/broadcom/compiler/meson.build                          1
-rw-r--r--   src/broadcom/compiler/nir_to_vir.c                        48
-rw-r--r--   src/broadcom/compiler/v3d40_tex.c                        182
-rw-r--r--   src/broadcom/compiler/v3d_compiler.h                      13
-rw-r--r--   src/broadcom/compiler/v3d_nir_lower_image_load_store.c  390
-rw-r--r--   src/broadcom/compiler/vir.c                                1
-rw-r--r--   src/broadcom/compiler/vir_dump.c                          19
8 files changed, 652 insertions, 3 deletions
diff --git a/src/broadcom/Makefile.sources b/src/broadcom/Makefile.sources
index f535447b476..f8710c49752 100644
--- a/src/broadcom/Makefile.sources
+++ b/src/broadcom/Makefile.sources
@@ -38,6 +38,7 @@ BROADCOM_FILES = \
compiler/v3d33_vpm_setup.c \
compiler/v3d40_tex.c \
compiler/v3d_compiler.h \
+ compiler/v3d_nir_lower_image_load_store.c \
compiler/v3d_nir_lower_io.c \
compiler/v3d_nir_lower_txf_ms.c \
qpu/qpu_disasm.c \
diff --git a/src/broadcom/compiler/meson.build b/src/broadcom/compiler/meson.build
index 86ef365aa5c..c80918db30f 100644
--- a/src/broadcom/compiler/meson.build
+++ b/src/broadcom/compiler/meson.build
@@ -36,6 +36,7 @@ libbroadcom_compiler_files = files(
'v3d33_vpm_setup.c',
'v3d_compiler.h',
'v3d_nir_lower_io.c',
+ 'v3d_nir_lower_image_load_store.c',
'v3d_nir_lower_txf_ms.c',
)
diff --git a/src/broadcom/compiler/nir_to_vir.c b/src/broadcom/compiler/nir_to_vir.c
index b8e39f357f7..f10ed5975c1 100644
--- a/src/broadcom/compiler/nir_to_vir.c
+++ b/src/broadcom/compiler/nir_to_vir.c
@@ -1693,6 +1693,32 @@ ntq_emit_ssa_undef(struct v3d_compile *c, nir_ssa_undef_instr *instr)
}
static void
+ntq_emit_image_size(struct v3d_compile *c, nir_intrinsic_instr *instr)
+{
+ assert(instr->intrinsic == nir_intrinsic_image_deref_size);
+ nir_variable *var = nir_intrinsic_get_var(instr, 0);
+ unsigned image_index = var->data.driver_location;
+ const struct glsl_type *sampler_type = glsl_without_array(var->type);
+ bool is_array = glsl_sampler_type_is_array(sampler_type);
+
+ ntq_store_dest(c, &instr->dest, 0,
+ vir_uniform(c, QUNIFORM_IMAGE_WIDTH, image_index));
+ if (instr->num_components > 1) {
+ ntq_store_dest(c, &instr->dest, 1,
+ vir_uniform(c, QUNIFORM_IMAGE_HEIGHT,
+ image_index));
+ }
+ if (instr->num_components > 2) {
+ ntq_store_dest(c, &instr->dest, 2,
+ vir_uniform(c,
+ is_array ?
+ QUNIFORM_IMAGE_ARRAY_SIZE :
+ QUNIFORM_IMAGE_DEPTH,
+ image_index));
+ }
+}
+
+static void
ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
{
unsigned offset;
@@ -1734,6 +1760,19 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
ntq_emit_tmu_general(c, instr);
break;
+ case nir_intrinsic_image_deref_load:
+ case nir_intrinsic_image_deref_store:
+ case nir_intrinsic_image_deref_atomic_add:
+ case nir_intrinsic_image_deref_atomic_min:
+ case nir_intrinsic_image_deref_atomic_max:
+ case nir_intrinsic_image_deref_atomic_and:
+ case nir_intrinsic_image_deref_atomic_or:
+ case nir_intrinsic_image_deref_atomic_xor:
+ case nir_intrinsic_image_deref_atomic_exchange:
+ case nir_intrinsic_image_deref_atomic_comp_swap:
+ v3d40_vir_emit_image_load_store(c, instr);
+ break;
+
case nir_intrinsic_get_buffer_size:
ntq_store_dest(c, &instr->dest, 0,
vir_uniform(c, QUNIFORM_GET_BUFFER_SIZE,
@@ -1807,6 +1846,10 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
offset + instr->num_components);
break;
+ case nir_intrinsic_image_deref_size:
+ ntq_emit_image_size(c, instr);
+ break;
+
case nir_intrinsic_discard:
if (c->execute.file != QFILE_NULL) {
vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
@@ -1846,6 +1889,7 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
case nir_intrinsic_memory_barrier:
case nir_intrinsic_memory_barrier_atomic_counter:
case nir_intrinsic_memory_barrier_buffer:
+ case nir_intrinsic_memory_barrier_image:
/* We don't do any instruction scheduling of these NIR
* instructions between each other, so we just need to make
* sure that the TMU operations before the barrier are flushed
@@ -2066,6 +2110,10 @@ static void
ntq_emit_instr(struct v3d_compile *c, nir_instr *instr)
{
switch (instr->type) {
+ case nir_instr_type_deref:
+ /* ignored, will be walked by the intrinsic using it. */
+ break;
+
case nir_instr_type_alu:
ntq_emit_alu(c, nir_instr_as_alu(instr));
break;
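
The imageSize() lowering above resolves each destination component to a per-image uniform. A compact restatement of that mapping, as a hypothetical helper mirroring what ntq_emit_image_size() open-codes (illustrative only, assumes the v3d_compiler.h context; not part of the patch):

static enum quniform_contents
image_size_uniform(unsigned component, bool is_array)
{
        switch (component) {
        case 0:
                return QUNIFORM_IMAGE_WIDTH;
        case 1:
                return QUNIFORM_IMAGE_HEIGHT;
        case 2:
                return is_array ? QUNIFORM_IMAGE_ARRAY_SIZE
                                : QUNIFORM_IMAGE_DEPTH;
        default:
                unreachable("imageSize() returns at most 3 components");
        }
}
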
diff --git a/src/broadcom/compiler/v3d40_tex.c b/src/broadcom/compiler/v3d40_tex.c
index 7cac6d5ca41..c547e0a850b 100644
--- a/src/broadcom/compiler/v3d40_tex.c
+++ b/src/broadcom/compiler/v3d40_tex.c
@@ -22,6 +22,7 @@
*/
#include "v3d_compiler.h"
+#include "nir_deref.h"
/* We don't do any address packing. */
#define __gen_user_data void
@@ -51,14 +52,19 @@ vir_WRTMUC(struct v3d_compile *c, enum quniform_contents contents, uint32_t data
inst->src[0] = vir_uniform(c, contents, data);
}
+static const struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked_default = {
+ .per_pixel_mask_enable = true,
+};
+
+static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
+ .op = V3D_TMU_OP_REGULAR,
+};
+
void
v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
unsigned unit = instr->texture_index;
int tmu_writes = 0;
- static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
- .op = V3D_TMU_OP_REGULAR,
- };
struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
};
@@ -229,3 +235,173 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
}
}
+
+static void
+type_size_align_1(const struct glsl_type *type, unsigned *size, unsigned *align)
+{
+ *size = 1;
+ *align = 1;
+}
+
+void
+v3d40_vir_emit_image_load_store(struct v3d_compile *c,
+ nir_intrinsic_instr *instr)
+{
+ nir_variable *var = nir_intrinsic_get_var(instr, 0);
+ const struct glsl_type *sampler_type = glsl_without_array(var->type);
+ unsigned unit = (var->data.driver_location +
+ nir_deref_instr_get_const_offset(nir_src_as_deref(instr->src[0]),
+ type_size_align_1));
+ int tmu_writes = 0;
+
+ struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
+ };
+
+ struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
+ .per_pixel_mask_enable = true,
+ .output_type_32_bit = v3d_gl_format_is_return_32(var->data.image.format),
+ };
+
+ struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = { 0 };
+
+ /* XXX perf: We should turn add/sub of 1 to inc/dec. Perhaps NIR
+ * wants to have support for inc/dec?
+ */
+ switch (instr->intrinsic) {
+ case nir_intrinsic_image_deref_load:
+ case nir_intrinsic_image_deref_store:
+ p2_unpacked.op = V3D_TMU_OP_REGULAR;
+ break;
+ case nir_intrinsic_image_deref_atomic_add:
+ p2_unpacked.op = V3D_TMU_OP_WRITE_ADD_READ_PREFETCH;
+ break;
+ case nir_intrinsic_image_deref_atomic_min:
+ p2_unpacked.op = V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
+ break;
+
+ case nir_intrinsic_image_deref_atomic_max:
+ p2_unpacked.op = V3D_TMU_OP_WRITE_UMAX;
+ break;
+ case nir_intrinsic_image_deref_atomic_and:
+ p2_unpacked.op = V3D_TMU_OP_WRITE_AND_READ_INC;
+ break;
+ case nir_intrinsic_image_deref_atomic_or:
+ p2_unpacked.op = V3D_TMU_OP_WRITE_OR_READ_DEC;
+ break;
+ case nir_intrinsic_image_deref_atomic_xor:
+ p2_unpacked.op = V3D_TMU_OP_WRITE_XOR_READ_NOT;
+ break;
+ case nir_intrinsic_image_deref_atomic_exchange:
+ p2_unpacked.op = V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
+ break;
+ case nir_intrinsic_image_deref_atomic_comp_swap:
+ p2_unpacked.op = V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
+ break;
+ default:
+ unreachable("unknown image intrinsic");
+ }
+
+ bool is_1d = false;
+ switch (glsl_get_sampler_dim(sampler_type)) {
+ case GLSL_SAMPLER_DIM_1D:
+ is_1d = true;
+ break;
+ case GLSL_SAMPLER_DIM_BUF:
+ break;
+ case GLSL_SAMPLER_DIM_2D:
+ case GLSL_SAMPLER_DIM_RECT:
+ vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
+ ntq_get_src(c, instr->src[1], 1), &tmu_writes);
+ break;
+ case GLSL_SAMPLER_DIM_3D:
+ case GLSL_SAMPLER_DIM_CUBE:
+ vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
+ ntq_get_src(c, instr->src[1], 1), &tmu_writes);
+ vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
+ ntq_get_src(c, instr->src[1], 2), &tmu_writes);
+ break;
+ default:
+ unreachable("bad image sampler dim");
+ }
+
+ if (glsl_sampler_type_is_array(sampler_type)) {
+ vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
+ ntq_get_src(c, instr->src[1],
+ is_1d ? 1 : 2), &tmu_writes);
+ }
+
+ /* Limit the number of channels returned to both how many the NIR
+ * instruction writes and how many the instruction could produce.
+ */
+ uint32_t instr_return_channels = nir_intrinsic_dest_components(instr);
+ if (!p1_unpacked.output_type_32_bit)
+ instr_return_channels = (instr_return_channels + 1) / 2;
+
+ p0_unpacked.return_words_of_texture_data =
+ (1 << instr_return_channels) - 1;
+
+ uint32_t p0_packed;
+ V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
+ (uint8_t *)&p0_packed,
+ &p0_unpacked);
+
+ uint32_t p1_packed;
+ V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
+ (uint8_t *)&p1_packed,
+ &p1_unpacked);
+
+ uint32_t p2_packed;
+ V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
+ (uint8_t *)&p2_packed,
+ &p2_unpacked);
+
+ /* Load unit number into the high bits of the texture or sampler
+ * address field, which will be used by the driver to decide which
+ * texture to put in the actual address field.
+ */
+ p0_packed |= unit << 24;
+
+ vir_WRTMUC(c, QUNIFORM_IMAGE_TMU_CONFIG_P0, p0_packed);
+ if (memcmp(&p1_unpacked, &p1_unpacked_default, sizeof(p1_unpacked)) != 0)
+ vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
+ if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0)
+ vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);
+
+ /* Emit the data writes for atomics or image store. */
+ if (instr->intrinsic != nir_intrinsic_image_deref_load) {
+ /* Vector for stores, or first atomic argument */
+ struct qreg src[4];
+ for (int i = 0; i < nir_intrinsic_src_components(instr, 3); i++) {
+ src[i] = ntq_get_src(c, instr->src[3], i);
+ vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD, src[i],
+ &tmu_writes);
+ }
+
+ /* Second atomic argument */
+ if (instr->intrinsic ==
+ nir_intrinsic_image_deref_atomic_comp_swap) {
+ vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD,
+ ntq_get_src(c, instr->src[4], 0),
+ &tmu_writes);
+ }
+ }
+
+ vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, ntq_get_src(c, instr->src[1], 0),
+ &tmu_writes);
+
+ vir_emit_thrsw(c);
+
+ /* The input FIFO has 16 slots across all threads, so make sure we
+ * don't overfill our allocation.
+ */
+ while (tmu_writes > 16 / c->threads)
+ c->threads /= 2;
+
+ for (int i = 0; i < 4; i++) {
+ if (p0_unpacked.return_words_of_texture_data & (1 << i))
+ ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
+ }
+
+ if (nir_intrinsic_dest_components(instr) == 0)
+ vir_TMUWT(c);
+}
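
One step above deserves a worked example: the TMU input FIFO has 16 slots shared across all threads, so each of c->threads threads may queue 16 / c->threads writes, and the final while loop halves the thread count until the queued writes fit. A standalone sketch of that arithmetic (self-contained demo, not driver code):

#include <assert.h>

int main(void)
{
        /* At 4 threads each thread gets 16/4 = 4 FIFO slots, so 5
         * queued TMU writes force a drop to 2 threads (8 slots each).
         */
        int threads = 4;
        int tmu_writes = 5;
        while (tmu_writes > 16 / threads)
                threads /= 2;
        assert(threads == 2);

        /* 9 writes exceed even 8 slots, forcing single-threaded mode. */
        threads = 4;
        tmu_writes = 9;
        while (tmu_writes > 16 / threads)
                threads /= 2;
        assert(threads == 1);

        return 0;
}
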
diff --git a/src/broadcom/compiler/v3d_compiler.h b/src/broadcom/compiler/v3d_compiler.h
index a35a46c3316..a5f99d3dae8 100644
--- a/src/broadcom/compiler/v3d_compiler.h
+++ b/src/broadcom/compiler/v3d_compiler.h
@@ -230,6 +230,8 @@ enum quniform_contents {
QUNIFORM_TMU_CONFIG_P0,
QUNIFORM_TMU_CONFIG_P1,
+ QUNIFORM_IMAGE_TMU_CONFIG_P0,
+
QUNIFORM_TEXTURE_FIRST_LEVEL,
QUNIFORM_TEXTURE_WIDTH,
@@ -249,6 +251,12 @@ enum quniform_contents {
/* Returns the size of the SSBO given by the data value. */
QUNIFORM_GET_BUFFER_SIZE,
+ /* Sizes (in pixels) of a shader image given by the data value. */
+ QUNIFORM_IMAGE_WIDTH,
+ QUNIFORM_IMAGE_HEIGHT,
+ QUNIFORM_IMAGE_DEPTH,
+ QUNIFORM_IMAGE_ARRAY_SIZE,
+
QUNIFORM_ALPHA_REF,
/**
@@ -792,12 +800,15 @@ bool vir_opt_vpm(struct v3d_compile *c);
void v3d_nir_lower_blend(nir_shader *s, struct v3d_compile *c);
void v3d_nir_lower_io(nir_shader *s, struct v3d_compile *c);
void v3d_nir_lower_txf_ms(nir_shader *s, struct v3d_compile *c);
+void v3d_nir_lower_image_load_store(nir_shader *s);
void vir_lower_uniforms(struct v3d_compile *c);
void v3d33_vir_vpm_read_setup(struct v3d_compile *c, int num_components);
void v3d33_vir_vpm_write_setup(struct v3d_compile *c);
void v3d33_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr);
void v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr);
+void v3d40_vir_emit_image_load_store(struct v3d_compile *c,
+ nir_intrinsic_instr *instr);
void v3d_vir_to_qpu(struct v3d_compile *c, struct qpu_reg *temp_registers);
uint32_t v3d_qpu_schedule_instructions(struct v3d_compile *c);
@@ -805,6 +816,8 @@ void qpu_validate(struct v3d_compile *c);
struct qpu_reg *v3d_register_allocate(struct v3d_compile *c, bool *spilled);
bool vir_init_reg_sets(struct v3d_compiler *compiler);
+bool v3d_gl_format_is_return_32(GLenum format);
+
void vir_PF(struct v3d_compile *c, struct qreg src, enum v3d_qpu_pf pf);
static inline bool
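
On the uniform upload side (which lives in the gallium driver, outside this patch), each of the new QUNIFORM_IMAGE_* enums is resolved against the bound image's dimensions. A hypothetical sketch of that consumer; the struct and function names are invented for illustration and do not appear in this patch:

#include <stdint.h>
#include "v3d_compiler.h" /* for enum quniform_contents */

struct example_image_view {
        uint32_t width, height, depth, array_size;
};

/* Hypothetical: returns the data value the driver would upload for
 * one of the image-size uniforms.
 */
static uint32_t
resolve_image_size_uniform(enum quniform_contents contents,
                           const struct example_image_view *view)
{
        switch (contents) {
        case QUNIFORM_IMAGE_WIDTH:
                return view->width;
        case QUNIFORM_IMAGE_HEIGHT:
                return view->height;
        case QUNIFORM_IMAGE_DEPTH:
                return view->depth;
        case QUNIFORM_IMAGE_ARRAY_SIZE:
                return view->array_size;
        default:
                unreachable("not an image size uniform");
        }
}
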
diff --git a/src/broadcom/compiler/v3d_nir_lower_image_load_store.c b/src/broadcom/compiler/v3d_nir_lower_image_load_store.c
new file mode 100644
index 00000000000..e74206b3949
--- /dev/null
+++ b/src/broadcom/compiler/v3d_nir_lower_image_load_store.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright © 2018 Intel Corporation
+ * Copyright © 2018 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "v3d_compiler.h"
+#include "compiler/nir/nir_builder.h"
+#include "compiler/nir/nir_format_convert.h"
+
+/** @file v3d_nir_lower_image_load_store.c
+ *
+ * Performs any necessary lowering of GL_ARB_shader_image_load_store
+ * operations.
+ *
+ * On V3D 4.x, we just need to do format conversion for stores such that the
+ * GPU can effectively memcpy the arguments (in increments of 32-bit words)
+ * into the texel. Loads are the same as texturing, where we may need to
+ * unpack from 16-bit ints or floats.
+ *
+ * On V3D 3.x, to implement image load store we would need to do manual tiling
+ * calculations and load/store using the TMU general memory access path.
+ */
+
+bool
+v3d_gl_format_is_return_32(GLenum format)
+{
+ switch (format) {
+ case GL_R8:
+ case GL_R8_SNORM:
+ case GL_R8UI:
+ case GL_R8I:
+ case GL_RG8:
+ case GL_RG8_SNORM:
+ case GL_RG8UI:
+ case GL_RG8I:
+ case GL_RGBA8:
+ case GL_RGBA8_SNORM:
+ case GL_RGBA8UI:
+ case GL_RGBA8I:
+ case GL_R11F_G11F_B10F:
+ case GL_RGB10_A2:
+ case GL_RGB10_A2UI:
+ case GL_R16F:
+ case GL_R16UI:
+ case GL_R16I:
+ case GL_RG16F:
+ case GL_RG16UI:
+ case GL_RG16I:
+ case GL_RGBA16F:
+ case GL_RGBA16UI:
+ case GL_RGBA16I:
+ return false;
+ case GL_R16:
+ case GL_R16_SNORM:
+ case GL_RG16:
+ case GL_RG16_SNORM:
+ case GL_RGBA16:
+ case GL_RGBA16_SNORM:
+ case GL_R32F:
+ case GL_R32UI:
+ case GL_R32I:
+ case GL_RG32F:
+ case GL_RG32UI:
+ case GL_RG32I:
+ case GL_RGBA32F:
+ case GL_RGBA32UI:
+ case GL_RGBA32I:
+ return true;
+ default:
+ unreachable("Invalid image format");
+ }
+}
+
+/* Packs a 32-bit vector of colors in the range [0, (1 << bits[i]) - 1] to a
+ * 32-bit SSA value, with as many channels as necessary to store all the bits
+ */
+static nir_ssa_def *
+pack_bits(nir_builder *b, nir_ssa_def *color, const unsigned *bits,
+ int num_components, bool mask)
+{
+ nir_ssa_def *results[4];
+ int offset = 0;
+ for (int i = 0; i < num_components; i++) {
+ nir_ssa_def *chan = nir_channel(b, color, i);
+
+ /* Channels being stored shouldn't cross a 32-bit boundary. */
+ assert((offset & ~31) == ((offset + bits[i] - 1) & ~31));
+
+ if (mask) {
+ chan = nir_iand(b, chan,
+ nir_imm_int(b, (1 << bits[i]) - 1));
+ }
+
+ if (offset % 32 == 0) {
+ results[offset / 32] = chan;
+ } else {
+ results[offset / 32] =
+ nir_ior(b, results[offset / 32],
+ nir_ishl(b, chan,
+ nir_imm_int(b, offset % 32)));
+ }
+ offset += bits[i];
+ }
+
+ return nir_vec(b, results, DIV_ROUND_UP(offset, 32));
+}
+
+static nir_ssa_def *
+pack_unorm(nir_builder *b, nir_ssa_def *color, const unsigned *bits,
+ int num_components)
+{
+ color = nir_channels(b, color, (1 << num_components) - 1);
+ color = nir_format_float_to_unorm(b, color, bits);
+ return pack_bits(b, color, bits, color->num_components, false);
+}
+
+static nir_ssa_def *
+pack_snorm(nir_builder *b, nir_ssa_def *color, const unsigned *bits,
+ int num_components)
+{
+ color = nir_channels(b, color, (1 << num_components) - 1);
+ color = nir_format_float_to_snorm(b, color, bits);
+ return pack_bits(b, color, bits, color->num_components, true);
+}
+
+static nir_ssa_def *
+pack_uint(nir_builder *b, nir_ssa_def *color, const unsigned *bits,
+ int num_components)
+{
+ color = nir_channels(b, color, (1 << num_components) - 1);
+ color = nir_format_clamp_uint(b, color, bits);
+ return pack_bits(b, color, bits, num_components, false);
+}
+
+static nir_ssa_def *
+pack_sint(nir_builder *b, nir_ssa_def *color, const unsigned *bits,
+ int num_components)
+{
+ color = nir_channels(b, color, (1 << num_components) - 1);
+ color = nir_format_clamp_sint(b, color, bits);
+ return pack_bits(b, color, bits, num_components, true);
+}
+
+static nir_ssa_def *
+pack_half(nir_builder *b, nir_ssa_def *color, const unsigned *bits,
+ int num_components)
+{
+ color = nir_channels(b, color, (1 << num_components) - 1);
+ color = nir_format_float_to_half(b, color);
+ return pack_bits(b, color, bits, color->num_components, false);
+}
+
+static void
+v3d_nir_lower_image_store(nir_builder *b, nir_intrinsic_instr *instr)
+{
+ nir_variable *var = nir_intrinsic_get_var(instr, 0);
+ GLenum format = var->data.image.format;
+ static const unsigned bits_8[4] = {8, 8, 8, 8};
+ static const unsigned bits_16[4] = {16, 16, 16, 16};
+ static const unsigned bits_1010102[4] = {10, 10, 10, 2};
+
+ b->cursor = nir_before_instr(&instr->instr);
+
+ nir_ssa_def *unformatted = nir_ssa_for_src(b, instr->src[3], 4);
+ nir_ssa_def *formatted = NULL;
+ switch (format) {
+ case GL_RGBA32F:
+ case GL_RGBA32UI:
+ case GL_RGBA32I:
+ /* For 4-component formats with 32-bit components, there's no
+ * packing to be done.
+ */
+ return;
+
+ case GL_R32F:
+ case GL_R32UI:
+ case GL_R32I:
+ /* For other 32-bit components, just reduce the size of
+ * the input vector.
+ */
+ formatted = nir_channels(b, unformatted, 1);
+ break;
+ case GL_RG32F:
+ case GL_RG32UI:
+ case GL_RG32I:
+ formatted = nir_channels(b, unformatted, 0x3);
+ break;
+
+ case GL_R8:
+ formatted = pack_unorm(b, unformatted, bits_8, 1);
+ break;
+ case GL_RG8:
+ formatted = pack_unorm(b, unformatted, bits_8, 2);
+ break;
+ case GL_RGBA8:
+ formatted = pack_unorm(b, unformatted, bits_8, 4);
+ break;
+
+ case GL_R8_SNORM:
+ formatted = pack_snorm(b, unformatted, bits_8, 1);
+ break;
+ case GL_RG8_SNORM:
+ formatted = pack_snorm(b, unformatted, bits_8, 2);
+ break;
+ case GL_RGBA8_SNORM:
+ formatted = pack_snorm(b, unformatted, bits_8, 4);
+ break;
+
+ case GL_R16:
+ formatted = pack_unorm(b, unformatted, bits_16, 1);
+ break;
+ case GL_RG16:
+ formatted = pack_unorm(b, unformatted, bits_16, 2);
+ break;
+ case GL_RGBA16:
+ formatted = pack_unorm(b, unformatted, bits_16, 4);
+ break;
+
+ case GL_R16_SNORM:
+ formatted = pack_snorm(b, unformatted, bits_16, 1);
+ break;
+ case GL_RG16_SNORM:
+ formatted = pack_snorm(b, unformatted, bits_16, 2);
+ break;
+ case GL_RGBA16_SNORM:
+ formatted = pack_snorm(b, unformatted, bits_16, 4);
+ break;
+
+ case GL_R16F:
+ formatted = pack_half(b, unformatted, bits_16, 1);
+ break;
+ case GL_RG16F:
+ formatted = pack_half(b, unformatted, bits_16, 2);
+ break;
+ case GL_RGBA16F:
+ formatted = pack_half(b, unformatted, bits_16, 4);
+ break;
+
+ case GL_R8UI:
+ formatted = pack_uint(b, unformatted, bits_8, 1);
+ break;
+ case GL_R8I:
+ formatted = pack_sint(b, unformatted, bits_8, 1);
+ break;
+ case GL_RG8UI:
+ formatted = pack_uint(b, unformatted, bits_8, 2);
+ break;
+ case GL_RG8I:
+ formatted = pack_sint(b, unformatted, bits_8, 2);
+ break;
+ case GL_RGBA8UI:
+ formatted = pack_uint(b, unformatted, bits_8, 4);
+ break;
+ case GL_RGBA8I:
+ formatted = pack_sint(b, unformatted, bits_8, 4);
+ break;
+
+ case GL_R16UI:
+ formatted = pack_uint(b, unformatted, bits_16, 1);
+ break;
+ case GL_R16I:
+ formatted = pack_sint(b, unformatted, bits_16, 1);
+ break;
+ case GL_RG16UI:
+ formatted = pack_uint(b, unformatted, bits_16, 2);
+ break;
+ case GL_RG16I:
+ formatted = pack_sint(b, unformatted, bits_16, 2);
+ break;
+ case GL_RGBA16UI:
+ formatted = pack_uint(b, unformatted, bits_16, 4);
+ break;
+ case GL_RGBA16I:
+ formatted = pack_sint(b, unformatted, bits_16, 4);
+ break;
+
+ case GL_R11F_G11F_B10F:
+ formatted = nir_format_pack_11f11f10f(b, unformatted);
+ break;
+ case GL_RGB9_E5:
+ formatted = nir_format_pack_r9g9b9e5(b, unformatted);
+ break;
+
+ case GL_RGB10_A2:
+ formatted = pack_unorm(b, unformatted, bits_1010102, 4);
+ break;
+
+ case GL_RGB10_A2UI:
+ formatted = pack_uint(b, unformatted, bits_1010102, 4);
+ break;
+
+ default:
+ unreachable("bad format");
+ }
+
+ nir_instr_rewrite_src(&instr->instr, &instr->src[3],
+ nir_src_for_ssa(formatted));
+ instr->num_components = formatted->num_components;
+}
+
+static void
+v3d_nir_lower_image_load(nir_builder *b, nir_intrinsic_instr *instr)
+{
+ static const unsigned bits16[] = {16, 16, 16, 16};
+ nir_variable *var = nir_intrinsic_get_var(instr, 0);
+ const struct glsl_type *sampler_type = glsl_without_array(var->type);
+ enum glsl_base_type base_type =
+ glsl_get_sampler_result_type(sampler_type);
+
+ if (v3d_gl_format_is_return_32(var->data.image.format))
+ return;
+
+ b->cursor = nir_after_instr(&instr->instr);
+
+ assert(instr->dest.is_ssa);
+ nir_ssa_def *result = &instr->dest.ssa;
+ if (base_type == GLSL_TYPE_FLOAT) {
+ nir_ssa_def *rg = nir_channel(b, result, 0);
+ nir_ssa_def *ba = nir_channel(b, result, 1);
+ result = nir_vec4(b,
+ nir_unpack_half_2x16_split_x(b, rg),
+ nir_unpack_half_2x16_split_y(b, rg),
+ nir_unpack_half_2x16_split_x(b, ba),
+ nir_unpack_half_2x16_split_y(b, ba));
+ } else if (base_type == GLSL_TYPE_INT) {
+ result = nir_format_unpack_sint(b, result, bits16, 4);
+ } else {
+ assert(base_type == GLSL_TYPE_UINT);
+ result = nir_format_unpack_uint(b, result, bits16, 4);
+ }
+
+ nir_ssa_def_rewrite_uses_after(&instr->dest.ssa, nir_src_for_ssa(result),
+ result->parent_instr);
+}
+
+void
+v3d_nir_lower_image_load_store(nir_shader *s)
+{
+ nir_foreach_function(function, s) {
+ if (!function->impl)
+ continue;
+
+ nir_builder b;
+ nir_builder_init(&b, function->impl);
+
+ nir_foreach_block(block, function->impl) {
+ nir_foreach_instr_safe(instr, block) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *intr =
+ nir_instr_as_intrinsic(instr);
+
+ switch (intr->intrinsic) {
+ case nir_intrinsic_image_deref_load:
+ v3d_nir_lower_image_load(&b, intr);
+ break;
+ case nir_intrinsic_image_deref_store:
+ v3d_nir_lower_image_store(&b, intr);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ nir_metadata_preserve(function->impl,
+ nir_metadata_block_index |
+ nir_metadata_dominance);
+ }
+}
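
The heart of the store path is pack_bits(), which masks, shifts, and ORs channels of bits[i] width into successive 32-bit words. A CPU-side analogue for the GL_RGBA8 case (standalone demo, not driver code; the pass itself emits the equivalent nir_iand/nir_ishl/nir_ior chain):

#include <assert.h>
#include <stdint.h>

static uint32_t
pack_bits_rgba8(const uint32_t chan[4])
{
        static const unsigned bits[4] = {8, 8, 8, 8};
        uint32_t packed = 0;
        unsigned offset = 0;

        for (int i = 0; i < 4; i++) {
                /* Mask each channel to its width, then OR it in at the
                 * running bit offset, as pack_bits() does when mask is
                 * true.
                 */
                packed |= (chan[i] & ((1u << bits[i]) - 1)) << offset;
                offset += bits[i];
        }

        return packed;
}

int main(void)
{
        /* Opaque red: R=0xff, G=0, B=0, A=0xff packs to 0xff0000ff. */
        const uint32_t red[4] = {0xff, 0x00, 0x00, 0xff};
        assert(pack_bits_rgba8(red) == 0xff0000ff);
        return 0;
}
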
diff --git a/src/broadcom/compiler/vir.c b/src/broadcom/compiler/vir.c
index ae7b362f3ba..55a02123322 100644
--- a/src/broadcom/compiler/vir.c
+++ b/src/broadcom/compiler/vir.c
@@ -976,6 +976,7 @@ uint64_t *v3d_compile(const struct v3d_compiler *compiler,
NIR_PASS_V(c->s, v3d_nir_lower_io, c);
NIR_PASS_V(c->s, v3d_nir_lower_txf_ms, c);
+ NIR_PASS_V(c->s, v3d_nir_lower_image_load_store);
NIR_PASS_V(c->s, nir_lower_idiv);
v3d_optimize_nir(c->s);
diff --git a/src/broadcom/compiler/vir_dump.c b/src/broadcom/compiler/vir_dump.c
index 5bef6c6a42d..028e2b36c4b 100644
--- a/src/broadcom/compiler/vir_dump.c
+++ b/src/broadcom/compiler/vir_dump.c
@@ -61,6 +61,12 @@ vir_dump_uniform(enum quniform_contents contents,
v3d_tmu_config_data_get_value(data));
break;
+ case QUNIFORM_IMAGE_TMU_CONFIG_P0:
+ fprintf(stderr, "img[%d].p0 | 0x%x",
+ v3d_tmu_config_data_get_unit(data),
+ v3d_tmu_config_data_get_value(data));
+ break;
+
case QUNIFORM_TEXTURE_WIDTH:
fprintf(stderr, "tex[%d].width", data);
break;
@@ -77,6 +83,19 @@ vir_dump_uniform(enum quniform_contents contents,
fprintf(stderr, "tex[%d].levels", data);
break;
+ case QUNIFORM_IMAGE_WIDTH:
+ fprintf(stderr, "img[%d].width", data);
+ break;
+ case QUNIFORM_IMAGE_HEIGHT:
+ fprintf(stderr, "img[%d].height", data);
+ break;
+ case QUNIFORM_IMAGE_DEPTH:
+ fprintf(stderr, "img[%d].depth", data);
+ break;
+ case QUNIFORM_IMAGE_ARRAY_SIZE:
+ fprintf(stderr, "img[%d].array_size", data);
+ break;
+
case QUNIFORM_UBO_ADDR:
fprintf(stderr, "ubo[%d]", data);
break;
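
With those cases in place, the image uniforms dump in the same style as the existing texture ones. Illustrative output shapes, derived from the format strings above with invented unit numbers and data values:

img[0].p0 | 0x20
img[0].width
img[0].height
img[1].array_size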