Diffstat (limited to 'src/freedreno')
-rw-r--r-- | src/freedreno/ir3/ir3_a4xx.c         | 49 |
-rw-r--r-- | src/freedreno/ir3/ir3_a6xx.c         | 41 |
-rw-r--r-- | src/freedreno/ir3/ir3_compiler_nir.c | 28 |
-rw-r--r-- | src/freedreno/ir3/ir3_nir.c          |  1 |
4 files changed, 59 insertions, 60 deletions
diff --git a/src/freedreno/ir3/ir3_a4xx.c b/src/freedreno/ir3/ir3_a4xx.c
index 1f86cd5533c..609ecf00c1e 100644
--- a/src/freedreno/ir3/ir3_a4xx.c
+++ b/src/freedreno/ir3/ir3_a4xx.c
@@ -40,7 +40,7 @@ emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
 		struct ir3_instruction **dst)
 {
 	struct ir3_block *b = ctx->block;
-	struct ir3_instruction *ldgb, *src0, *src1, *offset;
+	struct ir3_instruction *ldgb, *src0, *src1, *byte_offset, *offset;
 	nir_const_value *const_offset;
 
 	/* can this be non-const buffer_index? how do we handle that? */
@@ -49,14 +49,15 @@ emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
 	int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping,
 			const_offset->u32[0]);
 
-	offset = ir3_get_src(ctx, &intr->src[1])[0];
+	byte_offset = ir3_get_src(ctx, &intr->src[1])[0];
+	offset = ir3_get_src(ctx, &intr->src[2])[0];
 
 	/* src0 is uvec2(offset*4, 0), src1 is offset.. nir already *= 4: */
 	src0 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
-		offset,
+		byte_offset,
 		create_immed(b, 0),
 	}, 2);
-	src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
+	src1 = offset;
 
 	ldgb = ir3_LDGB(b, create_immed(b, ibo_idx), 0,
 			src0, 0, src1, 0);
@@ -75,7 +76,7 @@ static void
 emit_intrinsic_store_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 {
 	struct ir3_block *b = ctx->block;
-	struct ir3_instruction *stgb, *src0, *src1, *src2, *offset;
+	struct ir3_instruction *stgb, *src0, *src1, *src2, *byte_offset, *offset;
 	nir_const_value *const_offset;
 	/* TODO handle wrmask properly, see _store_shared().. but I think
 	 * it is more a PITA than that, since blob ends up loading the
@@ -90,15 +91,16 @@ emit_intrinsic_store_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 	int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping,
 			const_offset->u32[0]);
 
-	offset = ir3_get_src(ctx, &intr->src[2])[0];
+	byte_offset = ir3_get_src(ctx, &intr->src[2])[0];
+	offset = ir3_get_src(ctx, &intr->src[3])[0];
 
 	/* src0 is value, src1 is offset, src2 is uvec2(offset*4, 0)..
 	 * nir already *= 4:
 	 */
 	src0 = ir3_create_collect(ctx, ir3_get_src(ctx, &intr->src[0]), ncomp);
-	src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
+	src1 = offset;
 	src2 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
-		offset,
+		byte_offset,
 		create_immed(b, 0),
 	}, 2);
 
@@ -133,7 +135,8 @@ static struct ir3_instruction *
 emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 {
 	struct ir3_block *b = ctx->block;
-	struct ir3_instruction *atomic, *ssbo, *src0, *src1, *src2, *offset;
+	struct ir3_instruction *atomic, *ssbo, *src0, *src1, *src2, *byte_offset,
+		*offset;
 	nir_const_value *const_offset;
 	type_t type = TYPE_U32;
 
@@ -144,7 +147,8 @@ emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 	int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping,
 			const_offset->u32[0]);
 	ssbo = create_immed(b, ibo_idx);
 
-	offset = ir3_get_src(ctx, &intr->src[1])[0];
+	byte_offset = ir3_get_src(ctx, &intr->src[1])[0];
+	offset = ir3_get_src(ctx, &intr->src[3])[0];
 
 	/* src0 is data (or uvec2(data, compare))
 	 * src1 is offset
@@ -153,48 +157,49 @@ emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 	 * Note that nir already multiplies the offset by four
 	 */
 	src0 = ir3_get_src(ctx, &intr->src[2])[0];
-	src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
+	src1 = offset;
 	src2 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
-		offset,
+		byte_offset,
 		create_immed(b, 0),
 	}, 2);
 
 	switch (intr->intrinsic) {
-	case nir_intrinsic_ssbo_atomic_add:
+	case nir_intrinsic_ssbo_atomic_add_ir3:
 		atomic = ir3_ATOMIC_ADD_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_imin:
+	case nir_intrinsic_ssbo_atomic_imin_ir3:
 		atomic = ir3_ATOMIC_MIN_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		type = TYPE_S32;
 		break;
-	case nir_intrinsic_ssbo_atomic_umin:
+	case nir_intrinsic_ssbo_atomic_umin_ir3:
 		atomic = ir3_ATOMIC_MIN_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_imax:
+	case nir_intrinsic_ssbo_atomic_imax_ir3:
 		atomic = ir3_ATOMIC_MAX_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		type = TYPE_S32;
 		break;
-	case nir_intrinsic_ssbo_atomic_umax:
+	case nir_intrinsic_ssbo_atomic_umax_ir3:
 		atomic = ir3_ATOMIC_MAX_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_and:
+	case nir_intrinsic_ssbo_atomic_and_ir3:
 		atomic = ir3_ATOMIC_AND_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_or:
+	case nir_intrinsic_ssbo_atomic_or_ir3:
 		atomic = ir3_ATOMIC_OR_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_xor:
+	case nir_intrinsic_ssbo_atomic_xor_ir3:
 		atomic = ir3_ATOMIC_XOR_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_exchange:
+	case nir_intrinsic_ssbo_atomic_exchange_ir3:
 		atomic = ir3_ATOMIC_XCHG_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_comp_swap:
+	case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
 		/* for cmpxchg, src0 is [ui]vec2(data, compare): */
 		src0 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
 			ir3_get_src(ctx, &intr->src[3])[0], src0,
 		}, 2);
+		src1 = ir3_get_src(ctx, &intr->src[4])[0];
 		atomic = ir3_ATOMIC_CMPXCHG_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		break;
 	default:
diff --git a/src/freedreno/ir3/ir3_a6xx.c b/src/freedreno/ir3/ir3_a6xx.c
index 00260a4c534..048b84c3370 100644
--- a/src/freedreno/ir3/ir3_a6xx.c
+++ b/src/freedreno/ir3/ir3_a6xx.c
@@ -38,17 +38,6 @@
  */
 
-static struct ir3_instruction *
-ssbo_offset(struct ir3_block *b, struct ir3_instruction *byte_offset)
-{
-	/* TODO hardware wants offset in terms of elements, not bytes. Which
-	 * is kinda nice but opposite of what nir does. It would be nice if
-	 * we had a way to request the units of the offset to avoid the extra
-	 * shift instructions..
-	 */
-	return ir3_SHR_B(b, byte_offset, 0, create_immed(b, 2), 0);
-}
-
 /* src[] = { buffer_index, offset }. No const_index */
 static void
 emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
@@ -65,7 +54,7 @@ emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
 	int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping,
 			buffer_index->u32[0]);
 
-	offset = ssbo_offset(b, ir3_get_src(ctx, &intr->src[1])[0]);
+	offset = ir3_get_src(ctx, &intr->src[2])[0];
 
 	ldib = ir3_LDIB(b, create_immed(b, ibo_idx), 0, offset, 0);
 	ldib->regs[0]->wrmask = MASK(intr->num_components);
@@ -101,7 +90,7 @@ emit_intrinsic_store_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 
 	/* src0 is offset, src1 is value: */
 	val = ir3_create_collect(ctx, ir3_get_src(ctx, &intr->src[0]), ncomp);
-	offset = ssbo_offset(b, ir3_get_src(ctx, &intr->src[2])[0]);
+	offset = ir3_get_src(ctx, &intr->src[3])[0];
 
 	stib = ir3_STIB(b, create_immed(b, ibo_idx), 0, offset, 0, val, 0);
 	stib->cat6.iim_val = ncomp;
@@ -134,7 +123,7 @@ static struct ir3_instruction *
 emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 {
 	struct ir3_block *b = ctx->block;
-	struct ir3_instruction *atomic, *ibo, *src0, *src1, *offset, *data, *dummy;
+	struct ir3_instruction *atomic, *ibo, *src0, *src1, *data, *dummy;
 	nir_const_value *buffer_index;
 	type_t type = TYPE_U32;
 
@@ -145,7 +134,6 @@ emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 	int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping,
 			buffer_index->u32[0]);
 	ibo = create_immed(b, ibo_idx);
 
-	offset = ir3_get_src(ctx, &intr->src[1])[0];
 	data = ir3_get_src(ctx, &intr->src[2])[0];
 
 	/* So this gets a bit creative:
@@ -163,50 +151,51 @@ emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 	 * Note that nir already multiplies the offset by four
 	 */
 	dummy = create_immed(b, 0);
-	src0 = ssbo_offset(b, offset);
 
 	if (intr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap) {
+		src0 = ir3_get_src(ctx, &intr->src[4])[0];
 		struct ir3_instruction *compare = ir3_get_src(ctx, &intr->src[3])[0];
 		src1 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
 			dummy, compare, data
 		}, 3);
 	} else {
+		src0 = ir3_get_src(ctx, &intr->src[3])[0];
 		src1 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
 			dummy, data
 		}, 2);
 	}
 
 	switch (intr->intrinsic) {
-	case nir_intrinsic_ssbo_atomic_add:
+	case nir_intrinsic_ssbo_atomic_add_ir3:
 		atomic = ir3_ATOMIC_ADD_G(b, ibo, 0, src0, 0, src1, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_imin:
+	case nir_intrinsic_ssbo_atomic_imin_ir3:
 		atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
 		type = TYPE_S32;
 		break;
-	case nir_intrinsic_ssbo_atomic_umin:
+	case nir_intrinsic_ssbo_atomic_umin_ir3:
 		atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_imax:
+	case nir_intrinsic_ssbo_atomic_imax_ir3:
 		atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
 		type = TYPE_S32;
 		break;
-	case nir_intrinsic_ssbo_atomic_umax:
+	case nir_intrinsic_ssbo_atomic_umax_ir3:
 		atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_and:
+	case nir_intrinsic_ssbo_atomic_and_ir3:
 		atomic = ir3_ATOMIC_AND_G(b, ibo, 0, src0, 0, src1, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_or:
+	case nir_intrinsic_ssbo_atomic_or_ir3:
 		atomic = ir3_ATOMIC_OR_G(b, ibo, 0, src0, 0, src1, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_xor:
+	case nir_intrinsic_ssbo_atomic_xor_ir3:
 		atomic = ir3_ATOMIC_XOR_G(b, ibo, 0, src0, 0, src1, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_exchange:
+	case nir_intrinsic_ssbo_atomic_exchange_ir3:
 		atomic = ir3_ATOMIC_XCHG_G(b, ibo, 0, src0, 0, src1, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_comp_swap:
+	case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
 		atomic = ir3_ATOMIC_CMPXCHG_G(b, ibo, 0, src0, 0, src1, 0);
 		break;
 	default:
diff --git a/src/freedreno/ir3/ir3_compiler_nir.c b/src/freedreno/ir3/ir3_compiler_nir.c
index 1e3fbeb3117..72549fef70d 100644
--- a/src/freedreno/ir3/ir3_compiler_nir.c
+++ b/src/freedreno/ir3/ir3_compiler_nir.c
@@ -1156,25 +1156,29 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 			}
 		}
 		break;
-	case nir_intrinsic_load_ssbo:
+	/* All SSBO intrinsics should have been lowered by 'lower_io_offsets'
+	 * pass and replaced by an ir3-specifc version that adds the
+	 * dword-offset in the last source.
+	 */
+	case nir_intrinsic_load_ssbo_ir3:
 		ctx->funcs->emit_intrinsic_load_ssbo(ctx, intr, dst);
 		break;
-	case nir_intrinsic_store_ssbo:
+	case nir_intrinsic_store_ssbo_ir3:
 		ctx->funcs->emit_intrinsic_store_ssbo(ctx, intr);
 		break;
 	case nir_intrinsic_get_buffer_size:
 		emit_intrinsic_ssbo_size(ctx, intr, dst);
 		break;
-	case nir_intrinsic_ssbo_atomic_add:
-	case nir_intrinsic_ssbo_atomic_imin:
-	case nir_intrinsic_ssbo_atomic_umin:
-	case nir_intrinsic_ssbo_atomic_imax:
-	case nir_intrinsic_ssbo_atomic_umax:
-	case nir_intrinsic_ssbo_atomic_and:
-	case nir_intrinsic_ssbo_atomic_or:
-	case nir_intrinsic_ssbo_atomic_xor:
-	case nir_intrinsic_ssbo_atomic_exchange:
-	case nir_intrinsic_ssbo_atomic_comp_swap:
+	case nir_intrinsic_ssbo_atomic_add_ir3:
+	case nir_intrinsic_ssbo_atomic_imin_ir3:
+	case nir_intrinsic_ssbo_atomic_umin_ir3:
+	case nir_intrinsic_ssbo_atomic_imax_ir3:
+	case nir_intrinsic_ssbo_atomic_umax_ir3:
+	case nir_intrinsic_ssbo_atomic_and_ir3:
+	case nir_intrinsic_ssbo_atomic_or_ir3:
+	case nir_intrinsic_ssbo_atomic_xor_ir3:
+	case nir_intrinsic_ssbo_atomic_exchange_ir3:
+	case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
 		dst[0] = ctx->funcs->emit_intrinsic_atomic_ssbo(ctx, intr);
 		break;
 	case nir_intrinsic_load_shared:
diff --git a/src/freedreno/ir3/ir3_nir.c b/src/freedreno/ir3/ir3_nir.c
index 57595e00306..138f8f1af66 100644
--- a/src/freedreno/ir3/ir3_nir.c
+++ b/src/freedreno/ir3/ir3_nir.c
@@ -188,6 +188,7 @@ ir3_optimize_nir(struct ir3_shader *shader, nir_shader *s,
 
 	OPT_V(s, nir_opt_global_to_local);
 	OPT_V(s, nir_lower_regs_to_ssa);
+	OPT_V(s, ir3_nir_lower_io_offsets);
 
 	if (key) {
 		if (s->info.stage == MESA_SHADER_VERTEX) {
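
Note on the lowering pass referenced above: ir3_nir_lower_io_offsets (hooked into ir3_optimize_nir in the ir3_nir.c hunk) is what rewrites the core NIR SSBO intrinsics into the *_ir3 variants that carry an extra dword-offset source, so the a4xx/a6xx backends can read that source directly instead of emitting an ir3_SHR_B shift at instruction-emit time, and NIR's optimizer can fold the shift away for constant offsets. The following is a minimal, illustrative sketch of the offset computation such a pass performs; the helper name is hypothetical and this is not the actual Mesa implementation.

/* Illustrative sketch only -- not the actual ir3_nir_lower_io_offsets code.
 * Given the byte-offset source of an SSBO intrinsic, compute the dword
 * offset that the *_ir3 intrinsics carry as their extra (last) source.
 */
#include "compiler/nir/nir_builder.h"

static nir_ssa_def *
byte_offset_to_dword_offset(nir_builder *b, nir_ssa_def *byte_offset)
{
	/* The ldgb/ldib/stgb/stib and atomic encodings want the offset in
	 * dwords (elements), while NIR hands us bytes, so:
	 *     dword_offset = byte_offset >> 2
	 * Doing this in NIR rather than at ir3 emit time lets constant
	 * folding remove the shift entirely for literal offsets.
	 */
	return nir_ushr(b, byte_offset, nir_imm_int(b, 2));
}

The backends then simply pick the result up with ir3_get_src(ctx, &intr->src[N])[0], as seen in the ir3_a4xx.c and ir3_a6xx.c hunks above.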