author     Marek Olšák <[email protected]>    2019-12-30 14:23:16 -0500
committer  Marek Olšák <[email protected]>    2020-01-20 16:16:11 -0500
commit     8db00a51f85109e958631ef74a458b0614f37097 (patch)
tree       b4e03a19362b84a6cde22e8372130d7f3aae0e13 /src/gallium
parent     aa2d846604b7e46f98c05242f4f97b3508bf183e (diff)
radeonsi/gfx10: implement NGG culling for 4x wave32 subgroups
Acked-by: Pierre-Eric Pelloux-Prayer <[email protected]>
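The "4x wave32 subgroups" in the subject correspond to the four 32-bit surviving-vertex masks the patch keeps in gs_ngg_scratch; after culling, update_thread_counts() in the diff below rewrites both the total and the per-wave thread counts in the input SGPRs. As a reading aid, here is a minimal C sketch of that per-wave clamp, mirroring the sub/imax/imin sequence in the patch (the helper name and the test driver are illustrative, not part of the patch):

/* Sketch: derive each wave's thread count from a total thread count,
 * the way update_thread_counts() does with sub + imax(0) + imin(wave_size). */
#include <stdio.h>

static unsigned per_wave_count(unsigned total, unsigned wave_id,
                               unsigned wave_size)
{
   int count = (int)total - (int)(wave_id * wave_size);
   if (count < 0)
      count = 0;
   if (count > (int)wave_size)
      count = (int)wave_size;
   return (unsigned)count;
}

int main(void)
{
   /* e.g. 70 surviving vertices in a 4x wave32 threadgroup -> 32, 32, 6, 0 */
   for (unsigned wave = 0; wave < 4; wave++)
      printf("wave %u: %u threads\n", wave, per_wave_count(70, wave, 32));
   return 0;
}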
Diffstat (limited to 'src/gallium')
-rw-r--r--  src/gallium/drivers/radeonsi/gfx10_shader_ngg.c    | 681
-rw-r--r--  src/gallium/drivers/radeonsi/si_gfx_cs.c           |   1
-rw-r--r--  src/gallium/drivers/radeonsi/si_pipe.c             |   8
-rw-r--r--  src/gallium/drivers/radeonsi/si_pipe.h             |  11
-rw-r--r--  src/gallium/drivers/radeonsi/si_shader.c           | 146
-rw-r--r--  src/gallium/drivers/radeonsi/si_shader.h           |  14
-rw-r--r--  src/gallium/drivers/radeonsi/si_shader_internal.h  |  17
-rw-r--r--  src/gallium/drivers/radeonsi/si_shader_llvm_gs.c   |   2
-rw-r--r--  src/gallium/drivers/radeonsi/si_state.c            |   1
-rw-r--r--  src/gallium/drivers/radeonsi/si_state_draw.c       |  39
-rw-r--r--  src/gallium/drivers/radeonsi/si_state_shaders.c    |  32
-rw-r--r--  src/gallium/drivers/radeonsi/si_state_viewport.c   |  53
12 files changed, 951 insertions, 54 deletions
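One encoding worth keeping in mind while reading the first hunk: with culling enabled, gfx10_ngg_build_export_prim() ANDs user edge flags into an already-packed NGG passthrough primitive word, using shift = 9 + i*10 and SI_NGG_PRIM_EDGE_FLAG_BITS from si_shader.h. Below is a hedged sketch of that bit layout with a hypothetical packing helper built from the constants in this patch: three 9-bit vertex indices at bits 0/10/20, edge flags at bits 9/19/29, and the null-primitive flag at bit 31.

/* Illustrative only; the driver builds this value with ac_pack_prim_export(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SI_NGG_PRIM_EDGE_FLAG_BITS ((1u << 9) | (1u << 19) | (1u << 29))

static uint32_t pack_ngg_passthrough_prim(const uint32_t index[3],
                                          const bool edgeflag[3], bool isnull)
{
   uint32_t prim = isnull ? 1u << 31 : 0;

   for (unsigned i = 0; i < 3; i++) {
      prim |= (index[i] & 0x1ff) << (10 * i);        /* 9-bit vertex index */
      prim |= (uint32_t)edgeflag[i] << (9 + 10 * i); /* per-vertex edge flag */
   }
   return prim;
}

int main(void)
{
   const uint32_t idx[3] = { 0, 1, 2 };
   const bool edge[3] = { true, true, true };

   printf("prim export word: 0x%08x\n",
          (unsigned)pack_ngg_passthrough_prim(idx, edge, false));
   return 0;
}

This is the same layout the culling path stores into new_vgpr0 for the compacted vertex indices, and it explains why gfx10_emit_ngg_epilogue() unpacks gs_vtx01_offset in 9-bit fields at offsets 0/10/20 when ngg_culling is set.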
diff --git a/src/gallium/drivers/radeonsi/gfx10_shader_ngg.c b/src/gallium/drivers/radeonsi/gfx10_shader_ngg.c index a25c89bac56..8092b796b5d 100644 --- a/src/gallium/drivers/radeonsi/gfx10_shader_ngg.c +++ b/src/gallium/drivers/radeonsi/gfx10_shader_ngg.c @@ -28,6 +28,7 @@ #include "util/u_memory.h" #include "util/u_prim.h" +#include "ac_llvm_cull.h" static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx) { @@ -141,14 +142,44 @@ void gfx10_ngg_build_sendmsg_gs_alloc_req(struct si_shader_context *ctx) } void gfx10_ngg_build_export_prim(struct si_shader_context *ctx, - LLVMValueRef user_edgeflags[3]) + LLVMValueRef user_edgeflags[3], + LLVMValueRef prim_passthrough) { - if (gfx10_is_ngg_passthrough(ctx->shader)) { + LLVMBuilderRef builder = ctx->ac.builder; + + if (gfx10_is_ngg_passthrough(ctx->shader) || + ctx->shader->key.opt.ngg_culling) { ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 6001); { struct ac_ngg_prim prim = {}; - prim.passthrough = ac_get_arg(&ctx->ac, ctx->gs_vtx01_offset); + if (prim_passthrough) + prim.passthrough = prim_passthrough; + else + prim.passthrough = ac_get_arg(&ctx->ac, ctx->gs_vtx01_offset); + + /* This is only used with NGG culling, which returns the NGG + * passthrough prim export encoding. + */ + if (ctx->shader->selector->info.writes_edgeflag) { + unsigned all_bits_no_edgeflags = ~SI_NGG_PRIM_EDGE_FLAG_BITS; + LLVMValueRef edgeflags = LLVMConstInt(ctx->i32, all_bits_no_edgeflags, 0); + + unsigned num_vertices; + ngg_get_vertices_per_prim(ctx, &num_vertices); + + for (unsigned i = 0; i < num_vertices; i++) { + unsigned shift = 9 + i*10; + LLVMValueRef edge; + + edge = LLVMBuildLoad(builder, user_edgeflags[i], ""); + edge = LLVMBuildZExt(builder, edge, ctx->i32, ""); + edge = LLVMBuildShl(builder, edge, LLVMConstInt(ctx->i32, shift, 0), ""); + edgeflags = LLVMBuildOr(builder, edgeflags, edge, ""); + } + prim.passthrough = LLVMBuildAnd(builder, prim.passthrough, edgeflags, ""); + } + ac_build_export_prim(&ctx->ac, &prim); } ac_build_endif(&ctx->ac, 6001); @@ -535,6 +566,51 @@ static void build_streamout(struct si_shader_context *ctx, } } +/* LDS layout of ES vertex data for NGG culling. */ +enum { + /* Byte 0: Boolean ES thread accepted (unculled) flag, and later the old + * ES thread ID. After vertex compaction, compacted ES threads + * store the old thread ID here to copy input VGPRs from uncompacted + * ES threads. + * Byte 1: New ES thread ID, loaded by GS to prepare the prim export value. + * Byte 2: TES rel patch ID + * Byte 3: Unused + */ + lds_byte0_accept_flag = 0, + lds_byte0_old_thread_id = 0, + lds_byte1_new_thread_id, + lds_byte2_tes_rel_patch_id, + lds_byte3_unused, + + lds_packed_data = 0, /* lds_byteN_... 
*/ + + lds_pos_x, + lds_pos_y, + lds_pos_z, + lds_pos_w, + lds_pos_x_div_w, + lds_pos_y_div_w, + /* If VS: */ + lds_vertex_id, + lds_instance_id, /* optional */ + /* If TES: */ + lds_tes_u = lds_vertex_id, + lds_tes_v = lds_instance_id, + lds_tes_patch_id, /* optional */ +}; + +static LLVMValueRef si_build_gep_i8(struct si_shader_context *ctx, + LLVMValueRef ptr, unsigned byte_index) +{ + assert(byte_index < 4); + LLVMTypeRef pi8 = LLVMPointerType(ctx->i8, AC_ADDR_SPACE_LDS); + LLVMValueRef index = LLVMConstInt(ctx->i32, byte_index, 0); + + return LLVMBuildGEP(ctx->ac.builder, + LLVMBuildPointerCast(ctx->ac.builder, ptr, pi8, ""), + &index, 1, ""); +} + static unsigned ngg_nogs_vertex_size(struct si_shader *shader) { unsigned lds_vertex_size = 0; @@ -555,6 +631,24 @@ static unsigned ngg_nogs_vertex_size(struct si_shader *shader) shader->key.mono.u.vs_export_prim_id) lds_vertex_size = MAX2(lds_vertex_size, 1); + if (shader->key.opt.ngg_culling) { + if (shader->selector->type == PIPE_SHADER_VERTEX) { + STATIC_ASSERT(lds_instance_id + 1 == 9); + lds_vertex_size = MAX2(lds_vertex_size, 9); + } else { + assert(shader->selector->type == PIPE_SHADER_TESS_EVAL); + + if (shader->selector->info.uses_primid || + shader->key.mono.u.vs_export_prim_id) { + STATIC_ASSERT(lds_tes_patch_id + 2 == 11); + lds_vertex_size = MAX2(lds_vertex_size, 11); + } else { + STATIC_ASSERT(lds_tes_v + 1 == 9); + lds_vertex_size = MAX2(lds_vertex_size, 9); + } + } + } + return lds_vertex_size; } @@ -573,6 +667,540 @@ static LLVMValueRef ngg_nogs_vertex_ptr(struct si_shader_context *ctx, return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, ""); } +static void load_bitmasks_2x64(struct si_shader_context *ctx, + LLVMValueRef lds_ptr, unsigned dw_offset, + LLVMValueRef mask[2], LLVMValueRef *total_bitcount) +{ + LLVMBuilderRef builder = ctx->ac.builder; + LLVMValueRef ptr64 = LLVMBuildPointerCast(builder, lds_ptr, + LLVMPointerType(LLVMArrayType(ctx->i64, 2), + AC_ADDR_SPACE_LDS), ""); + for (unsigned i = 0; i < 2; i++) { + LLVMValueRef index = LLVMConstInt(ctx->i32, dw_offset / 2 + i, 0); + mask[i] = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ptr64, index), ""); + } + + /* We get better code if we don't use the 128-bit bitcount. */ + *total_bitcount = LLVMBuildAdd(builder, ac_build_bit_count(&ctx->ac, mask[0]), + ac_build_bit_count(&ctx->ac, mask[1]), ""); +} + +/** + * Given a total thread count, update total and per-wave thread counts in input SGPRs + * and return the per-wave thread count. + * + * \param new_num_threads Total thread count on the input, per-wave thread count on the output. + * \param tg_info tg_info SGPR value + * \param tg_info_num_bits the bit size of thread count field in tg_info + * \param tg_info_shift the bit offset of the thread count field in tg_info + * \param wave_info merged_wave_info SGPR value + * \param wave_info_num_bits the bit size of thread count field in merged_wave_info + * \param wave_info_shift the bit offset of the thread count field in merged_wave_info + */ +static void update_thread_counts(struct si_shader_context *ctx, + LLVMValueRef *new_num_threads, + LLVMValueRef *tg_info, + unsigned tg_info_num_bits, + unsigned tg_info_shift, + LLVMValueRef *wave_info, + unsigned wave_info_num_bits, + unsigned wave_info_shift) +{ + LLVMBuilderRef builder = ctx->ac.builder; + + /* Update the total thread count. 
*/ + unsigned tg_info_mask = ~(u_bit_consecutive(0, tg_info_num_bits) << tg_info_shift); + *tg_info = LLVMBuildAnd(builder, *tg_info, + LLVMConstInt(ctx->i32, tg_info_mask, 0), ""); + *tg_info = LLVMBuildOr(builder, *tg_info, + LLVMBuildShl(builder, *new_num_threads, + LLVMConstInt(ctx->i32, tg_info_shift, 0), ""), ""); + + /* Update the per-wave thread count. */ + LLVMValueRef prev_threads = LLVMBuildMul(builder, get_wave_id_in_tg(ctx), + LLVMConstInt(ctx->i32, ctx->ac.wave_size, 0), ""); + *new_num_threads = LLVMBuildSub(builder, *new_num_threads, prev_threads, ""); + *new_num_threads = ac_build_imax(&ctx->ac, *new_num_threads, ctx->i32_0); + *new_num_threads = ac_build_imin(&ctx->ac, *new_num_threads, + LLVMConstInt(ctx->i32, ctx->ac.wave_size, 0)); + unsigned wave_info_mask = ~(u_bit_consecutive(0, wave_info_num_bits) << wave_info_shift); + *wave_info = LLVMBuildAnd(builder, *wave_info, + LLVMConstInt(ctx->i32, wave_info_mask, 0), ""); + *wave_info = LLVMBuildOr(builder, *wave_info, + LLVMBuildShl(builder, *new_num_threads, + LLVMConstInt(ctx->i32, wave_info_shift, 0), ""), ""); +} + +/** + * Cull primitives for NGG VS or TES, then compact vertices, which happens + * before the VS or TES main function. Return values for the main function. + * Also return the position, which is passed to the shader as an input, + * so that we don't compute it twice. + */ +void gfx10_emit_ngg_culling_epilogue_4x_wave32(struct ac_shader_abi *abi, + unsigned max_outputs, + LLVMValueRef *addrs) +{ + struct si_shader_context *ctx = si_shader_context_from_abi(abi); + struct si_shader *shader = ctx->shader; + struct si_shader_selector *sel = shader->selector; + struct si_shader_info *info = &sel->info; + LLVMBuilderRef builder = ctx->ac.builder; + + assert(shader->key.opt.ngg_culling); + assert(shader->key.as_ngg); + assert(sel->type == PIPE_SHADER_VERTEX || + (sel->type == PIPE_SHADER_TESS_EVAL && !shader->key.as_es)); + + LLVMValueRef position[4] = {}; + for (unsigned i = 0; i < info->num_outputs; i++) { + switch (info->output_semantic_name[i]) { + case TGSI_SEMANTIC_POSITION: + for (unsigned j = 0; j < 4; j++) { + position[j] = LLVMBuildLoad(ctx->ac.builder, + addrs[4 * i + j], ""); + } + break; + } + } + assert(position[0]); + + /* Store Position.XYZW into LDS. */ + LLVMValueRef es_vtxptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx)); + for (unsigned chan = 0; chan < 4; chan++) { + LLVMBuildStore(builder, ac_to_integer(&ctx->ac, position[chan]), + ac_build_gep0(&ctx->ac, es_vtxptr, + LLVMConstInt(ctx->i32, lds_pos_x + chan, 0))); + } + /* Store Position.XY / W into LDS. */ + for (unsigned chan = 0; chan < 2; chan++) { + LLVMValueRef val = ac_build_fdiv(&ctx->ac, position[chan], position[3]); + LLVMBuildStore(builder, ac_to_integer(&ctx->ac, val), + ac_build_gep0(&ctx->ac, es_vtxptr, + LLVMConstInt(ctx->i32, lds_pos_x_div_w + chan, 0))); + } + + /* Store VertexID and InstanceID. ES threads will have to load them + * from LDS after vertex compaction and use them instead of their own + * system values. 
+ */ + bool uses_instance_id = false; + bool uses_tes_prim_id = false; + LLVMValueRef packed_data = ctx->i32_0; + + if (ctx->type == PIPE_SHADER_VERTEX) { + uses_instance_id = sel->info.uses_instanceid || + shader->key.part.vs.prolog.instance_divisor_is_one || + shader->key.part.vs.prolog.instance_divisor_is_fetched; + + LLVMBuildStore(builder, ctx->abi.vertex_id, + ac_build_gep0(&ctx->ac, es_vtxptr, + LLVMConstInt(ctx->i32, lds_vertex_id, 0))); + if (uses_instance_id) { + LLVMBuildStore(builder, ctx->abi.instance_id, + ac_build_gep0(&ctx->ac, es_vtxptr, + LLVMConstInt(ctx->i32, lds_instance_id, 0))); + } + } else { + uses_tes_prim_id = sel->info.uses_primid || + shader->key.mono.u.vs_export_prim_id; + + assert(ctx->type == PIPE_SHADER_TESS_EVAL); + LLVMBuildStore(builder, ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, ctx->tes_u)), + ac_build_gep0(&ctx->ac, es_vtxptr, + LLVMConstInt(ctx->i32, lds_tes_u, 0))); + LLVMBuildStore(builder, ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, ctx->tes_v)), + ac_build_gep0(&ctx->ac, es_vtxptr, + LLVMConstInt(ctx->i32, lds_tes_v, 0))); + packed_data = LLVMBuildShl(builder, ac_get_arg(&ctx->ac, ctx->tes_rel_patch_id), + LLVMConstInt(ctx->i32, lds_byte2_tes_rel_patch_id * 8, 0), ""); + if (uses_tes_prim_id) { + LLVMBuildStore(builder, ac_get_arg(&ctx->ac, ctx->args.tes_patch_id), + ac_build_gep0(&ctx->ac, es_vtxptr, + LLVMConstInt(ctx->i32, lds_tes_patch_id, 0))); + } + } + /* Initialize the packed data. */ + LLVMBuildStore(builder, packed_data, + ac_build_gep0(&ctx->ac, es_vtxptr, + LLVMConstInt(ctx->i32, lds_packed_data, 0))); + ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label); + + LLVMValueRef tid = ac_get_thread_id(&ctx->ac); + + /* Initialize the last 3 gs_ngg_scratch dwords to 0, because we may have less + * than 4 waves, but we always read all 4 values. This is where the thread + * bitmasks of unculled threads will be stored. + * + * gs_ngg_scratch layout: esmask[0..3] + */ + ac_build_ifcc(&ctx->ac, + LLVMBuildICmp(builder, LLVMIntULT, get_thread_id_in_tg(ctx), + LLVMConstInt(ctx->i32, 3, 0), ""), 16101); + { + LLVMValueRef index = LLVMBuildAdd(builder, tid, ctx->i32_1, ""); + LLVMBuildStore(builder, ctx->i32_0, + ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, index)); + } + ac_build_endif(&ctx->ac, 16101); + ac_build_s_barrier(&ctx->ac); + + /* The hardware requires that there are no holes between unculled vertices, + * which means we have to pack ES threads, i.e. reduce the ES thread count + * and move ES input VGPRs to lower threads. The upside is that varyings + * are only fetched and computed for unculled vertices. + * + * Vertex compaction in GS threads: + * + * Part 1: Compute the surviving vertex mask in GS threads: + * - Compute 4 32-bit surviving vertex masks in LDS. (max 4 waves) + * - In GS, notify ES threads whether the vertex survived. + * - Barrier + * - ES threads will create the mask and store it in LDS. + * - Barrier + * - Each GS thread loads the vertex masks from LDS. + * + * Part 2: Compact ES threads in GS threads: + * - Compute the prefix sum for all 3 vertices from the masks. These are the new + * thread IDs for each vertex within the primitive. + * - Write the value of the old thread ID into the LDS address of the new thread ID. + * The ES thread will load the old thread ID and use it to load the position, VertexID, + * and InstanceID. + * - Update vertex indices and null flag in the GS input VGPRs. + * - Barrier + * + * Part 3: Update inputs GPRs + * - For all waves, update per-wave thread counts in input SGPRs. 
+ * - In ES threads, update the ES input VGPRs (VertexID, InstanceID, TES inputs). + */ + + LLVMValueRef vtxindex[] = { + si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16), + si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16), + si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16), + }; + LLVMValueRef gs_vtxptr[] = { + ngg_nogs_vertex_ptr(ctx, vtxindex[0]), + ngg_nogs_vertex_ptr(ctx, vtxindex[1]), + ngg_nogs_vertex_ptr(ctx, vtxindex[2]), + }; + es_vtxptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx)); + + LLVMValueRef gs_accepted = ac_build_alloca(&ctx->ac, ctx->i32, ""); + + /* Do culling in GS threads. */ + ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 16002); + { + /* Load positions. */ + LLVMValueRef pos[3][4] = {}; + for (unsigned vtx = 0; vtx < 3; vtx++) { + for (unsigned chan = 0; chan < 4; chan++) { + unsigned index; + if (chan == 0 || chan == 1) + index = lds_pos_x_div_w + chan; + else if (chan == 3) + index = lds_pos_w; + else + continue; + + LLVMValueRef addr = ac_build_gep0(&ctx->ac, gs_vtxptr[vtx], + LLVMConstInt(ctx->i32, index, 0)); + pos[vtx][chan] = LLVMBuildLoad(builder, addr, ""); + pos[vtx][chan] = ac_to_float(&ctx->ac, pos[vtx][chan]); + } + } + + /* Load the viewport state for small prim culling. */ + LLVMValueRef vp = ac_build_load_invariant(&ctx->ac, + ac_get_arg(&ctx->ac, ctx->small_prim_cull_info), + ctx->i32_0); + vp = LLVMBuildBitCast(builder, vp, ctx->v4f32, ""); + LLVMValueRef vp_scale[2], vp_translate[2]; + vp_scale[0] = ac_llvm_extract_elem(&ctx->ac, vp, 0); + vp_scale[1] = ac_llvm_extract_elem(&ctx->ac, vp, 1); + vp_translate[0] = ac_llvm_extract_elem(&ctx->ac, vp, 2); + vp_translate[1] = ac_llvm_extract_elem(&ctx->ac, vp, 3); + + /* Get the small prim filter precision. */ + LLVMValueRef small_prim_precision = si_unpack_param(ctx, ctx->vs_state_bits, 7, 4); + small_prim_precision = LLVMBuildOr(builder, small_prim_precision, + LLVMConstInt(ctx->i32, 0x70, 0), ""); + small_prim_precision = LLVMBuildShl(builder, small_prim_precision, + LLVMConstInt(ctx->i32, 23, 0), ""); + small_prim_precision = LLVMBuildBitCast(builder, small_prim_precision, ctx->f32, ""); + + /* Execute culling code. */ + struct ac_cull_options options = {}; + options.cull_front = shader->key.opt.ngg_culling & SI_NGG_CULL_FRONT_FACE; + options.cull_back = shader->key.opt.ngg_culling & SI_NGG_CULL_BACK_FACE; + options.cull_view_xy = shader->key.opt.ngg_culling & SI_NGG_CULL_VIEW_SMALLPRIMS; + options.cull_small_prims = options.cull_view_xy; + options.cull_zero_area = options.cull_front || options.cull_back; + options.cull_w = true; + + /* Tell ES threads whether their vertex survived. */ + ac_build_ifcc(&ctx->ac, ac_cull_triangle(&ctx->ac, pos, ctx->i1true, + vp_scale, vp_translate, + small_prim_precision, &options), 16003); + { + LLVMBuildStore(builder, ctx->ac.i32_1, gs_accepted); + for (unsigned vtx = 0; vtx < 3; vtx++) { + LLVMBuildStore(builder, ctx->ac.i8_1, + si_build_gep_i8(ctx, gs_vtxptr[vtx], lds_byte0_accept_flag)); + } + } + ac_build_endif(&ctx->ac, 16003); + } + ac_build_endif(&ctx->ac, 16002); + ac_build_s_barrier(&ctx->ac); + + gs_accepted = LLVMBuildLoad(builder, gs_accepted, ""); + + LLVMValueRef es_accepted = ac_build_alloca(&ctx->ac, ctx->i1, ""); + + /* Convert the per-vertex flag to a thread bitmask in ES threads and store it in LDS. 
*/ + ac_build_ifcc(&ctx->ac, si_is_es_thread(ctx), 16007); + { + LLVMValueRef es_accepted_flag = + LLVMBuildLoad(builder, + si_build_gep_i8(ctx, es_vtxptr, lds_byte0_accept_flag), ""); + + LLVMValueRef es_accepted_bool = LLVMBuildICmp(builder, LLVMIntNE, + es_accepted_flag, ctx->ac.i8_0, ""); + LLVMValueRef es_mask = ac_get_i1_sgpr_mask(&ctx->ac, es_accepted_bool); + + LLVMBuildStore(builder, es_accepted_bool, es_accepted); + + ac_build_ifcc(&ctx->ac, LLVMBuildICmp(builder, LLVMIntEQ, + tid, ctx->i32_0, ""), 16008); + { + LLVMBuildStore(builder, es_mask, + ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, + get_wave_id_in_tg(ctx))); + } + ac_build_endif(&ctx->ac, 16008); + } + ac_build_endif(&ctx->ac, 16007); + ac_build_s_barrier(&ctx->ac); + + /* Load the vertex masks and compute the new ES thread count. */ + LLVMValueRef es_mask[2], new_num_es_threads, kill_wave; + load_bitmasks_2x64(ctx, ctx->gs_ngg_scratch, 0, es_mask, &new_num_es_threads); + new_num_es_threads = ac_build_readlane_no_opt_barrier(&ctx->ac, new_num_es_threads, NULL); + + /* ES threads compute their prefix sum, which is the new ES thread ID. + * Then they write the value of the old thread ID into the LDS address + * of the new thread ID. It will be used it to load input VGPRs from + * the old thread's LDS location. + */ + ac_build_ifcc(&ctx->ac, LLVMBuildLoad(builder, es_accepted, ""), 16009); + { + LLVMValueRef old_id = get_thread_id_in_tg(ctx); + LLVMValueRef new_id = ac_prefix_bitcount_2x64(&ctx->ac, es_mask, old_id); + + LLVMBuildStore(builder, LLVMBuildTrunc(builder, old_id, ctx->i8, ""), + si_build_gep_i8(ctx, ngg_nogs_vertex_ptr(ctx, new_id), + lds_byte0_old_thread_id)); + LLVMBuildStore(builder, LLVMBuildTrunc(builder, new_id, ctx->i8, ""), + si_build_gep_i8(ctx, es_vtxptr, lds_byte1_new_thread_id)); + } + ac_build_endif(&ctx->ac, 16009); + + /* Kill waves that have inactive threads. */ + kill_wave = LLVMBuildICmp(builder, LLVMIntULE, + ac_build_imax(&ctx->ac, new_num_es_threads, ngg_get_prim_cnt(ctx)), + LLVMBuildMul(builder, get_wave_id_in_tg(ctx), + LLVMConstInt(ctx->i32, ctx->ac.wave_size, 0), ""), ""); + ac_build_ifcc(&ctx->ac, kill_wave, 19202); + { + /* If we are killing wave 0, send that there are no primitives + * in this threadgroup. + */ + ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx), + ctx->i32_0, ctx->i32_0); + ac_build_s_endpgm(&ctx->ac); + } + ac_build_endif(&ctx->ac, 19202); + ac_build_s_barrier(&ctx->ac); + + /* Send the final vertex and primitive counts. */ + ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx), + new_num_es_threads, ngg_get_prim_cnt(ctx)); + + /* Update thread counts in SGPRs. */ + LLVMValueRef new_gs_tg_info = ac_get_arg(&ctx->ac, ctx->gs_tg_info); + LLVMValueRef new_merged_wave_info = ac_get_arg(&ctx->ac, ctx->merged_wave_info); + + /* This also converts the thread count from the total count to the per-wave count. */ + update_thread_counts(ctx, &new_num_es_threads, &new_gs_tg_info, 9, 12, + &new_merged_wave_info, 8, 0); + + /* Update vertex indices in VGPR0 (same format as NGG passthrough). */ + LLVMValueRef new_vgpr0 = ac_build_alloca_undef(&ctx->ac, ctx->i32, ""); + + /* Set the null flag at the beginning (culled), and then + * overwrite it for accepted primitives. + */ + LLVMBuildStore(builder, LLVMConstInt(ctx->i32, 1u << 31, 0), new_vgpr0); + + /* Get vertex indices after vertex compaction. 
*/ + ac_build_ifcc(&ctx->ac, LLVMBuildTrunc(builder, gs_accepted, ctx->i1, ""), 16011); + { + struct ac_ngg_prim prim = {}; + prim.num_vertices = 3; + prim.isnull = ctx->i1false; + + for (unsigned vtx = 0; vtx < 3; vtx++) { + prim.index[vtx] = + LLVMBuildLoad(builder, + si_build_gep_i8(ctx, gs_vtxptr[vtx], + lds_byte1_new_thread_id), ""); + prim.index[vtx] = LLVMBuildZExt(builder, prim.index[vtx], ctx->i32, ""); + prim.edgeflag[vtx] = ngg_get_initial_edgeflag(ctx, vtx); + } + + /* Set the new GS input VGPR. */ + LLVMBuildStore(builder, ac_pack_prim_export(&ctx->ac, &prim), new_vgpr0); + } + ac_build_endif(&ctx->ac, 16011); + + if (gfx10_ngg_export_prim_early(shader)) + gfx10_ngg_build_export_prim(ctx, NULL, LLVMBuildLoad(builder, new_vgpr0, "")); + + /* Set the new ES input VGPRs. */ + LLVMValueRef es_data[4]; + LLVMValueRef old_thread_id = ac_build_alloca_undef(&ctx->ac, ctx->i32, ""); + + for (unsigned i = 0; i < 4; i++) + es_data[i] = ac_build_alloca_undef(&ctx->ac, ctx->i32, ""); + + ac_build_ifcc(&ctx->ac, LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, tid, + new_num_es_threads, ""), 16012); + { + LLVMValueRef old_id, old_es_vtxptr, tmp; + + /* Load ES input VGPRs from the ES thread before compaction. */ + old_id = LLVMBuildLoad(builder, + si_build_gep_i8(ctx, es_vtxptr, lds_byte0_old_thread_id), ""); + old_id = LLVMBuildZExt(builder, old_id, ctx->i32, ""); + + LLVMBuildStore(builder, old_id, old_thread_id); + old_es_vtxptr = ngg_nogs_vertex_ptr(ctx, old_id); + + for (unsigned i = 0; i < 2; i++) { + tmp = LLVMBuildLoad(builder, + ac_build_gep0(&ctx->ac, old_es_vtxptr, + LLVMConstInt(ctx->i32, lds_vertex_id + i, 0)), ""); + LLVMBuildStore(builder, tmp, es_data[i]); + } + + if (ctx->type == PIPE_SHADER_TESS_EVAL) { + tmp = LLVMBuildLoad(builder, + si_build_gep_i8(ctx, old_es_vtxptr, + lds_byte2_tes_rel_patch_id), ""); + tmp = LLVMBuildZExt(builder, tmp, ctx->i32, ""); + LLVMBuildStore(builder, tmp, es_data[2]); + + if (uses_tes_prim_id) { + tmp = LLVMBuildLoad(builder, + ac_build_gep0(&ctx->ac, old_es_vtxptr, + LLVMConstInt(ctx->i32, lds_tes_patch_id, 0)), ""); + LLVMBuildStore(builder, tmp, es_data[3]); + } + } + } + ac_build_endif(&ctx->ac, 16012); + + /* Return values for the main function. 
*/ + LLVMValueRef ret = ctx->return_value; + LLVMValueRef val; + + ret = LLVMBuildInsertValue(ctx->ac.builder, ret, new_gs_tg_info, 2, ""); + ret = LLVMBuildInsertValue(ctx->ac.builder, ret, new_merged_wave_info, 3, ""); + if (ctx->type == PIPE_SHADER_TESS_EVAL) + ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset, 4); + + ret = si_insert_input_ptr(ctx, ret, ctx->rw_buffers, + 8 + SI_SGPR_RW_BUFFERS); + ret = si_insert_input_ptr(ctx, ret, + ctx->bindless_samplers_and_images, + 8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES); + ret = si_insert_input_ptr(ctx, ret, + ctx->const_and_shader_buffers, + 8 + SI_SGPR_CONST_AND_SHADER_BUFFERS); + ret = si_insert_input_ptr(ctx, ret, + ctx->samplers_and_images, + 8 + SI_SGPR_SAMPLERS_AND_IMAGES); + ret = si_insert_input_ptr(ctx, ret, ctx->vs_state_bits, + 8 + SI_SGPR_VS_STATE_BITS); + + if (ctx->type == PIPE_SHADER_VERTEX) { + ret = si_insert_input_ptr(ctx, ret, ctx->args.base_vertex, + 8 + SI_SGPR_BASE_VERTEX); + ret = si_insert_input_ptr(ctx, ret, ctx->args.start_instance, + 8 + SI_SGPR_START_INSTANCE); + ret = si_insert_input_ptr(ctx, ret, ctx->args.draw_id, + 8 + SI_SGPR_DRAWID); + ret = si_insert_input_ptr(ctx, ret, ctx->vertex_buffers, + 8 + SI_VS_NUM_USER_SGPR); + } else { + assert(ctx->type == PIPE_SHADER_TESS_EVAL); + ret = si_insert_input_ptr(ctx, ret, ctx->tcs_offchip_layout, + 8 + SI_SGPR_TES_OFFCHIP_LAYOUT); + ret = si_insert_input_ptr(ctx, ret, ctx->tes_offchip_addr, + 8 + SI_SGPR_TES_OFFCHIP_ADDR); + } + + unsigned vgpr; + if (ctx->type == PIPE_SHADER_VERTEX) + vgpr = 8 + GFX9_VSGS_NUM_USER_SGPR + 1; + else + vgpr = 8 + GFX9_TESGS_NUM_USER_SGPR; + + val = LLVMBuildLoad(builder, new_vgpr0, ""); + ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), + vgpr++, ""); + vgpr++; /* gs_vtx23_offset */ + + ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_prim_id, vgpr++); + ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_invocation_id, vgpr++); + vgpr++; /* gs_vtx45_offset */ + + if (ctx->type == PIPE_SHADER_VERTEX) { + val = LLVMBuildLoad(builder, es_data[0], ""); + ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), + vgpr++, ""); /* VGPR5 - VertexID */ + vgpr += 2; + if (uses_instance_id) { + val = LLVMBuildLoad(builder, es_data[1], ""); + ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), + vgpr++, ""); /* VGPR8 - InstanceID */ + } else { + vgpr++; + } + } else { + assert(ctx->type == PIPE_SHADER_TESS_EVAL); + unsigned num_vgprs = uses_tes_prim_id ? 4 : 3; + for (unsigned i = 0; i < num_vgprs; i++) { + val = LLVMBuildLoad(builder, es_data[i], ""); + ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), + vgpr++, ""); + } + if (num_vgprs == 3) + vgpr++; + } + /* Return the old thread ID. */ + val = LLVMBuildLoad(builder, old_thread_id, ""); + ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), vgpr++, ""); + + /* These two also use LDS. */ + if (sel->info.writes_edgeflag || + (ctx->type == PIPE_SHADER_VERTEX && shader->key.mono.u.vs_export_prim_id)) + ac_build_s_barrier(&ctx->ac); + + ctx->return_value = ret; +} + /** * Emit the epilogue of an API VS or TES shader compiled as ESGS shader. 
*/ @@ -630,7 +1258,8 @@ void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi, } bool unterminated_es_if_block = - gfx10_is_ngg_passthrough(ctx->shader) && + !sel->so.num_outputs && + !sel->info.writes_edgeflag && !ctx->screen->use_ngg_streamout && /* no query buffer */ (ctx->type != PIPE_SHADER_VERTEX || !ctx->shader->key.mono.u.vs_export_prim_id); @@ -640,11 +1269,17 @@ void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi, LLVMValueRef is_gs_thread = si_is_gs_thread(ctx); LLVMValueRef is_es_thread = si_is_es_thread(ctx); - LLVMValueRef vtxindex[] = { - si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16), - si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16), - si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16), - }; + LLVMValueRef vtxindex[3]; + + if (ctx->shader->key.opt.ngg_culling) { + vtxindex[0] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 9); + vtxindex[1] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 10, 9); + vtxindex[2] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 20, 9); + } else { + vtxindex[0] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16); + vtxindex[1] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16); + vtxindex[2] = si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16); + } /* Determine the number of vertices per primitive. */ unsigned num_vertices; @@ -758,7 +1393,7 @@ void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi, /* Build the primitive export. */ if (!gfx10_ngg_export_prim_early(ctx->shader)) { assert(!unterminated_es_if_block); - gfx10_ngg_build_export_prim(ctx, user_edgeflags); + gfx10_ngg_build_export_prim(ctx, user_edgeflags, NULL); } /* Export per-vertex data (positions and parameters). */ @@ -769,11 +1404,27 @@ void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi, /* Unconditionally (re-)load the values for proper SSA form. */ for (i = 0; i < info->num_outputs; i++) { - for (unsigned j = 0; j < 4; j++) { - outputs[i].values[j] = - LLVMBuildLoad(builder, - addrs[4 * i + j], - ""); + /* If the NGG cull shader part computed the position, don't + * use the position from the current shader part. Instead, + * load it from LDS. 
+ */ + if (info->output_semantic_name[i] == TGSI_SEMANTIC_POSITION && + ctx->shader->key.opt.ngg_culling) { + vertex_ptr = ngg_nogs_vertex_ptr(ctx, + ac_get_arg(&ctx->ac, ctx->ngg_old_thread_id)); + + for (unsigned j = 0; j < 4; j++) { + tmp = LLVMConstInt(ctx->i32, lds_pos_x + j, 0); + tmp = ac_build_gep0(&ctx->ac, vertex_ptr, tmp); + tmp = LLVMBuildLoad(builder, tmp, ""); + outputs[i].values[j] = ac_to_float(&ctx->ac, tmp); + } + } else { + for (unsigned j = 0; j < 4; j++) { + outputs[i].values[j] = + LLVMBuildLoad(builder, + addrs[4 * i + j], ""); + } } } diff --git a/src/gallium/drivers/radeonsi/si_gfx_cs.c b/src/gallium/drivers/radeonsi/si_gfx_cs.c index 38b2abd1acb..9311b6e6386 100644 --- a/src/gallium/drivers/radeonsi/si_gfx_cs.c +++ b/src/gallium/drivers/radeonsi/si_gfx_cs.c @@ -445,6 +445,7 @@ void si_begin_new_gfx_cs(struct si_context *ctx) ctx->last_num_tcs_input_cp = -1; ctx->last_ls_hs_config = -1; /* impossible value */ ctx->last_binning_enabled = -1; + ctx->small_prim_cull_info_dirty = ctx->small_prim_cull_info_buf != NULL; ctx->prim_discard_compute_ib_initialized = false; diff --git a/src/gallium/drivers/radeonsi/si_pipe.c b/src/gallium/drivers/radeonsi/si_pipe.c index f49961d2b43..13c13031b54 100644 --- a/src/gallium/drivers/radeonsi/si_pipe.c +++ b/src/gallium/drivers/radeonsi/si_pipe.c @@ -94,6 +94,8 @@ static const struct debug_named_value debug_options[] = { /* 3D engine options: */ { "nogfx", DBG(NO_GFX), "Disable graphics. Only multimedia compute paths can be used." }, { "nongg", DBG(NO_NGG), "Disable NGG and use the legacy pipeline." }, + { "nggc", DBG(ALWAYS_NGG_CULLING), "Always use NGG culling even when it can hurt." }, + { "nonggc", DBG(NO_NGG_CULLING), "Disable NGG culling." }, { "alwayspd", DBG(ALWAYS_PD), "Always enable the primitive discard compute shader." }, { "pd", DBG(PD), "Enable the primitive discard compute shader for large draw calls." }, { "nopd", DBG(NO_PD), "Disable the primitive discard compute shader." }, @@ -190,6 +192,7 @@ static void si_destroy_context(struct pipe_context *context) si_resource_reference(&sctx->scratch_buffer, NULL); si_resource_reference(&sctx->compute_scratch_buffer, NULL); si_resource_reference(&sctx->wait_mem_scratch, NULL); + si_resource_reference(&sctx->small_prim_cull_info_buf, NULL); si_pm4_free_state(sctx, sctx->init_config, ~0); if (sctx->init_config_gs_rings) @@ -1173,6 +1176,10 @@ radeonsi_screen_create_impl(struct radeon_winsys *ws, sscreen->use_ngg = sscreen->info.chip_class >= GFX10 && sscreen->info.family != CHIP_NAVI14 && !(sscreen->debug_flags & DBG(NO_NGG)); + sscreen->use_ngg_culling = sscreen->use_ngg && + !(sscreen->debug_flags & DBG(NO_NGG_CULLING)); + sscreen->always_use_ngg_culling = sscreen->use_ngg_culling && + sscreen->debug_flags & DBG(ALWAYS_NGG_CULLING); sscreen->use_ngg_streamout = false; /* Only enable primitive binning on APUs by default. 
*/ @@ -1305,6 +1312,7 @@ radeonsi_screen_create_impl(struct radeon_winsys *ws, 4, 1, RADEON_DOMAIN_OA); } + STATIC_ASSERT(sizeof(union si_vgt_stages_key) == 4); return &sscreen->b; } diff --git a/src/gallium/drivers/radeonsi/si_pipe.h b/src/gallium/drivers/radeonsi/si_pipe.h index 563a201ec33..0a9c787dd76 100644 --- a/src/gallium/drivers/radeonsi/si_pipe.h +++ b/src/gallium/drivers/radeonsi/si_pipe.h @@ -183,6 +183,8 @@ enum { /* 3D engine options: */ DBG_NO_GFX, DBG_NO_NGG, + DBG_ALWAYS_NGG_CULLING, + DBG_NO_NGG_CULLING, DBG_ALWAYS_PD, DBG_PD, DBG_NO_PD, @@ -506,6 +508,8 @@ struct si_screen { bool dfsm_allowed; bool llvm_has_working_vgpr_indexing; bool use_ngg; + bool use_ngg_culling; + bool always_use_ngg_culling; bool use_ngg_streamout; struct { @@ -1072,6 +1076,7 @@ struct si_context { bool ls_vgpr_fix:1; bool prim_discard_cs_instancing:1; bool ngg:1; + uint8_t ngg_culling; int last_index_size; int last_base_vertex; int last_start_instance; @@ -1088,6 +1093,11 @@ struct si_context { unsigned last_vs_state; enum pipe_prim_type current_rast_prim; /* primitive type after TES, GS */ + struct si_small_prim_cull_info last_small_prim_cull_info; + struct si_resource *small_prim_cull_info_buf; + uint64_t small_prim_cull_info_address; + bool small_prim_cull_info_dirty; + /* Scratch buffer */ struct si_resource *scratch_buffer; unsigned scratch_waves; @@ -1499,6 +1509,7 @@ struct pipe_video_buffer *si_video_buffer_create(struct pipe_context *pipe, const struct pipe_video_buffer *tmpl); /* si_viewport.c */ +void si_update_ngg_small_prim_precision(struct si_context *ctx); void si_get_small_prim_cull_info(struct si_context *sctx, struct si_small_prim_cull_info *out); void si_update_vs_viewport_state(struct si_context *ctx); diff --git a/src/gallium/drivers/radeonsi/si_shader.c b/src/gallium/drivers/radeonsi/si_shader.c index 24f744ba5cd..e54b9fb97ba 100644 --- a/src/gallium/drivers/radeonsi/si_shader.c +++ b/src/gallium/drivers/radeonsi/si_shader.c @@ -1192,7 +1192,8 @@ static void declare_vb_descriptor_input_sgprs(struct si_shader_context *ctx) } static void declare_vs_input_vgprs(struct si_shader_context *ctx, - unsigned *num_prolog_vgprs) + unsigned *num_prolog_vgprs, + bool ngg_cull_shader) { struct si_shader *shader = ctx->shader; @@ -1218,6 +1219,11 @@ static void declare_vs_input_vgprs(struct si_shader_context *ctx, } if (!shader->is_gs_copy_shader) { + if (shader->key.opt.ngg_culling && !ngg_cull_shader) { + ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, + &ctx->ngg_old_thread_id); + } + /* Vertex load indices. 
*/ if (shader->selector->info.num_inputs) { ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, @@ -1252,12 +1258,17 @@ static void declare_vs_blit_inputs(struct si_shader_context *ctx, } } -static void declare_tes_input_vgprs(struct si_shader_context *ctx) +static void declare_tes_input_vgprs(struct si_shader_context *ctx, bool ngg_cull_shader) { ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_u); ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_v); ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->tes_rel_patch_id); ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tes_patch_id); + + if (ctx->shader->key.opt.ngg_culling && !ngg_cull_shader) { + ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, + &ctx->ngg_old_thread_id); + } } enum { @@ -1276,7 +1287,7 @@ void si_add_arg_checked(struct ac_shader_args *args, ac_add_arg(args, file, registers, type, arg); } -void si_create_function(struct si_shader_context *ctx) +void si_create_function(struct si_shader_context *ctx, bool ngg_cull_shader) { struct si_shader *shader = ctx->shader; LLVMTypeRef returns[AC_MAX_ARGS]; @@ -1305,7 +1316,7 @@ void si_create_function(struct si_shader_context *ctx) declare_vs_blit_inputs(ctx, vs_blit_property); /* VGPRs */ - declare_vs_input_vgprs(ctx, &num_prolog_vgprs); + declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader); break; } @@ -1325,7 +1336,7 @@ void si_create_function(struct si_shader_context *ctx) } /* VGPRs */ - declare_vs_input_vgprs(ctx, &num_prolog_vgprs); + declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader); /* Return values */ if (shader->key.opt.vs_as_prim_discard_cs) { @@ -1384,7 +1395,7 @@ void si_create_function(struct si_shader_context *ctx) ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids); if (ctx->type == PIPE_SHADER_VERTEX) { - declare_vs_input_vgprs(ctx, &num_prolog_vgprs); + declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader); /* LS return values are inputs to the TCS main shader part. */ for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++) @@ -1419,7 +1430,8 @@ void si_create_function(struct si_shader_context *ctx) ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info); ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset); ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset); - ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS << 8) */ + ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR, + &ctx->small_prim_cull_info); /* SPI_SHADER_PGM_LO_GS << 8 */ ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */ declare_global_desc_pointers(ctx); @@ -1452,25 +1464,33 @@ void si_create_function(struct si_shader_context *ctx) ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx45_offset); if (ctx->type == PIPE_SHADER_VERTEX) { - declare_vs_input_vgprs(ctx, &num_prolog_vgprs); + declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader); } else if (ctx->type == PIPE_SHADER_TESS_EVAL) { - declare_tes_input_vgprs(ctx); + declare_tes_input_vgprs(ctx, ngg_cull_shader); } - if (ctx->shader->key.as_es && + if ((ctx->shader->key.as_es || ngg_cull_shader) && (ctx->type == PIPE_SHADER_VERTEX || ctx->type == PIPE_SHADER_TESS_EVAL)) { - unsigned num_user_sgprs; + unsigned num_user_sgprs, num_vgprs; + /* For the NGG cull shader, add 1 SGPR to hold the vertex buffer pointer. 
*/ if (ctx->type == PIPE_SHADER_VERTEX) - num_user_sgprs = GFX9_VSGS_NUM_USER_SGPR; + num_user_sgprs = GFX9_VSGS_NUM_USER_SGPR + ngg_cull_shader; else num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR; + /* The NGG cull shader has to return all 9 VGPRs + the old thread ID. + * + * The normal merged ESGS shader only has to return the 5 VGPRs + * for the GS stage. + */ + num_vgprs = ngg_cull_shader ? 10 : 5; + /* ES return values are inputs to GS. */ for (i = 0; i < 8 + num_user_sgprs; i++) returns[num_returns++] = ctx->i32; /* SGPRs */ - for (i = 0; i < 5; i++) + for (i = 0; i < num_vgprs; i++) returns[num_returns++] = ctx->f32; /* VGPRs */ } break; @@ -1492,7 +1512,7 @@ void si_create_function(struct si_shader_context *ctx) } /* VGPRs */ - declare_tes_input_vgprs(ctx); + declare_tes_input_vgprs(ctx, ngg_cull_shader); break; case PIPE_SHADER_GEOMETRY: @@ -1622,8 +1642,8 @@ void si_create_function(struct si_shader_context *ctx) return; } - si_llvm_create_func(ctx, "main", returns, num_returns, - si_get_max_workgroup_size(shader)); + si_llvm_create_func(ctx, ngg_cull_shader ? "ngg_cull_main" : "main", + returns, num_returns, si_get_max_workgroup_size(shader)); /* Reserve register locations for VGPR inputs the PS prolog may need. */ if (ctx->type == PIPE_SHADER_FRAGMENT && !ctx->shader->is_monolithic) { @@ -2222,6 +2242,8 @@ static void si_dump_shader_key(const struct si_shader *shader, FILE *f) !key->as_es && !key->as_ls) { fprintf(f, " opt.kill_outputs = 0x%"PRIx64"\n", key->opt.kill_outputs); fprintf(f, " opt.clip_disable = %u\n", key->opt.clip_disable); + if (shader_type != PIPE_SHADER_GEOMETRY) + fprintf(f, " opt.ngg_culling = 0x%x\n", key->opt.ngg_culling); } } @@ -2266,7 +2288,8 @@ static bool si_vs_needs_prolog(const struct si_shader_selector *sel, } static bool si_build_main_function(struct si_shader_context *ctx, - struct nir_shader *nir, bool free_nir) + struct nir_shader *nir, bool free_nir, + bool ngg_cull_shader) { struct si_shader *shader = ctx->shader; struct si_shader_selector *sel = shader->selector; @@ -2281,6 +2304,8 @@ static bool si_build_main_function(struct si_shader_context *ctx, ctx->abi.emit_outputs = si_llvm_emit_es_epilogue; else if (shader->key.opt.vs_as_prim_discard_cs) ctx->abi.emit_outputs = si_llvm_emit_prim_discard_cs_epilogue; + else if (ngg_cull_shader) + ctx->abi.emit_outputs = gfx10_emit_ngg_culling_epilogue_4x_wave32; else if (shader->key.as_ngg) ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue; else @@ -2295,6 +2320,8 @@ static bool si_build_main_function(struct si_shader_context *ctx, if (shader->key.as_es) ctx->abi.emit_outputs = si_llvm_emit_es_epilogue; + else if (ngg_cull_shader) + ctx->abi.emit_outputs = gfx10_emit_ngg_culling_epilogue_4x_wave32; else if (shader->key.as_ngg) ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue; else @@ -2314,7 +2341,7 @@ static bool si_build_main_function(struct si_shader_context *ctx, return false; } - si_create_function(ctx); + si_create_function(ctx, ngg_cull_shader); if (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY) si_preload_esgs_ring(ctx); @@ -2349,6 +2376,7 @@ static bool si_build_main_function(struct si_shader_context *ctx, if (sel->so.num_outputs) scratch_size = 44; + assert(!ctx->gs_ngg_scratch); LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, scratch_size); ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module, ai32, "ngg_scratch", AC_ADDR_SPACE_LDS); @@ -2377,7 +2405,8 @@ static bool si_build_main_function(struct si_shader_context *ctx, /* This is really only needed when streamout and / or 
vertex * compaction is enabled. */ - if (sel->so.num_outputs && !ctx->gs_ngg_scratch) { + if (!ctx->gs_ngg_scratch && + (sel->so.num_outputs || shader->key.opt.ngg_culling)) { LLVMTypeRef asi32 = LLVMArrayType(ctx->i32, 8); ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module, asi32, "ngg_scratch", AC_ADDR_SPACE_LDS); @@ -2418,19 +2447,21 @@ static bool si_build_main_function(struct si_shader_context *ctx, if (!shader->is_monolithic || (ctx->type == PIPE_SHADER_TESS_EVAL && - (shader->key.as_ngg && !shader->key.as_es))) + shader->key.as_ngg && !shader->key.as_es && + !shader->key.opt.ngg_culling)) ac_init_exec_full_mask(&ctx->ac); if ((ctx->type == PIPE_SHADER_VERTEX || ctx->type == PIPE_SHADER_TESS_EVAL) && - shader->key.as_ngg && !shader->key.as_es) { + shader->key.as_ngg && !shader->key.as_es && + !shader->key.opt.ngg_culling) { gfx10_ngg_build_sendmsg_gs_alloc_req(ctx); /* Build the primitive export at the beginning * of the shader if possible. */ if (gfx10_ngg_export_prim_early(shader)) - gfx10_ngg_build_export_prim(ctx, NULL); + gfx10_ngg_build_export_prim(ctx, NULL, NULL); } if (ctx->type == PIPE_SHADER_TESS_CTRL || @@ -2500,12 +2531,14 @@ static bool si_build_main_function(struct si_shader_context *ctx, * * \param info Shader info of the vertex shader. * \param num_input_sgprs Number of input SGPRs for the vertex shader. + * \param has_old_ Whether the preceding shader part is the NGG cull shader. * \param prolog_key Key of the VS prolog * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS. * \param key Output shader part key. */ static void si_get_vs_prolog_key(const struct si_shader_info *info, unsigned num_input_sgprs, + bool ngg_cull_shader, const struct si_vs_prolog_bits *prolog_key, struct si_shader *shader_out, union si_shader_part_key *key) @@ -2518,6 +2551,9 @@ static void si_get_vs_prolog_key(const struct si_shader_info *info, key->vs_prolog.as_es = shader_out->key.as_es; key->vs_prolog.as_ngg = shader_out->key.as_ngg; + if (!ngg_cull_shader) + key->vs_prolog.has_ngg_cull_inputs = !!shader_out->key.opt.ngg_culling; + if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) { key->vs_prolog.as_ls = 1; key->vs_prolog.num_merged_next_stage_vgprs = 2; @@ -2881,33 +2917,70 @@ int si_compile_shader(struct si_screen *sscreen, shader->info.uses_instanceid = sel->info.uses_instanceid; - if (!si_build_main_function(&ctx, nir, free_nir)) { + LLVMValueRef ngg_cull_main_fn = NULL; + if (ctx.shader->key.opt.ngg_culling) { + if (!si_build_main_function(&ctx, nir, false, true)) { + si_llvm_dispose(&ctx); + return -1; + } + ngg_cull_main_fn = ctx.main_fn; + ctx.main_fn = NULL; + /* Re-set the IR. 
*/ + si_llvm_context_set_ir(&ctx, shader); + } + + if (!si_build_main_function(&ctx, nir, free_nir, false)) { si_llvm_dispose(&ctx); return -1; } if (shader->is_monolithic && ctx.type == PIPE_SHADER_VERTEX) { - LLVMValueRef parts[2]; + LLVMValueRef parts[4]; + unsigned num_parts = 0; bool need_prolog = si_vs_needs_prolog(sel, &shader->key.part.vs.prolog); - - parts[1] = ctx.main_fn; + LLVMValueRef main_fn = ctx.main_fn; + + if (ngg_cull_main_fn) { + if (need_prolog) { + union si_shader_part_key prolog_key; + si_get_vs_prolog_key(&sel->info, + shader->info.num_input_sgprs, + true, + &shader->key.part.vs.prolog, + shader, &prolog_key); + prolog_key.vs_prolog.is_monolithic = true; + si_build_vs_prolog_function(&ctx, &prolog_key); + parts[num_parts++] = ctx.main_fn; + } + parts[num_parts++] = ngg_cull_main_fn; + } if (need_prolog) { union si_shader_part_key prolog_key; si_get_vs_prolog_key(&sel->info, shader->info.num_input_sgprs, + false, &shader->key.part.vs.prolog, shader, &prolog_key); prolog_key.vs_prolog.is_monolithic = true; si_build_vs_prolog_function(&ctx, &prolog_key); - parts[0] = ctx.main_fn; + parts[num_parts++] = ctx.main_fn; } + parts[num_parts++] = main_fn; - si_build_wrapper_function(&ctx, parts + !need_prolog, - 1 + need_prolog, need_prolog, 0); + si_build_wrapper_function(&ctx, parts, num_parts, + need_prolog ? 1 : 0, 0); if (ctx.shader->key.opt.vs_as_prim_discard_cs) si_build_prim_discard_compute_shader(&ctx); + } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_TESS_EVAL && + ngg_cull_main_fn) { + LLVMValueRef parts[2]; + + parts[0] = ngg_cull_main_fn; + parts[1] = ctx.main_fn; + + si_build_wrapper_function(&ctx, parts, 2, 0, 0); } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) { if (sscreen->info.chip_class >= GFX9) { struct si_shader_selector *ls = shader->key.part.tcs.ls; @@ -2935,7 +3008,7 @@ int si_compile_shader(struct si_screen *sscreen, shader_ls.is_monolithic = true; si_llvm_context_set_ir(&ctx, &shader_ls); - if (!si_build_main_function(&ctx, nir, free_nir)) { + if (!si_build_main_function(&ctx, nir, free_nir, false)) { si_llvm_dispose(&ctx); return -1; } @@ -2947,6 +3020,7 @@ int si_compile_shader(struct si_screen *sscreen, union si_shader_part_key vs_prolog_key; si_get_vs_prolog_key(&ls->info, shader_ls.info.num_input_sgprs, + false, &shader->key.part.tcs.ls_prolog, shader, &vs_prolog_key); vs_prolog_key.vs_prolog.is_monolithic = true; @@ -3003,7 +3077,7 @@ int si_compile_shader(struct si_screen *sscreen, shader_es.is_monolithic = true; si_llvm_context_set_ir(&ctx, &shader_es); - if (!si_build_main_function(&ctx, nir, free_nir)) { + if (!si_build_main_function(&ctx, nir, free_nir, false)) { si_llvm_dispose(&ctx); return -1; } @@ -3016,6 +3090,7 @@ int si_compile_shader(struct si_screen *sscreen, union si_shader_part_key vs_prolog_key; si_get_vs_prolog_key(&es->info, shader_es.info.num_input_sgprs, + false, &shader->key.part.gs.vs_prolog, shader, &vs_prolog_key); vs_prolog_key.vs_prolog.is_monolithic = true; @@ -3249,10 +3324,11 @@ static void si_build_vs_prolog_function(struct si_shader_context *ctx, LLVMValueRef ret, func; int num_returns, i; unsigned first_vs_vgpr = key->vs_prolog.num_merged_next_stage_vgprs; - unsigned num_input_vgprs = key->vs_prolog.num_merged_next_stage_vgprs + 4; + unsigned num_input_vgprs = key->vs_prolog.num_merged_next_stage_vgprs + 4 + + (key->vs_prolog.has_ngg_cull_inputs ? 
1 : 0); struct ac_arg input_sgpr_param[key->vs_prolog.num_input_sgprs]; - struct ac_arg input_vgpr_param[9]; - LLVMValueRef input_vgprs[9]; + struct ac_arg input_vgpr_param[13]; + LLVMValueRef input_vgprs[13]; unsigned num_all_input_regs = key->vs_prolog.num_input_sgprs + num_input_vgprs; unsigned user_sgpr_base = key->vs_prolog.num_merged_next_stage_vgprs ? 8 : 0; @@ -3427,7 +3503,7 @@ static bool si_get_vs_prolog(struct si_screen *sscreen, /* Get the prolog. */ union si_shader_part_key prolog_key; - si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs, + si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs, false, key, shader, &prolog_key); shader->prolog = diff --git a/src/gallium/drivers/radeonsi/si_shader.h b/src/gallium/drivers/radeonsi/si_shader.h index 295db469be3..ee1ca9cda1d 100644 --- a/src/gallium/drivers/radeonsi/si_shader.h +++ b/src/gallium/drivers/radeonsi/si_shader.h @@ -157,6 +157,8 @@ struct si_context; */ #define SI_MAX_IO_GENERIC 32 +#define SI_NGG_PRIM_EDGE_FLAG_BITS ((1 << 9) | (1 << 19) | (1 << 29)) + /* SGPR user data indices */ enum { SI_SGPR_RW_BUFFERS, /* rings (& stream-out, VS only) */ @@ -254,6 +256,8 @@ enum { #define C_VS_STATE_PROVOKING_VTX_INDEX 0xFFFFFFCF #define S_VS_STATE_STREAMOUT_QUERY_ENABLED(x) (((unsigned)(x) & 0x1) << 6) #define C_VS_STATE_STREAMOUT_QUERY_ENABLED 0xFFFFFFBF +#define S_VS_STATE_SMALL_PRIM_PRECISION(x) (((unsigned)(x) & 0xF) << 7) +#define C_VS_STATE_SMALL_PRIM_PRECISION 0xFFFFF87F #define S_VS_STATE_LS_OUT_PATCH_SIZE(x) (((unsigned)(x) & 0x1FFF) << 11) #define C_VS_STATE_LS_OUT_PATCH_SIZE 0xFF0007FF #define S_VS_STATE_LS_OUT_VERTEX_SIZE(x) (((unsigned)(x) & 0xFF) << 24) @@ -269,6 +273,10 @@ enum { SI_VS_BLIT_SGPRS_POS_TEXCOORD = 9, }; +#define SI_NGG_CULL_VIEW_SMALLPRIMS (1 << 0) /* view.xy + small prims */ +#define SI_NGG_CULL_BACK_FACE (1 << 1) /* back faces */ +#define SI_NGG_CULL_FRONT_FACE (1 << 2) /* front faces */ + /** * For VS shader keys, describe any fixups required for vertex fetch. * @@ -425,6 +433,7 @@ struct si_shader_selector { bool vs_needs_prolog; bool force_correct_derivs_after_kill; bool prim_discard_cs_allowed; + bool ngg_culling_allowed; unsigned num_vs_inputs; unsigned num_vbos_in_user_sgprs; unsigned pa_cl_vs_out_cntl; @@ -554,6 +563,7 @@ union si_shader_part_key { unsigned as_ls:1; unsigned as_es:1; unsigned as_ngg:1; + unsigned has_ngg_cull_inputs:1; /* from the NGG cull shader */ /* Prologs for monolithic shaders shouldn't set EXEC. */ unsigned is_monolithic:1; } vs_prolog; @@ -644,6 +654,9 @@ struct si_shader_key { uint64_t kill_outputs; /* "get_unique_index" bits */ unsigned clip_disable:1; + /* For NGG VS and TES. */ + unsigned ngg_culling:3; /* SI_NGG_CULL_* */ + /* For shaders where monolithic variants have better code. * * This is a flag that has no effect on code generation, @@ -883,6 +896,7 @@ gfx10_is_ngg_passthrough(struct si_shader *shader) return sel->type != PIPE_SHADER_GEOMETRY && !sel->so.num_outputs && !sel->info.writes_edgeflag && + !shader->key.opt.ngg_culling && (sel->type != PIPE_SHADER_VERTEX || !shader->key.mono.u.vs_export_prim_id); } diff --git a/src/gallium/drivers/radeonsi/si_shader_internal.h b/src/gallium/drivers/radeonsi/si_shader_internal.h index 6509edb8181..492ebaa744b 100644 --- a/src/gallium/drivers/radeonsi/si_shader_internal.h +++ b/src/gallium/drivers/radeonsi/si_shader_internal.h @@ -83,6 +83,7 @@ struct si_shader_context { /* Common inputs for merged shaders. 
*/ struct ac_arg merged_wave_info; struct ac_arg merged_scratch_offset; + struct ac_arg small_prim_cull_info; /* API VS */ struct ac_arg vertex_buffers; struct ac_arg vb_descriptors[5]; @@ -95,6 +96,13 @@ struct si_shader_context { * [2:3] = NGG: output primitive type * [4:5] = NGG: provoking vertex index * [6] = NGG: streamout queries enabled + * [7:10] = NGG: small prim filter precision = num_samples / quant_mode, + * but in reality it's: 1/2^n, from 1/16 to 1/4096 = 1/2^4 to 1/2^12 + * Only the first 4 bits of the exponent are stored. + * Set it like this: (fui(num_samples / quant_mode) >> 23) + * Expand to FP32 like this: ((0x70 | value) << 23); + * With 0x70 = 112, we get 2^(112 + value - 127) = 2^(value - 15) + * = 1/2^(15 - value) in FP32 * [11:23] = stride between patches in DW = num_inputs * num_vertices * 4 * max = 32*32*4 + 32*4 * [24:31] = stride between vertices in DW = num_inputs * 4 @@ -102,6 +110,7 @@ struct si_shader_context { */ struct ac_arg vs_state_bits; struct ac_arg vs_blit_inputs; + struct ac_arg ngg_old_thread_id; /* generated by the NGG cull shader */ /* HW VS */ struct ac_arg streamout_config; struct ac_arg streamout_write_index; @@ -297,12 +306,16 @@ void si_fix_resource_usage(struct si_screen *sscreen, struct si_shader *shader); void si_llvm_emit_streamout(struct si_shader_context *ctx, struct si_shader_output_values *outputs, unsigned noutput, unsigned stream); -void si_create_function(struct si_shader_context *ctx); +void si_create_function(struct si_shader_context *ctx, bool ngg_cull_shader); bool gfx10_ngg_export_prim_early(struct si_shader *shader); void gfx10_ngg_build_sendmsg_gs_alloc_req(struct si_shader_context *ctx); void gfx10_ngg_build_export_prim(struct si_shader_context *ctx, - LLVMValueRef user_edgeflags[3]); + LLVMValueRef user_edgeflags[3], + LLVMValueRef prim_passthrough); +void gfx10_emit_ngg_culling_epilogue_4x_wave32(struct ac_shader_abi *abi, + unsigned max_outputs, + LLVMValueRef *addrs); void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi, unsigned max_outputs, LLVMValueRef *addrs); diff --git a/src/gallium/drivers/radeonsi/si_shader_llvm_gs.c b/src/gallium/drivers/radeonsi/si_shader_llvm_gs.c index a622ca5b0d2..7f47f31bcdf 100644 --- a/src/gallium/drivers/radeonsi/si_shader_llvm_gs.c +++ b/src/gallium/drivers/radeonsi/si_shader_llvm_gs.c @@ -529,7 +529,7 @@ si_generate_gs_copy_shader(struct si_screen *sscreen, builder = ctx.ac.builder; - si_create_function(&ctx); + si_create_function(&ctx, false); LLVMValueRef buf_ptr = ac_get_arg(&ctx.ac, ctx.rw_buffers); ctx.gsvs_ring[0] = ac_build_load_to_sgpr(&ctx.ac, buf_ptr, diff --git a/src/gallium/drivers/radeonsi/si_state.c b/src/gallium/drivers/radeonsi/si_state.c index c811f76e6d8..8cd56fd49d6 100644 --- a/src/gallium/drivers/radeonsi/si_state.c +++ b/src/gallium/drivers/radeonsi/si_state.c @@ -3101,6 +3101,7 @@ static void si_set_framebuffer_state(struct pipe_context *ctx, si_update_ps_colorbuf0_slot(sctx); si_update_poly_offset_state(sctx); + si_update_ngg_small_prim_precision(sctx); si_mark_atom_dirty(sctx, &sctx->atoms.s.cb_render_state); si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer); diff --git a/src/gallium/drivers/radeonsi/si_state_draw.c b/src/gallium/drivers/radeonsi/si_state_draw.c index 80f5f7c943c..7f7398ff7f5 100644 --- a/src/gallium/drivers/radeonsi/si_state_draw.c +++ b/src/gallium/drivers/radeonsi/si_state_draw.c @@ -2038,6 +2038,45 @@ static void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *i sctx->do_update_shaders = true; } + /* Update NGG 
culling settings. */ + if (sctx->ngg && + rast_prim == PIPE_PRIM_TRIANGLES && + (sctx->screen->always_use_ngg_culling || + /* At least 1500 non-indexed triangles (4500 vertices) are needed + * per draw call (no TES/GS) to enable NGG culling. Triangle strips + * don't need this, because they have good reuse and therefore + * perform the same as indexed triangles. + */ + (!index_size && prim == PIPE_PRIM_TRIANGLES && direct_count > 4500 && + !sctx->tes_shader.cso && !sctx->gs_shader.cso)) && + si_get_vs(sctx)->cso->ngg_culling_allowed) { + unsigned ngg_culling = 0; + + if (rs->rasterizer_discard) { + ngg_culling |= SI_NGG_CULL_FRONT_FACE | + SI_NGG_CULL_BACK_FACE; + } else { + /* Polygon mode can't use view and small primitive culling, + * because it draws points or lines where the culling depends + * on the point or line width. + */ + if (!rs->polygon_mode_enabled) + ngg_culling |= SI_NGG_CULL_VIEW_SMALLPRIMS; + + if (sctx->viewports.y_inverted ? rs->cull_back : rs->cull_front) + ngg_culling |= SI_NGG_CULL_FRONT_FACE; + if (sctx->viewports.y_inverted ? rs->cull_front : rs->cull_back) + ngg_culling |= SI_NGG_CULL_BACK_FACE; + } + if (ngg_culling != sctx->ngg_culling) { + sctx->ngg_culling = ngg_culling; + sctx->do_update_shaders = true; + } + } else if (sctx->ngg_culling) { + sctx->ngg_culling = false; + sctx->do_update_shaders = true; + } + if (sctx->do_update_shaders && !si_update_shaders(sctx)) goto return_cleanup; diff --git a/src/gallium/drivers/radeonsi/si_state_shaders.c b/src/gallium/drivers/radeonsi/si_state_shaders.c index e5ae110fa82..36dbfe9df6f 100644 --- a/src/gallium/drivers/radeonsi/si_state_shaders.c +++ b/src/gallium/drivers/radeonsi/si_state_shaders.c @@ -1272,8 +1272,23 @@ static void gfx10_shader_ngg(struct si_screen *sscreen, struct si_shader *shader shader->ctx_reg.ngg.pa_cl_ngg_cntl = S_028838_INDEX_BUF_EDGE_FLAG_ENA(gs_type == PIPE_SHADER_VERTEX); shader->pa_cl_vs_out_cntl = si_get_vs_out_cntl(gs_sel, true); + + /* Oversubscribe PC. This improves performance when there are too many varyings. */ + float oversub_pc_factor = 0.25; + + if (shader->key.opt.ngg_culling) { + /* Be more aggressive with NGG culling. */ + if (shader->info.nr_param_exports > 4) + oversub_pc_factor = 1; + else if (shader->info.nr_param_exports > 2) + oversub_pc_factor = 0.75; + else + oversub_pc_factor = 0.5; + } + + unsigned oversub_pc_lines = sscreen->info.pc_lines * oversub_pc_factor; shader->ctx_reg.ngg.ge_pc_alloc = S_030980_OVERSUB_EN(1) | - S_030980_NUM_PC_LINES(sscreen->info.pc_lines / 4 - 1); + S_030980_NUM_PC_LINES(oversub_pc_lines - 1); shader->ge_cntl = S_03096C_PRIM_GRP_SIZE(shader->ngg.max_gsprims) | @@ -1874,6 +1889,7 @@ static void si_shader_selector_key_hw_vs(struct si_context *sctx, uint64_t linked = outputs_written & inputs_read; key->opt.kill_outputs = ~linked & outputs_written; + key->opt.ngg_culling = sctx->ngg_culling; } /* Compute the key for the hw shader variant */ @@ -2918,6 +2934,20 @@ static void *si_create_shader_selector(struct pipe_context *ctx, default:; } + sel->ngg_culling_allowed = + sscreen->info.chip_class == GFX10 && + sscreen->info.has_dedicated_vram && + sscreen->use_ngg_culling && + /* Disallow TES by default, because TessMark results are mixed. 
*/ + (sel->type == PIPE_SHADER_VERTEX || + (sscreen->always_use_ngg_culling && sel->type == PIPE_SHADER_TESS_EVAL)) && + sel->info.writes_position && + !sel->info.writes_viewport_index && /* cull only against viewport 0 */ + !sel->info.writes_memory && + !sel->so.num_outputs && + !sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD] && + !sel->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION]; + /* PA_CL_VS_OUT_CNTL */ if (sctx->chip_class <= GFX9) sel->pa_cl_vs_out_cntl = si_get_vs_out_cntl(sel, false); diff --git a/src/gallium/drivers/radeonsi/si_state_viewport.c b/src/gallium/drivers/radeonsi/si_state_viewport.c index e0d81078139..682f00d44a8 100644 --- a/src/gallium/drivers/radeonsi/si_state_viewport.c +++ b/src/gallium/drivers/radeonsi/si_state_viewport.c @@ -23,10 +23,32 @@ */ #include "si_build_pm4.h" +#include "util/u_upload_mgr.h" #include "util/u_viewport.h" #define SI_MAX_SCISSOR 16384 +void si_update_ngg_small_prim_precision(struct si_context *ctx) +{ + if (!ctx->screen->use_ngg_culling) + return; + + /* Set VS_STATE.SMALL_PRIM_PRECISION for NGG culling. */ + unsigned num_samples = ctx->framebuffer.nr_samples; + unsigned quant_mode = ctx->viewports.as_scissor[0].quant_mode; + float precision; + + if (quant_mode == SI_QUANT_MODE_12_12_FIXED_POINT_1_4096TH) + precision = num_samples / 4096.0; + else if (quant_mode == SI_QUANT_MODE_14_10_FIXED_POINT_1_1024TH) + precision = num_samples / 1024.0; + else + precision = num_samples / 256.0; + + ctx->current_vs_state &= C_VS_STATE_SMALL_PRIM_PRECISION; + ctx->current_vs_state |= S_VS_STATE_SMALL_PRIM_PRECISION(fui(precision) >> 23); +} + void si_get_small_prim_cull_info(struct si_context *sctx, struct si_small_prim_cull_info *out) { @@ -321,6 +343,8 @@ static void si_emit_guardband(struct si_context *ctx) vp_as_scissor.quant_mode)); if (initial_cdw != ctx->gfx_cs->current.cdw) ctx->context_roll = true; + + si_update_ngg_small_prim_precision(ctx); } static void si_emit_scissors(struct si_context *ctx) @@ -448,6 +472,35 @@ static void si_emit_viewports(struct si_context *ctx) struct radeon_cmdbuf *cs = ctx->gfx_cs; struct pipe_viewport_state *states = ctx->viewports.states; + if (ctx->screen->use_ngg_culling) { + /* Set the viewport info for small primitive culling. */ + struct si_small_prim_cull_info info; + si_get_small_prim_cull_info(ctx, &info); + + if (memcmp(&info, &ctx->last_small_prim_cull_info, sizeof(info))) { + unsigned offset = 0; + + /* Align to 256, because the address is shifted by 8 bits. */ + u_upload_data(ctx->b.const_uploader, 0, sizeof(info), 256, + &info, &offset, + (struct pipe_resource**)&ctx->small_prim_cull_info_buf); + + ctx->small_prim_cull_info_address = + ctx->small_prim_cull_info_buf->gpu_address + offset; + ctx->last_small_prim_cull_info = info; + ctx->small_prim_cull_info_dirty = true; + } + + if (ctx->small_prim_cull_info_dirty) { + /* This will end up in SGPR6 as (value << 8), shifted by the hw. */ + radeon_add_to_buffer_list(ctx, ctx->gfx_cs, ctx->small_prim_cull_info_buf, + RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER); + radeon_set_sh_reg(ctx->gfx_cs, R_00B220_SPI_SHADER_PGM_LO_GS, + ctx->small_prim_cull_info_address >> 8); + ctx->small_prim_cull_info_dirty = false; + } + } + /* The simple case: Only 1 viewport is active. */ if (!ctx->vs_writes_viewport_index) { radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE, 6); |
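A note on the small-primitive precision plumbing added above: si_update_ngg_small_prim_precision() stores only the low 4 bits of the FP32 exponent of num_samples / quant_mode in VS_STATE bits [7:10], and the shader reconstructs the float with ((0x70 | value) << 23), i.e. 2^(value - 15), exactly as the si_shader_internal.h comment describes. Here is a self-contained round trip of that encoding, assuming fui()/uif() are plain float<->uint bit casts like Mesa's u_math.h helpers:

/* Sketch of the 4-bit small-prim-precision exponent round trip. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t fui(float f)  { uint32_t u; memcpy(&u, &f, 4); return u; }
static float    uif(uint32_t u) { float f; memcpy(&f, &u, 4); return f; }

int main(void)
{
   /* e.g. 1 sample with the 1/256th quantization mode; the valid range
    * per the patch comment is 1/16 .. 1/4096 (2^-4 .. 2^-12). */
   float precision = 1 / 256.0f;
   uint32_t packed = (fui(precision) >> 23) & 0xF; /* low 4 exponent bits */

   /* Shader-side expansion from the patch: ((0x70 | value) << 23). */
   float expanded = uif((0x70u | packed) << 23);

   printf("packed=0x%x expanded=%g (= 2^(%u - 15))\n",
          (unsigned)packed, expanded, (unsigned)packed);
   return 0;
}

With packed = 7 this reconstructs 2^-8 = 1/256, matching the input, which is why storing just four exponent bits is enough for the whole 1/16..1/4096 range.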