/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef SI_SHADER_PRIVATE_H
#define SI_SHADER_PRIVATE_H

#include "si_shader.h"
#include "ac_shader_abi.h"

struct pipe_debug_callback;

#define RADEON_LLVM_MAX_INPUTS 32 * 4

/* Ideally pass the sample mask input to the PS epilog as v14, which
 * is its usual location, so that the shader doesn't have to add v_mov.
 */
#define PS_EPILOG_SAMPLEMASK_MIN_LOC 14

struct si_shader_output_values {
   LLVMValueRef values[4];
   unsigned semantic_name;
   unsigned semantic_index;
   ubyte vertex_stream[4];
};

struct si_shader_context {
   struct ac_llvm_context ac;
   struct si_shader *shader;
   struct si_screen *screen;

   unsigned type; /* PIPE_SHADER_* specifies the type of shader. */

   /* For clamping the non-constant index in resource indexing: */
   unsigned num_const_buffers;
   unsigned num_shader_buffers;
   unsigned num_images;
   unsigned num_samplers;

   struct ac_shader_args args;
   struct ac_shader_abi abi;

   LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS];

   LLVMBasicBlockRef merged_wrap_if_entry_block;
   int merged_wrap_if_label;

   LLVMValueRef main_fn;
   LLVMTypeRef return_type;

   struct ac_arg const_and_shader_buffers;
   struct ac_arg samplers_and_images;

   /* For merged shaders, the per-stage descriptors for the stage other
    * than the one we're processing, used to pass them through from the
    * first stage to the second.
    */
   struct ac_arg other_const_and_shader_buffers;
   struct ac_arg other_samplers_and_images;

   struct ac_arg rw_buffers;
   struct ac_arg bindless_samplers_and_images;

   /* Common inputs for merged shaders. */
   struct ac_arg merged_wave_info;
   struct ac_arg merged_scratch_offset;
   struct ac_arg small_prim_cull_info;

   /* API VS */
   struct ac_arg vertex_buffers;
   struct ac_arg vb_descriptors[5];
   struct ac_arg rel_auto_id;
   struct ac_arg vs_prim_id;
   struct ac_arg vertex_index0;
   /* VS states and layout of LS outputs / TCS inputs at the end
    * [0] = clamp vertex color
    * [1] = indexed
    * [2:3] = NGG: output primitive type
    * [4:5] = NGG: provoking vertex index
    * [6] = NGG: streamout queries enabled
    * [7:10] = NGG: small prim filter precision = num_samples / quant_mode,
    *          but in reality it's: 1/2^n, from 1/16 to 1/4096 = 1/2^4 to 1/2^12
    *          Only the first 4 bits of the exponent are stored.
    *          Set it like this: (fui(num_samples / quant_mode) >> 23)
    *          Expand to FP32 like this: ((0x70 | value) << 23);
    *          With 0x70 = 112, we get 2^(112 + value - 127) = 2^(value - 15)
    *          = 1/2^(15 - value) in FP32
    * [11:23] = stride between patches in DW = num_inputs * num_vertices * 4
    *           max = 32*32*4 + 32*4
    * [24:31] = stride between vertices in DW = num_inputs * 4
    *           max = 32*4
    */
   struct ac_arg vs_state_bits;
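
   /* Worked example for the [7:10] encoding above (illustrative note, not part
    * of the original comment): for num_samples / quant_mode = 1/16 = 2^-4,
    * fui(1.0 / 16) = 0x3D800000 and (0x3D800000 >> 23) = 0x7B, so the stored
    * 4-bit value is 0xB = 11.  Expanding with ((0x70 | 11) << 23) gives back
    * 0x3D800000, i.e. 2^(11 - 15) = 1/16.  Likewise 1/4096 = 2^-12 stores 3,
    * since 2^(3 - 15) = 1/4096.
    */
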
   struct ac_arg vs_blit_inputs;
   struct ac_arg ngg_old_thread_id; /* generated by the NGG cull shader */

   /* HW VS */
   struct ac_arg streamout_config;
   struct ac_arg streamout_write_index;
   struct ac_arg streamout_offset[4];

   /* API TCS & TES */
   /* Layout of TCS outputs in the offchip buffer
    * # 6 bits
    * [0:5] = the number of patches per threadgroup, max = NUM_PATCHES (40)
    * # 6 bits
    * [6:11] = the number of output vertices per patch, max = 32
    * # 20 bits
    * [12:31] = the offset of per patch attributes in the buffer in bytes.
    *           max = NUM_PATCHES*32*32*16
    */
   struct ac_arg tcs_offchip_layout;
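
   /* Illustrative sketch (not taken from the driver code): fields packed as in
    * the layout above would typically be extracted with si_unpack_param(),
    * declared later in this header, using the bit positions listed above, e.g.
    *
    *    LLVMValueRef num_patches =
    *       si_unpack_param(ctx, ctx->tcs_offchip_layout, 0, 6);
    *    LLVMValueRef verts_per_patch =
    *       si_unpack_param(ctx, ctx->tcs_offchip_layout, 6, 6);
    */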

   /* API TCS */
   /* Offsets where TCS outputs and TCS patch outputs live in LDS:
    * [0:15] = TCS output patch0 offset / 16, max = NUM_PATCHES * 32 * 32
    * [16:31] = TCS output patch0 offset for per-patch / 16
    *           max = (NUM_PATCHES + 1) * 32*32
    */
   struct ac_arg tcs_out_lds_offsets;
   /* Layout of TCS outputs / TES inputs:
    * [0:12] = stride between output patches in DW, num_outputs * num_vertices * 4
    *          max = 32*32*4 + 32*4
    * [13:18] = gl_PatchVerticesIn, max = 32
    * [19:31] = high 13 bits of the 32-bit address of tessellation ring buffers
    */
   struct ac_arg tcs_out_lds_layout;
   struct ac_arg tcs_offchip_offset;
   struct ac_arg tcs_factor_offset;

   /* API TES */
   struct ac_arg tes_offchip_addr;
   struct ac_arg tes_u;
   struct ac_arg tes_v;
   struct ac_arg tes_rel_patch_id;

   /* HW ES */
   struct ac_arg es2gs_offset;

   /* HW GS */
   /* On gfx10:
    *  - bits 0..11: ordered_wave_id
    *  - bits 12..20: number of vertices in group
    *  - bits 22..30: number of primitives in group
    */
   struct ac_arg gs_tg_info;
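
   /* Illustrative sketch (not taken from the driver code): the gfx10 fields
    * above could be read the same way, e.g.
    *
    *    LLVMValueRef vtx_cnt = si_unpack_param(ctx, ctx->gs_tg_info, 12, 9);
    *    LLVMValueRef prim_cnt = si_unpack_param(ctx, ctx->gs_tg_info, 22, 9);
    */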

   /* API GS */
   struct ac_arg gs2vs_offset;
   struct ac_arg gs_wave_id; /* GFX6 */
   struct ac_arg gs_vtx_offset[6]; /* in dwords (GFX6) */
   struct ac_arg gs_vtx01_offset; /* in dwords (GFX9) */
   struct ac_arg gs_vtx23_offset; /* in dwords (GFX9) */
   struct ac_arg gs_vtx45_offset; /* in dwords (GFX9) */

   /* PS */
   struct ac_arg pos_fixed_pt;

   /* CS */
   struct ac_arg block_size;
   struct ac_arg cs_user_data;

   struct ac_llvm_compiler *compiler;

   /* Preloaded descriptors. */
   LLVMValueRef esgs_ring;
   LLVMValueRef gsvs_ring[4];
   LLVMValueRef tess_offchip_ring;

   LLVMValueRef invoc0_tess_factors[6]; /* outer[4], inner[2] */
   LLVMValueRef gs_next_vertex[4];
   LLVMValueRef gs_curprim_verts[4];
   LLVMValueRef gs_generated_prims[4];
   LLVMValueRef gs_ngg_emit;
   LLVMValueRef gs_ngg_scratch;

   LLVMValueRef postponed_kill;

   LLVMValueRef return_value;
};

static inline struct si_shader_context *
si_shader_context_from_abi(struct ac_shader_abi *abi)
{
   struct si_shader_context *ctx = NULL;
   return container_of(abi, ctx, abi);
}
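
/* Usage note (hypothetical example, not part of this header): ac_shader_abi
 * callbacks only receive the struct ac_shader_abi pointer, so implementations
 * recover the enclosing context first:
 *
 *    static LLVMValueRef some_abi_callback(struct ac_shader_abi *abi)
 *    {
 *       struct si_shader_context *ctx = si_shader_context_from_abi(abi);
 *       return ctx->main_fn;
 *    }
 */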

void si_llvm_context_init(struct si_shader_context *ctx, struct si_screen *sscreen,
                          struct ac_llvm_compiler *compiler, unsigned wave_size);
void si_llvm_context_set_ir(struct si_shader_context *ctx, struct si_shader *shader);
void si_llvm_create_func(struct si_shader_context *ctx, const char *name,
                         LLVMTypeRef *return_types, unsigned num_return_elems,
                         unsigned max_workgroup_size);
void si_llvm_dispose(struct si_shader_context *ctx);
void si_llvm_optimize_module(struct si_shader_context *ctx);
LLVMValueRef si_nir_load_input_tes(struct ac_shader_abi *abi, LLVMTypeRef type,
                                   LLVMValueRef vertex_index, LLVMValueRef param_index,
                                   unsigned const_index, unsigned location,
                                   unsigned driver_location, unsigned component,
                                   unsigned num_components, bool is_patch,
                                   bool is_compact, bool load_input);
bool si_is_merged_shader(struct si_shader_context *ctx);
LLVMValueRef si_get_sample_id(struct si_shader_context *ctx);
LLVMValueRef si_buffer_load_const(struct si_shader_context *ctx, LLVMValueRef resource,
                                  LLVMValueRef offset);
void si_llvm_build_ret(struct si_shader_context *ctx, LLVMValueRef ret);
LLVMValueRef si_prolog_get_rw_buffers(struct si_shader_context *ctx);
LLVMValueRef si_build_gather_64bit(struct si_shader_context *ctx, LLVMTypeRef type,
                                   LLVMValueRef val1, LLVMValueRef val2);
void si_llvm_emit_barrier(struct si_shader_context *ctx);
void si_llvm_declare_esgs_ring(struct si_shader_context *ctx);
void si_declare_compute_memory(struct si_shader_context *ctx);
LLVMValueRef si_get_primitive_id(struct si_shader_context *ctx, unsigned swizzle);
void si_llvm_export_vs(struct si_shader_context *ctx,
                       struct si_shader_output_values *outputs, unsigned noutput);
void si_emit_streamout_output(struct si_shader_context *ctx, LLVMValueRef const *so_buffers,
                              LLVMValueRef const *so_write_offsets,
                              struct pipe_stream_output *stream_out,
                              struct si_shader_output_values *shader_out);
void si_add_arg_checked(struct ac_shader_args *args, enum ac_arg_regfile file,
                        unsigned registers, enum ac_arg_type type, struct ac_arg *arg,
                        unsigned idx);
void si_llvm_load_input_vs(struct si_shader_context *ctx, unsigned input_index,
                           LLVMValueRef out[4]);
bool si_nir_build_llvm(struct si_shader_context *ctx, struct nir_shader *nir);
LLVMValueRef si_unpack_param(struct si_shader_context *ctx, struct ac_arg param,
                             unsigned rshift, unsigned bitwidth);
void si_build_wrapper_function(struct si_shader_context *ctx, LLVMValueRef *parts,
                               unsigned num_parts, unsigned main_part,
                               unsigned next_shader_first_part);
bool si_need_ps_prolog(const union si_shader_part_key *key);
void si_get_ps_prolog_key(struct si_shader *shader, union si_shader_part_key *key,
                          bool separate_prolog);
void si_get_ps_epilog_key(struct si_shader *shader, union si_shader_part_key *key);
LLVMValueRef si_insert_input_ret(struct si_shader_context *ctx, LLVMValueRef ret,
                                 struct ac_arg param, unsigned return_index);
LLVMValueRef si_insert_input_ret_float(struct si_shader_context *ctx, LLVMValueRef ret,
                                       struct ac_arg param, unsigned return_index);
LLVMValueRef si_insert_input_ptr(struct si_shader_context *ctx, LLVMValueRef ret,
                                 struct ac_arg param, unsigned return_index);
int si_compile_llvm(struct si_screen *sscreen, struct si_shader_binary *binary,
                    struct ac_shader_config *conf, struct ac_llvm_compiler *compiler,
                    struct ac_llvm_context *ac, struct pipe_debug_callback *debug,
                    enum pipe_shader_type shader_type, const char *name,
                    bool less_optimized);
void si_fix_resource_usage(struct si_screen *sscreen, struct si_shader *shader);
void si_llvm_emit_streamout(struct si_shader_context *ctx,
                            struct si_shader_output_values *outputs,
                            unsigned noutput, unsigned stream);
void si_create_function(struct si_shader_context *ctx, bool ngg_cull_shader);

bool gfx10_ngg_export_prim_early(struct si_shader *shader);
void gfx10_ngg_build_sendmsg_gs_alloc_req(struct si_shader_context *ctx);
void gfx10_ngg_build_export_prim(struct si_shader_context *ctx,
                                 LLVMValueRef user_edgeflags[3],
                                 LLVMValueRef prim_passthrough);
void gfx10_emit_ngg_culling_epilogue_4x_wave32(struct ac_shader_abi *abi,
                                               unsigned max_outputs, LLVMValueRef *addrs);
void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi, unsigned max_outputs,
                             LLVMValueRef *addrs);
void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx, unsigned stream,
                              LLVMValueRef *addrs);
void gfx10_ngg_gs_emit_prologue(struct si_shader_context *ctx);
void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx);
void gfx10_ngg_calculate_subgroup_info(struct si_shader *shader);

/* si_shader_llvm_gs.c */
LLVMValueRef si_is_es_thread(struct si_shader_context *ctx);
LLVMValueRef si_is_gs_thread(struct si_shader_context *ctx);
void si_llvm_emit_es_epilogue(struct ac_shader_abi *abi, unsigned max_outputs,
                              LLVMValueRef *addrs);
void si_preload_esgs_ring(struct si_shader_context *ctx);
void si_preload_gs_rings(struct si_shader_context *ctx);
void si_llvm_build_gs_prolog(struct si_shader_context *ctx, union si_shader_part_key *key);
void si_llvm_init_gs_callbacks(struct si_shader_context *ctx);

/* si_shader_llvm_tess.c */
void si_llvm_preload_tes_rings(struct si_shader_context *ctx);
void si_llvm_emit_ls_epilogue(struct ac_shader_abi *abi, unsigned max_outputs,
                              LLVMValueRef *addrs);
void si_llvm_build_tcs_epilog(struct si_shader_context *ctx, union si_shader_part_key *key);
void si_llvm_init_tcs_callbacks(struct si_shader_context *ctx);
void si_llvm_init_tes_callbacks(struct si_shader_context *ctx);

/* si_shader_llvm_ps.c */
void si_llvm_build_ps_prolog(struct si_shader_context *ctx, union si_shader_part_key *key);
void si_llvm_build_ps_epilog(struct si_shader_context *ctx, union si_shader_part_key *key);
void si_llvm_build_monolithic_ps(struct si_shader_context *ctx, struct si_shader *shader);
void si_llvm_init_ps_callbacks(struct si_shader_context *ctx);

/* si_shader_llvm_resources.c */
void si_llvm_init_resource_callbacks(struct si_shader_context *ctx);

#endif /* SI_SHADER_PRIVATE_H */