/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "ac_shader_util.h"
#include "sid.h"
unsigned
ac_get_spi_shader_z_format(bool writes_z, bool writes_stencil,
			   bool writes_samplemask)
{
	if (writes_z) {
		/* Z needs 32 bits. */
		if (writes_samplemask)
			return V_028710_SPI_SHADER_32_ABGR;
		else if (writes_stencil)
			return V_028710_SPI_SHADER_32_GR;
		else
			return V_028710_SPI_SHADER_32_R;
	} else if (writes_stencil || writes_samplemask) {
		/* Both stencil and sample mask need only 16 bits. */
		return V_028710_SPI_SHADER_UINT16_ABGR;
	} else {
		return V_028710_SPI_SHADER_ZERO;
	}
}
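
/* Derive the CB_SHADER_MASK value (a 4-bit component write mask per color
 * export) from a packed SPI_SHADER_COL_FORMAT value.
 */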
unsigned
ac_get_cb_shader_mask(unsigned spi_shader_col_format)
{
	unsigned i, cb_shader_mask = 0;

	for (i = 0; i < 8; i++) {
		switch ((spi_shader_col_format >> (i * 4)) & 0xf) {
		case V_028714_SPI_SHADER_ZERO:
			break;
		case V_028714_SPI_SHADER_32_R:
			cb_shader_mask |= 0x1 << (i * 4);
			break;
		case V_028714_SPI_SHADER_32_GR:
			cb_shader_mask |= 0x3 << (i * 4);
			break;
		case V_028714_SPI_SHADER_32_AR:
			cb_shader_mask |= 0x9 << (i * 4);
			break;
		case V_028714_SPI_SHADER_FP16_ABGR:
		case V_028714_SPI_SHADER_UNORM16_ABGR:
		case V_028714_SPI_SHADER_SNORM16_ABGR:
		case V_028714_SPI_SHADER_UINT16_ABGR:
		case V_028714_SPI_SHADER_SINT16_ABGR:
		case V_028714_SPI_SHADER_32_ABGR:
			cb_shader_mask |= 0xf << (i * 4);
			break;
		default:
			assert(0);
		}
	}
	return cb_shader_mask;
}

/**
 * Calculate the appropriate setting of VGT_GS_MODE for a geometry shader
 * with the given maximum number of emitted vertices.
 */
uint32_t
ac_vgt_gs_mode(unsigned gs_max_vert_out, enum chip_class chip_class)
{
	unsigned cut_mode;

	if (gs_max_vert_out <= 128) {
		cut_mode = V_028A40_GS_CUT_128;
	} else if (gs_max_vert_out <= 256) {
		cut_mode = V_028A40_GS_CUT_256;
	} else if (gs_max_vert_out <= 512) {
		cut_mode = V_028A40_GS_CUT_512;
	} else {
		assert(gs_max_vert_out <= 1024);
		cut_mode = V_028A40_GS_CUT_1024;
	}

	return S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
	       S_028A40_CUT_MODE(cut_mode) |
	       S_028A40_ES_WRITE_OPTIMIZE(chip_class <= GFX8) |
	       S_028A40_GS_WRITE_OPTIMIZE(1) |
	       S_028A40_ONCHIP(chip_class >= GFX9 ? 1 : 0);
}

/// Translate a (dfmt, nfmt) pair into a chip-appropriate combined format
/// value for LLVM8+ tbuffer intrinsics.
unsigned
ac_get_tbuffer_format(enum chip_class chip_class,
		      unsigned dfmt, unsigned nfmt)
{
	// Some games try to access vertex buffers without a valid format.
	// This is a game bug, but we should still handle it gracefully.
	if (dfmt == V_008F0C_IMG_FORMAT_INVALID)
		return V_008F0C_IMG_FORMAT_INVALID;

	if (chip_class >= GFX10) {
		unsigned format;
		switch (dfmt) {
		default: unreachable("bad dfmt");
		case V_008F0C_BUF_DATA_FORMAT_INVALID: format = V_008F0C_IMG_FORMAT_INVALID; break;
		case V_008F0C_BUF_DATA_FORMAT_8: format = V_008F0C_IMG_FORMAT_8_UINT; break;
		case V_008F0C_BUF_DATA_FORMAT_8_8: format = V_008F0C_IMG_FORMAT_8_8_UINT; break;
		case V_008F0C_BUF_DATA_FORMAT_8_8_8_8: format = V_008F0C_IMG_FORMAT_8_8_8_8_UINT; break;
		case V_008F0C_BUF_DATA_FORMAT_16: format = V_008F0C_IMG_FORMAT_16_UINT; break;
		case V_008F0C_BUF_DATA_FORMAT_16_16: format = V_008F0C_IMG_FORMAT_16_16_UINT; break;
		case V_008F0C_BUF_DATA_FORMAT_16_16_16_16: format = V_008F0C_IMG_FORMAT_16_16_16_16_UINT; break;
		case V_008F0C_BUF_DATA_FORMAT_32: format = V_008F0C_IMG_FORMAT_32_UINT; break;
		case V_008F0C_BUF_DATA_FORMAT_32_32: format = V_008F0C_IMG_FORMAT_32_32_UINT; break;
		case V_008F0C_BUF_DATA_FORMAT_32_32_32: format = V_008F0C_IMG_FORMAT_32_32_32_UINT; break;
		case V_008F0C_BUF_DATA_FORMAT_32_32_32_32: format = V_008F0C_IMG_FORMAT_32_32_32_32_UINT; break;
		case V_008F0C_BUF_DATA_FORMAT_2_10_10_10: format = V_008F0C_IMG_FORMAT_2_10_10_10_UINT; break;
		}

		// Use the regularity properties of the combined format enum.
		//
		// Note: float is incompatible with 8-bit data formats,
		// [us]{norm,scaled} are incompatible with 32-bit data formats.
		// [us]scaled are not writable.
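		//
		// For example, with dfmt = 16_16 the switch above selects
		// IMG_FORMAT_16_16_UINT; the UNORM variant sits 4 enum entries
		// earlier and the FLOAT variant 2 entries later, so a single
		// offset keyed on nfmt yields the final combined format.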
		switch (nfmt) {
		case V_008F0C_BUF_NUM_FORMAT_UNORM: format -= 4; break;
		case V_008F0C_BUF_NUM_FORMAT_SNORM: format -= 3; break;
		case V_008F0C_BUF_NUM_FORMAT_USCALED: format -= 2; break;
		case V_008F0C_BUF_NUM_FORMAT_SSCALED: format -= 1; break;
		default: unreachable("bad nfmt");
		case V_008F0C_BUF_NUM_FORMAT_UINT: break;
		case V_008F0C_BUF_NUM_FORMAT_SINT: format += 1; break;
		case V_008F0C_BUF_NUM_FORMAT_FLOAT: format += 2; break;
		}
		return format;
	} else {
		return dfmt | (nfmt << 4);
	}
}
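
/* Each entry lists, per buffer data format: the element size in bytes, the
 * number of channels, the bytes per channel (0 for packed formats), and the
 * data format of a single channel.
 */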
static const struct ac_data_format_info data_format_table[] = {
	[V_008F0C_BUF_DATA_FORMAT_INVALID] = { 0, 4, 0, V_008F0C_BUF_DATA_FORMAT_INVALID },
	[V_008F0C_BUF_DATA_FORMAT_8] = { 1, 1, 1, V_008F0C_BUF_DATA_FORMAT_8 },
	[V_008F0C_BUF_DATA_FORMAT_16] = { 2, 1, 2, V_008F0C_BUF_DATA_FORMAT_16 },
	[V_008F0C_BUF_DATA_FORMAT_8_8] = { 2, 2, 1, V_008F0C_BUF_DATA_FORMAT_8 },
	[V_008F0C_BUF_DATA_FORMAT_32] = { 4, 1, 4, V_008F0C_BUF_DATA_FORMAT_32 },
	[V_008F0C_BUF_DATA_FORMAT_16_16] = { 4, 2, 2, V_008F0C_BUF_DATA_FORMAT_16 },
	[V_008F0C_BUF_DATA_FORMAT_10_11_11] = { 4, 3, 0, V_008F0C_BUF_DATA_FORMAT_10_11_11 },
	[V_008F0C_BUF_DATA_FORMAT_11_11_10] = { 4, 3, 0, V_008F0C_BUF_DATA_FORMAT_11_11_10 },
	[V_008F0C_BUF_DATA_FORMAT_10_10_10_2] = { 4, 4, 0, V_008F0C_BUF_DATA_FORMAT_10_10_10_2 },
	[V_008F0C_BUF_DATA_FORMAT_2_10_10_10] = { 4, 4, 0, V_008F0C_BUF_DATA_FORMAT_2_10_10_10 },
	[V_008F0C_BUF_DATA_FORMAT_8_8_8_8] = { 4, 4, 1, V_008F0C_BUF_DATA_FORMAT_8 },
	[V_008F0C_BUF_DATA_FORMAT_32_32] = { 8, 2, 4, V_008F0C_BUF_DATA_FORMAT_32 },
	[V_008F0C_BUF_DATA_FORMAT_16_16_16_16] = { 8, 4, 2, V_008F0C_BUF_DATA_FORMAT_16 },
	[V_008F0C_BUF_DATA_FORMAT_32_32_32] = { 12, 3, 4, V_008F0C_BUF_DATA_FORMAT_32 },
	[V_008F0C_BUF_DATA_FORMAT_32_32_32_32] = { 16, 4, 4, V_008F0C_BUF_DATA_FORMAT_32 },
};

const struct ac_data_format_info *
ac_get_data_format_info(unsigned dfmt)
{
	assert(dfmt < ARRAY_SIZE(data_format_table));
	return &data_format_table[dfmt];
}
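
/* Map a GLSL sampler dimension to the dimension used by AMD image
 * instructions. On GFX9, 1D textures are handled as 2D, so 1D maps to the
 * 2D variants there.
 */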
enum ac_image_dim
ac_get_sampler_dim(enum chip_class chip_class, enum glsl_sampler_dim dim,
		   bool is_array)
{
	switch (dim) {
	case GLSL_SAMPLER_DIM_1D:
		if (chip_class == GFX9)
			return is_array ? ac_image_2darray : ac_image_2d;
		return is_array ? ac_image_1darray : ac_image_1d;
	case GLSL_SAMPLER_DIM_2D:
	case GLSL_SAMPLER_DIM_RECT:
	case GLSL_SAMPLER_DIM_EXTERNAL:
		return is_array ? ac_image_2darray : ac_image_2d;
	case GLSL_SAMPLER_DIM_3D:
		return ac_image_3d;
	case GLSL_SAMPLER_DIM_CUBE:
		return ac_image_cube;
	case GLSL_SAMPLER_DIM_MS:
		return is_array ? ac_image_2darraymsaa : ac_image_2dmsaa;
	case GLSL_SAMPLER_DIM_SUBPASS:
		return ac_image_2darray;
	case GLSL_SAMPLER_DIM_SUBPASS_MS:
		return ac_image_2darraymsaa;
	default:
		unreachable("bad sampler dim");
	}
}
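
/* Like ac_get_sampler_dim, but adjusted for image operations so that the
 * returned dimension matches the resource type set in the descriptor.
 */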
enum ac_image_dim
ac_get_image_dim(enum chip_class chip_class, enum glsl_sampler_dim sdim,
		 bool is_array)
{
	enum ac_image_dim dim = ac_get_sampler_dim(chip_class, sdim, is_array);

	/* Match the resource type set in the descriptor. */
	if (dim == ac_image_cube ||
	    (chip_class <= GFX8 && dim == ac_image_3d))
		dim = ac_image_2darray;
	else if (sdim == GLSL_SAMPLER_DIM_2D && !is_array && chip_class == GFX9) {
		/* When a single layer of a 3D texture is bound, the shader
		 * will refer to a 2D target, but the descriptor has a 3D type.
		 * Since the HW ignores BASE_ARRAY in this case, we need to
		 * send 3 coordinates. This doesn't hurt when the underlying
		 * texture is non-3D.
		 */
		dim = ac_image_3d;
	}

	return dim;
}
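
/* Count the input VGPRs of a pixel shader according to SPI_PS_INPUT_ADDR,
 * and optionally return the VGPR indices of the front-face and ancillary
 * inputs (-1 when the corresponding input is not enabled).
 */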
unsigned
ac_get_fs_input_vgpr_cnt(const struct ac_shader_config *config,
			 signed char *face_vgpr_index_ptr,
			 signed char *ancillary_vgpr_index_ptr)
{
	unsigned num_input_vgprs = 0;
	signed char face_vgpr_index = -1;
	signed char ancillary_vgpr_index = -1;

	if (G_0286CC_PERSP_SAMPLE_ENA(config->spi_ps_input_addr))
		num_input_vgprs += 2;
	if (G_0286CC_PERSP_CENTER_ENA(config->spi_ps_input_addr))
		num_input_vgprs += 2;
	if (G_0286CC_PERSP_CENTROID_ENA(config->spi_ps_input_addr))
		num_input_vgprs += 2;
	if (G_0286CC_PERSP_PULL_MODEL_ENA(config->spi_ps_input_addr))
		num_input_vgprs += 3;
	if (G_0286CC_LINEAR_SAMPLE_ENA(config->spi_ps_input_addr))
		num_input_vgprs += 2;
	if (G_0286CC_LINEAR_CENTER_ENA(config->spi_ps_input_addr))
		num_input_vgprs += 2;
	if (G_0286CC_LINEAR_CENTROID_ENA(config->spi_ps_input_addr))
		num_input_vgprs += 2;
	if (G_0286CC_LINE_STIPPLE_TEX_ENA(config->spi_ps_input_addr))
		num_input_vgprs += 1;
	if (G_0286CC_POS_X_FLOAT_ENA(config->spi_ps_input_addr))
		num_input_vgprs += 1;
	if (G_0286CC_POS_Y_FLOAT_ENA(config->spi_ps_input_addr))
		num_input_vgprs += 1;
	if (G_0286CC_POS_Z_FLOAT_ENA(config->spi_ps_input_addr))
		num_input_vgprs += 1;
	if (G_0286CC_POS_W_FLOAT_ENA(config->spi_ps_input_addr))
		num_input_vgprs += 1;
	if (G_0286CC_FRONT_FACE_ENA(config->spi_ps_input_addr)) {
		face_vgpr_index = num_input_vgprs;
		num_input_vgprs += 1;
	}
	if (G_0286CC_ANCILLARY_ENA(config->spi_ps_input_addr)) {
		ancillary_vgpr_index = num_input_vgprs;
		num_input_vgprs += 1;
	}
	if (G_0286CC_SAMPLE_COVERAGE_ENA(config->spi_ps_input_addr))
		num_input_vgprs += 1;
	if (G_0286CC_POS_FIXED_PT_ENA(config->spi_ps_input_addr))
		num_input_vgprs += 1;

	if (face_vgpr_index_ptr)
		*face_vgpr_index_ptr = face_vgpr_index;
	if (ancillary_vgpr_index_ptr)
		*ancillary_vgpr_index_ptr = ancillary_vgpr_index;

	return num_input_vgprs;
}
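
/* Select the four SPI color export format variants (normal/alpha/blend/
 * blend_alpha, see below) for a color buffer with the given CB format,
 * component swap and number type.
 */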
void ac_choose_spi_color_formats(unsigned format, unsigned swap,
				 unsigned ntype, bool is_depth,
				 struct ac_spi_color_formats *formats)
{
	/* Alpha is needed for alpha-to-coverage.
	 * Blending may be with or without alpha.
	 */
	unsigned normal = 0; /* most optimal, may not support blending or export alpha */
	unsigned alpha = 0; /* exports alpha, but may not support blending */
	unsigned blend = 0; /* supports blending, but may not export alpha */
	unsigned blend_alpha = 0; /* least optimal, supports blending and exports alpha */

	/* Choose the SPI color formats. These are required values for RB+.
	 * Other chips have multiple choices, though they are not necessarily better.
	 */
	switch (format) {
	case V_028C70_COLOR_5_6_5:
	case V_028C70_COLOR_1_5_5_5:
	case V_028C70_COLOR_5_5_5_1:
	case V_028C70_COLOR_4_4_4_4:
	case V_028C70_COLOR_10_11_11:
	case V_028C70_COLOR_11_11_10:
	case V_028C70_COLOR_5_9_9_9:
	case V_028C70_COLOR_8:
	case V_028C70_COLOR_8_8:
	case V_028C70_COLOR_8_8_8_8:
	case V_028C70_COLOR_10_10_10_2:
	case V_028C70_COLOR_2_10_10_10:
		if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		break;

	case V_028C70_COLOR_16:
	case V_028C70_COLOR_16_16:
	case V_028C70_COLOR_16_16_16_16:
		if (ntype == V_028C70_NUMBER_UNORM || ntype == V_028C70_NUMBER_SNORM) {
			/* UNORM16 and SNORM16 don't support blending */
			if (ntype == V_028C70_NUMBER_UNORM)
				normal = alpha = V_028714_SPI_SHADER_UNORM16_ABGR;
			else
				normal = alpha = V_028714_SPI_SHADER_SNORM16_ABGR;

			/* Use 32 bits per channel for blending. */
			if (format == V_028C70_COLOR_16) {
				if (swap == V_028C70_SWAP_STD) { /* R */
					blend = V_028714_SPI_SHADER_32_R;
					blend_alpha = V_028714_SPI_SHADER_32_AR;
				} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
				else
					assert(0);
			} else if (format == V_028C70_COLOR_16_16) {
				if (swap == V_028C70_SWAP_STD) { /* RG */
					blend = V_028714_SPI_SHADER_32_GR;
					blend_alpha = V_028714_SPI_SHADER_32_ABGR;
				} else if (swap == V_028C70_SWAP_ALT) /* RA */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
				else
					assert(0);
			} else /* 16_16_16_16 */
				blend = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_FLOAT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		else
			assert(0);
		break;

	case V_028C70_COLOR_32:
		if (swap == V_028C70_SWAP_STD) { /* R */
			blend = normal = V_028714_SPI_SHADER_32_R;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_AR;
		} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		else
			assert(0);
		break;

	case V_028C70_COLOR_32_32:
		if (swap == V_028C70_SWAP_STD) { /* RG */
			blend = normal = V_028714_SPI_SHADER_32_GR;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (swap == V_028C70_SWAP_ALT) /* RA */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		else
			assert(0);
		break;

	case V_028C70_COLOR_32_32_32_32:
	case V_028C70_COLOR_8_24:
	case V_028C70_COLOR_24_8:
	case V_028C70_COLOR_X24_8_32_FLOAT:
		alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_ABGR;
		break;

	default:
		assert(0);
		return;
	}

	/* The DB->CB copy needs 32_ABGR. */
	if (is_depth)
		alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_ABGR;

	formats->normal = normal;
	formats->alpha = alpha;
	formats->blend = blend;
	formats->blend_alpha = blend_alpha;
}