1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
|
/*
* Copyright 2020 Valve Corporation
* SPDX-License-Identifier: MIT
*
* Authors:
* Jonathan Marek <jonathan@marek.ca>
*/
#ifndef TU_UTIL_H
#define TU_UTIL_H
#include <assert.h>
#include <stdint.h>
#include "util/macros.h"
#include "util/u_math.h"
#include "compiler/shader_enums.h"
#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"
#include "a6xx.xml.h"
#include <vulkan/vulkan.h>
static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
   /* A single-bit stage mask maps to the mesa stage enum via its bit
    * position (VK stage bits are defined in gl_shader_stage order).
    */
   const unsigned mask = (unsigned) vk_stage;
   assert(__builtin_popcount(mask) == 1);
   return (gl_shader_stage) util_logbase2(mask);
}
static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
   /* Inverse of vk_to_mesa_shader_stage: the stage index becomes a
    * single bit in the VK stage mask.
    */
   return (VkShaderStageFlagBits) (1u << mesa_stage);
}
/* Mask covering one bit per gl_shader_stage. */
#define TU_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
/* Iterate `stage` over every gl_shader_stage whose bit is set in
 * `stage_bits`; bits outside TU_STAGE_MASK are ignored.  Each iteration
 * picks the lowest set bit via ffs, then clears it from the working copy.
 */
#define tu_foreach_stage(stage, stage_bits) \
for (gl_shader_stage stage, \
__tmp = (gl_shader_stage)((stage_bits) &TU_STAGE_MASK); \
stage = __builtin_ffs(__tmp) - 1, __tmp; __tmp &= ~(1 << (stage)))
static inline enum a3xx_msaa_samples
tu_msaa_samples(VkSampleCountFlagBits samples)
{
   /* The hw enum is log2 of the (power-of-two) sample count. */
   const unsigned count = (unsigned) samples;
   assert(__builtin_popcount(count) == 1);
   return (enum a3xx_msaa_samples) util_logbase2(count);
}
static inline uint32_t
tu6_stage2opcode(gl_shader_stage stage)
{
   /* FS and CS state loads use the _FRAG opcode; all other stages are
    * loaded through the geometry-pipeline opcode.
    */
   switch (stage) {
   case MESA_SHADER_FRAGMENT:
   case MESA_SHADER_COMPUTE:
      return CP_LOAD_STATE6_FRAG;
   default:
      return CP_LOAD_STATE6_GEOM;
   }
}
static inline enum a6xx_state_block
tu6_stage2texsb(gl_shader_stage stage)
{
   /* Relies on the per-stage TEX state blocks being laid out in
    * gl_shader_stage order starting at SB6_VS_TEX.
    */
   return (enum a6xx_state_block) (SB6_VS_TEX + (unsigned) stage);
}
static inline enum a6xx_state_block
tu6_stage2shadersb(gl_shader_stage stage)
{
   /* Relies on the per-stage SHADER state blocks being laid out in
    * gl_shader_stage order starting at SB6_VS_SHADER.
    */
   return (enum a6xx_state_block) (SB6_VS_SHADER + (unsigned) stage);
}
/* Translate a Vulkan logic op to the adreno ROP code via a lookup table. */
static inline enum a3xx_rop_code
tu6_rop(VkLogicOp op)
{
/* note: hw enum matches the VK enum, but with the 4 bits reversed */
static const uint8_t lookup[] = {
[VK_LOGIC_OP_CLEAR] = ROP_CLEAR,
[VK_LOGIC_OP_AND] = ROP_AND,
[VK_LOGIC_OP_AND_REVERSE] = ROP_AND_REVERSE,
[VK_LOGIC_OP_COPY] = ROP_COPY,
[VK_LOGIC_OP_AND_INVERTED] = ROP_AND_INVERTED,
[VK_LOGIC_OP_NO_OP] = ROP_NOOP,
[VK_LOGIC_OP_XOR] = ROP_XOR,
[VK_LOGIC_OP_OR] = ROP_OR,
[VK_LOGIC_OP_NOR] = ROP_NOR,
[VK_LOGIC_OP_EQUIVALENT] = ROP_EQUIV,
[VK_LOGIC_OP_INVERT] = ROP_INVERT,
[VK_LOGIC_OP_OR_REVERSE] = ROP_OR_REVERSE,
[VK_LOGIC_OP_COPY_INVERTED] = ROP_COPY_INVERTED,
[VK_LOGIC_OP_OR_INVERTED] = ROP_OR_INVERTED,
[VK_LOGIC_OP_NAND] = ROP_NAND,
[VK_LOGIC_OP_SET] = ROP_SET,
};
assert(op < ARRAY_SIZE(lookup));
return lookup[op];
}
/* Translate a Vulkan primitive topology to the adreno draw primtype. */
static inline enum pc_di_primtype
tu6_primtype(VkPrimitiveTopology topology)
{
static const uint8_t lookup[] = {
[VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = DI_PT_POINTLIST,
[VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = DI_PT_LINELIST,
[VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = DI_PT_LINESTRIP,
[VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = DI_PT_TRILIST,
[VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = DI_PT_TRISTRIP,
[VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = DI_PT_TRIFAN,
[VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY] = DI_PT_LINE_ADJ,
[VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY] = DI_PT_LINESTRIP_ADJ,
[VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY] = DI_PT_TRI_ADJ,
[VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = DI_PT_TRISTRIP_ADJ,
/* Return PATCH0 and update in tu_pipeline_builder_parse_tessellation */
[VK_PRIMITIVE_TOPOLOGY_PATCH_LIST] = DI_PT_PATCHES0,
};
assert(topology < ARRAY_SIZE(lookup));
return lookup[topology];
}
static inline enum adreno_compare_func
tu6_compare_func(VkCompareOp op)
{
   /* Relies on the adreno compare-func enum using the same numeric
    * values as VkCompareOp, so the translation is a direct cast.
    */
   const unsigned func = (unsigned) op;
   return (enum adreno_compare_func) func;
}
static inline enum adreno_stencil_op
tu6_stencil_op(VkStencilOp op)
{
   /* Relies on the adreno stencil-op enum using the same numeric
    * values as VkStencilOp, so the translation is a direct cast.
    */
   const unsigned sop = (unsigned) op;
   return (enum adreno_stencil_op) sop;
}
/* Translate a Vulkan blend factor to the adreno RB blend factor. */
static inline enum adreno_rb_blend_factor
tu6_blend_factor(VkBlendFactor factor)
{
static const uint8_t lookup[] = {
[VK_BLEND_FACTOR_ZERO] = FACTOR_ZERO,
[VK_BLEND_FACTOR_ONE] = FACTOR_ONE,
[VK_BLEND_FACTOR_SRC_COLOR] = FACTOR_SRC_COLOR,
[VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR] = FACTOR_ONE_MINUS_SRC_COLOR,
[VK_BLEND_FACTOR_DST_COLOR] = FACTOR_DST_COLOR,
[VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR] = FACTOR_ONE_MINUS_DST_COLOR,
[VK_BLEND_FACTOR_SRC_ALPHA] = FACTOR_SRC_ALPHA,
[VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA] = FACTOR_ONE_MINUS_SRC_ALPHA,
[VK_BLEND_FACTOR_DST_ALPHA] = FACTOR_DST_ALPHA,
[VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA] = FACTOR_ONE_MINUS_DST_ALPHA,
[VK_BLEND_FACTOR_CONSTANT_COLOR] = FACTOR_CONSTANT_COLOR,
[VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR]= FACTOR_ONE_MINUS_CONSTANT_COLOR,
[VK_BLEND_FACTOR_CONSTANT_ALPHA] = FACTOR_CONSTANT_ALPHA,
[VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA]= FACTOR_ONE_MINUS_CONSTANT_ALPHA,
[VK_BLEND_FACTOR_SRC_ALPHA_SATURATE] = FACTOR_SRC_ALPHA_SATURATE,
[VK_BLEND_FACTOR_SRC1_COLOR] = FACTOR_SRC1_COLOR,
[VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR] = FACTOR_ONE_MINUS_SRC1_COLOR,
[VK_BLEND_FACTOR_SRC1_ALPHA] = FACTOR_SRC1_ALPHA,
[VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA] = FACTOR_ONE_MINUS_SRC1_ALPHA,
};
assert(factor < ARRAY_SIZE(lookup));
return lookup[factor];
}
static inline enum a3xx_rb_blend_opcode
tu6_blend_op(VkBlendOp op)
{
   /* Relies on the a3xx blend-opcode enum using the same numeric
    * values as VkBlendOp, so the translation is a direct cast.
    */
   const unsigned opcode = (unsigned) op;
   return (enum a3xx_rb_blend_opcode) opcode;
}
static inline enum a6xx_tex_type
tu6_tex_type(VkImageViewType type, bool storage)
{
   /* Map a Vulkan image view type to the hw texture type.  Array views
    * use the same hw type as their non-array counterpart.
    */
   switch (type) {
   case VK_IMAGE_VIEW_TYPE_3D:
      return A6XX_TEX_3D;
   case VK_IMAGE_VIEW_TYPE_CUBE:
   case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
      /* cube views used as storage images are set up as 2D instead */
      return storage ? A6XX_TEX_2D : A6XX_TEX_CUBE;
   case VK_IMAGE_VIEW_TYPE_2D:
   case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
      return A6XX_TEX_2D;
   case VK_IMAGE_VIEW_TYPE_1D:
   case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
   default:
      return A6XX_TEX_1D;
   }
}
/* Translate a Vulkan sampler address mode to the hw wrap/clamp mode. */
static inline enum a6xx_tex_clamp
tu6_tex_wrap(VkSamplerAddressMode address_mode)
{
   /* static const: build the table once at compile time instead of
    * re-initializing it on the stack at every call; this also matches
    * the other lookup tables in this file.
    */
   static const uint8_t lookup[] = {
      [VK_SAMPLER_ADDRESS_MODE_REPEAT] = A6XX_TEX_REPEAT,
      [VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT] = A6XX_TEX_MIRROR_REPEAT,
      [VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE] = A6XX_TEX_CLAMP_TO_EDGE,
      [VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER] = A6XX_TEX_CLAMP_TO_BORDER,
      [VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE] = A6XX_TEX_MIRROR_CLAMP,
   };
   assert(address_mode < ARRAY_SIZE(lookup));
   return lookup[address_mode];
}
static inline enum a6xx_tex_filter
tu6_tex_filter(VkFilter filter, unsigned aniso)
{
   /* Translate a Vulkan filter to the hw filter; a non-zero `aniso`
    * upgrades linear filtering to anisotropic.
    */
   if (filter == VK_FILTER_NEAREST)
      return A6XX_TEX_NEAREST;
   if (filter == VK_FILTER_LINEAR)
      return aniso ? A6XX_TEX_ANISO : A6XX_TEX_LINEAR;
   if (filter == VK_FILTER_CUBIC_EXT)
      return A6XX_TEX_CUBIC;
   unreachable("illegal texture filter");
}
static inline enum a6xx_reduction_mode
tu6_reduction_mode(VkSamplerReductionMode reduction_mode)
{
   /* Relies on the hw reduction-mode enum using the same numeric
    * values as VkSamplerReductionMode, so the translation is a cast.
    */
   const unsigned mode = (unsigned) reduction_mode;
   return (enum a6xx_reduction_mode) mode;
}
static inline enum a6xx_depth_format
tu6_pipe2depth(VkFormat format)
{
   /* Translate a Vulkan depth/stencil format to the hw depth format;
    * returns ~0 for formats with no depth mapping.
    */
   if (format == VK_FORMAT_D16_UNORM)
      return DEPTH6_16;
   if (format == VK_FORMAT_X8_D24_UNORM_PACK32 ||
       format == VK_FORMAT_D24_UNORM_S8_UINT)
      return DEPTH6_24_8;
   /* stencil-only S8 is handled through the 32-bit path as well */
   if (format == VK_FORMAT_D32_SFLOAT || format == VK_FORMAT_S8_UINT)
      return DEPTH6_32;
   return ~0;
}
static inline enum a6xx_polygon_mode
tu6_polygon_mode(VkPolygonMode mode)
{
   /* Map the Vulkan polygon mode onto the hw raster mode. */
   if (mode == VK_POLYGON_MODE_POINT)
      return POLYMODE6_POINTS;
   if (mode == VK_POLYGON_MODE_LINE)
      return POLYMODE6_LINES;
   if (mode == VK_POLYGON_MODE_FILL)
      return POLYMODE6_TRIANGLES;
   unreachable("bad polygon mode");
}
#endif /* TU_UTIL_H */
|