/*
 * Copyright (C) 2015-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef IR3_CONTEXT_H_
#define IR3_CONTEXT_H_

#include "ir3_compiler.h"
#include "ir3_nir.h"
#include "ir3.h"

/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)

#define DBG(fmt, ...) \
   do { debug_printf("%s:%d: "fmt "\n", \
         __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)

/**
 * The context for compilation of a single shader.
 */
struct ir3_context {
   struct ir3_compiler *compiler;
   const struct ir3_context_funcs *funcs;

   struct nir_shader *s;

   struct nir_instr *cur_instr;  /* current instruction, just for debug */

   struct ir3 *ir;
   struct ir3_shader_variant *so;

   /* Tables of scalar inputs/outputs.  Because of the way varying packing
    * works, we could have inputs w/ fractional location, which is a bit
    * awkward to deal with unless we keep track of the split scalar in/
    * out components.
    *
    * These *only* have inputs/outputs that are touched by load_*input and
    * store_output.
    */
   unsigned ninputs, noutputs;
   struct ir3_instruction **inputs;
   struct ir3_instruction **outputs;

   struct ir3_block *block;      /* the current block */
   struct ir3_block *in_block;   /* block created for shader inputs */

   nir_function_impl *impl;

   /* For fragment shaders, varyings are not actual shader inputs,
    * instead the hw passes an ij coord which is used with
    * bary.f.
    *
    * But NIR doesn't know that; it still declares varyings as
    * inputs.  So we do all the input tracking normally and fix
    * things up after compile_instructions()
    */
   struct ir3_instruction *ij_pixel, *ij_sample, *ij_centroid, *ij_size;

   /* for fragment shaders, for gl_FrontFacing and gl_FragCoord: */
   struct ir3_instruction *frag_face, *frag_coord;

   /* For vertex shaders, keep track of the system value sources: */
   struct ir3_instruction *vertex_id, *basevertex, *instance_id, *base_instance;

   /* For fragment shaders: */
   struct ir3_instruction *samp_id, *samp_mask_in;

   /* For geometry shaders: */
   struct ir3_instruction *primitive_id;
   struct ir3_instruction *gs_header;

   /* For tessellation shaders: */
   struct ir3_instruction *patch_vertices_in;
   struct ir3_instruction *tcs_header;
   struct ir3_instruction *tess_coord;

   /* Compute shader inputs: */
   struct ir3_instruction *local_invocation_id, *work_group_id;

   /* mapping from nir_register to defining instruction: */
   struct hash_table *def_ht;

   unsigned num_arrays;

   /* Tracking for max level of flowcontrol (branchstack) needed
    * by a5xx+:
    */
   unsigned stack, max_stack;

   /* A common pattern for indirect addressing is to request the
    * same address register multiple times.  To avoid generating
    * duplicate instruction sequences (which our backend does not
    * try to clean up, since that should be done at the NIR stage)
    * we cache the address value generated for a given src value:
    *
    * Note that we have to cache these per alignment, since the same
    * src used for an array of vec1 cannot also be used for an
    * array of vec4.
    */
   struct hash_table *addr_ht[4];

   /* last dst array; for indirect writes we need to insert a var-store.
    */
   struct ir3_instruction **last_dst;
   unsigned last_dst_n;

   /* maps nir_block to ir3_block, mostly for the purposes of
    * figuring out a block's successors
    */
   struct hash_table *block_ht;

   /* on a4xx, bitmask of samplers which need astc+srgb workaround: */
   unsigned astc_srgb;

   unsigned samples;             /* bitmask of x,y sample shifts */

   unsigned max_texture_index;

   /* set if we encounter something we can't handle yet, so we
    * can bail cleanly and fall back to the TGSI compiler f/e
    */
   bool error;
};
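
/* Per-generation hooks for emitting SSBO and image intrinsics (the a4xx and
 * a6xx backends provide their own implementations, see the externs below):
 */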
struct ir3_context_funcs {
   void (*emit_intrinsic_load_ssbo)(struct ir3_context *ctx, nir_intrinsic_instr *intr,
         struct ir3_instruction **dst);
   void (*emit_intrinsic_store_ssbo)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
   struct ir3_instruction * (*emit_intrinsic_atomic_ssbo)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
   void (*emit_intrinsic_store_image)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
   struct ir3_instruction * (*emit_intrinsic_atomic_image)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
};

extern const struct ir3_context_funcs ir3_a4xx_funcs;
extern const struct ir3_context_funcs ir3_a6xx_funcs;
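
/* Setup/teardown of the compile context for one shader variant: */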
struct ir3_context * ir3_context_init(struct ir3_compiler *compiler,
      struct ir3_shader_variant *so);
void ir3_context_free(struct ir3_context *ctx);
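
/* Helpers to map NIR ssa defs/dests/srcs to (arrays of) per-component ir3
 * instructions.  ir3_get_dst()/ir3_get_dst_ssa() return an array of 'n'
 * instruction pointers for the caller to fill in with the instructions
 * producing each component, and ir3_put_dst() must be called once the
 * values have been written.
 *
 * Typical usage in the emit path (illustrative sketch only, based on how
 * the compiler front-end uses these helpers; not itself part of this
 * header):
 *
 *    struct ir3_instruction **dst = ir3_get_dst(ctx, &alu->dest.dest, 1);
 *    struct ir3_instruction * const *src = ir3_get_src(ctx, &alu->src[0].src);
 *    dst[0] = ...;   // instruction(s) computing the value from src[0]
 *    ir3_put_dst(ctx, &alu->dest.dest);
 */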
struct ir3_instruction ** ir3_get_dst_ssa(struct ir3_context *ctx, nir_ssa_def *dst, unsigned n);
struct ir3_instruction ** ir3_get_dst(struct ir3_context *ctx, nir_dest *dst, unsigned n);
struct ir3_instruction * const * ir3_get_src(struct ir3_context *ctx, nir_src *src);
void ir3_put_dst(struct ir3_context *ctx, nir_dest *dst);
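
/* ir3_create_collect() groups scalar values into a single "collect" meta
 * instruction (so they end up in consecutive registers), for instructions
 * that consume a vector src; ir3_split_dest() does the inverse, splitting
 * a vector result into 'n' scalar components starting at 'base':
 */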
struct ir3_instruction * ir3_create_collect(struct ir3_context *ctx,
      struct ir3_instruction *const *arr, unsigned arrsz);
void ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
      struct ir3_instruction *src, unsigned base, unsigned n);
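
/* Report a fatal compile error (does not return): */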
NORETURN void ir3_context_error(struct ir3_context *ctx, const char *format, ...);

#define compile_assert(ctx, cond) do { \
      if (!(cond)) ir3_context_error((ctx), "failed assert: "#cond"\n"); \
   } while (0)
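
/* Get (and cache) the address register value derived from 'src', for
 * relative addressing; 'align' selects the per-alignment cache in
 * ctx->addr_ht (see the comment on addr_ht above):
 */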
struct ir3_instruction * ir3_get_addr(struct ir3_context *ctx,
      struct ir3_instruction *src, int align);
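
/* Get 'src' into the predicate register, for use by conditional branches: */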
struct ir3_instruction * ir3_get_predicate(struct ir3_context *ctx,
      struct ir3_instruction *src);
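
/* NIR registers (non-SSA values) are mapped to ir3 arrays; the load/store
 * helpers take an optional 'address' for relative (indirect) access:
 */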
void ir3_declare_array(struct ir3_context *ctx, nir_register *reg);
struct ir3_array * ir3_get_array(struct ir3_context *ctx, nir_register *reg);
struct ir3_instruction *ir3_create_array_load(struct ir3_context *ctx,
      struct ir3_array *arr, int n, struct ir3_instruction *address,
      unsigned bitsize);
void ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
      struct ir3_instruction *src, struct ir3_instruction *address);
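
/* Map a bit size (or a NIR src/dest) to the corresponding unsigned ir3 type: */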
static inline type_t utype_for_size(unsigned bit_size)
{
   switch (bit_size) {
   case 32: return TYPE_U32;
   case 16: return TYPE_U16;
   case 8:  return TYPE_U8;
   default: unreachable("bad bitsize"); return ~0;
   }
}

static inline type_t utype_src(nir_src src)
{ return utype_for_size(nir_src_bit_size(src)); }

static inline type_t utype_dst(nir_dest dst)
{ return utype_for_size(nir_dest_bit_size(dst)); }

#endif /* IR3_CONTEXT_H_ */