/**********************************************************
 * Copyright 2014 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_bitmask.h"
#include "util/u_simple_shaders.h"
#include "tgsi/tgsi_ureg.h"
#include "tgsi/tgsi_point_sprite.h"
#include "tgsi/tgsi_dump.h"
#include "svga_context.h"
#include "svga_shader.h"
#include "svga_tgsi.h"


/**
 * Bind a new GS. This updates the derived current gs state, not the
 * user-specified GS state.
 */
static void
bind_gs_state(struct svga_context *svga,
              struct svga_geometry_shader *gs)
{
   svga->curr.gs = gs;
   svga->dirty |= SVGA_NEW_GS;
}

/**
 * emulate_point_sprite searches the shader variants list to see if there is
 * a shader variant with a token string that matches the emulation
 * requirement. If there isn't, it uses the tgsi utility
 * tgsi_add_point_sprite to transform the original token string to support
 * point sprite. A new geometry shader state will be created with the
 * transformed token string and added to the shader variants list of the
 * original geometry shader. The new geometry shader state will then be
 * bound as the current geometry shader.
 */
static struct svga_shader *
emulate_point_sprite(struct svga_context *svga,
                     struct svga_shader *shader,
                     const struct tgsi_token *tokens)
{
   struct svga_token_key key;
   struct tgsi_token *new_tokens;
   const struct tgsi_token *orig_tokens;
   struct svga_geometry_shader *orig_gs = (struct svga_geometry_shader *)shader;
   struct svga_geometry_shader *gs = NULL;
   struct pipe_shader_state templ;
   struct svga_stream_output *streamout = NULL;
   int pos_out_index = -1;
   int aa_point_coord_index = -1;

   assert(tokens != NULL);

   orig_tokens = tokens;

   /* Create a token key */
   memset(&key, 0, sizeof key);
   key.gs.writes_psize = 1;
   key.gs.sprite_coord_enable = svga->curr.rast->templ.sprite_coord_enable;
   key.gs.sprite_origin_upper_left =
      !(svga->curr.rast->templ.sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT);
   key.gs.aa_point = svga->curr.rast->templ.point_smooth;

   if (orig_gs) {
      /* Check if the original geometry shader has stream output and
       * if position is one of the outputs.
       */
      streamout = orig_gs->base.stream_output;
      if (streamout) {
         pos_out_index = streamout->pos_out_index;
         key.gs.point_pos_stream_out = pos_out_index != -1;
      }

      /* Search the shader lists to see if there is a variant that matches
       * this token key.
       */
      gs = (struct svga_geometry_shader *)
         svga_search_shader_token_key(&orig_gs->base, &key);
   }

   /* If there isn't, then call the tgsi utility tgsi_add_point_sprite
    * to transform the original tokens to support point sprite.
    * Flip the sprite origin as the SVGA3D device only supports an
    * upper-left origin.
    */
   if (!gs) {
      new_tokens = tgsi_add_point_sprite(orig_tokens,
                                         key.gs.sprite_coord_enable,
                                         key.gs.sprite_origin_upper_left,
                                         key.gs.point_pos_stream_out,
                                         key.gs.aa_point ?
                                            &aa_point_coord_index : NULL);

      if (!new_tokens) {
         /* if no new tokens are generated for whatever reason, just return */
         return NULL;
      }

      if (0) {
         debug_printf("Before tgsi_add_point_sprite ---------------\n");
         tgsi_dump(orig_tokens, 0);
         debug_printf("After tgsi_add_point_sprite --------------\n");
         tgsi_dump(new_tokens, 0);
      }
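
      /* Wrap the transformed tokens in a shader state template; the
       * stream output declaration is filled in below if the original
       * shader had one.
       */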
      templ.tokens = new_tokens;
      templ.stream_output.num_outputs = 0;

      if (streamout) {
         templ.stream_output = streamout->info;
         /* The tgsi_add_point_sprite utility adds an extra output
          * for the original point position for stream output purposes.
          * We need to replace the position output register index in the
          * stream output declaration with the new register index.
          */
         if (pos_out_index != -1) {
            assert(orig_gs != NULL);
            templ.stream_output.output[pos_out_index].register_index =
               orig_gs->base.info.num_outputs;
         }
      }

      /* Create a new geometry shader state with the new tokens */
      gs = svga->pipe.create_gs_state(&svga->pipe, &templ);

      /* Don't need the token string anymore. There is a local copy
       * in the shader state.
       */
      FREE(new_tokens);

      if (!gs) {
         return NULL;
      }
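
      /* Tag the new variant as the wide-point emulation shader and record
       * the coordinate index that tgsi_add_point_sprite reported through
       * its out parameter when anti-aliased points were requested.
       */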
      gs->wide_point = TRUE;
      gs->aa_point_coord_index = aa_point_coord_index;
      gs->base.token_key = key;
      gs->base.parent = &orig_gs->base;
      gs->base.next = NULL;

      /* Add the new geometry shader to the head of the shader list
       * pointed to by the original geometry shader.
       */
      if (orig_gs) {
         gs->base.next = orig_gs->base.next;
         orig_gs->base.next = &gs->base;
      }
   }

   /* Bind the new geometry shader state */
   bind_gs_state(svga, gs);

   return &gs->base;
}

/**
 * Generate a geometry shader that emits a wide point by drawing a quad.
 * This function first creates a passthrough geometry shader and then
 * calls emulate_point_sprite() to transform the geometry shader to
 * support point sprite.
 */
static struct svga_shader *
add_point_sprite_shader(struct svga_context *svga)
{
   struct svga_vertex_shader *vs = svga->curr.vs;
   struct svga_geometry_shader *orig_gs = vs->gs;
   struct svga_geometry_shader *new_gs;
   const struct tgsi_token *tokens;

   if (orig_gs == NULL) {
      /* If this is the first time adding a geometry shader to this
       * vertex shader to support point sprite, then create
       * a passthrough geometry shader first.
       */
      orig_gs = (struct svga_geometry_shader *)
         util_make_geometry_passthrough_shader(
            &svga->pipe, vs->base.info.num_outputs,
            vs->base.info.output_semantic_name,
            vs->base.info.output_semantic_index);

      if (!orig_gs)
         return NULL;
   }
   else {
      if (orig_gs->base.parent)
         orig_gs = (struct svga_geometry_shader *)orig_gs->base.parent;
   }

   tokens = orig_gs->base.tokens;

   /* Call emulate_point_sprite to find or create a transformed
    * geometry shader for supporting point sprite.
    */
   new_gs = (struct svga_geometry_shader *)
      emulate_point_sprite(svga, &orig_gs->base, tokens);

   /* If this is the first time creating a geometry shader to
    * support vertex point size, then add the new geometry shader
    * to the vertex shader.
    */
   if (vs->gs == NULL) {
      vs->gs = new_gs;
   }

   return &new_gs->base;
}

/**
 * update_tgsi_transform provides a hook to transform a shader if needed.
 */
static enum pipe_error
update_tgsi_transform(struct svga_context *svga, unsigned dirty)
{
   struct svga_geometry_shader *gs = svga->curr.user_gs;   /* current gs */
   struct svga_vertex_shader *vs = svga->curr.vs;          /* currently bound vs */
   struct svga_shader *orig_gs;                            /* original gs */
   struct svga_shader *new_gs;                             /* new gs */
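
   /* This geometry-shader based emulation is only done for VGPU10
    * contexts; otherwise there is nothing to transform here.
    */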
   if (!svga_have_vgpu10(svga))
      return PIPE_OK;

   if (svga->curr.reduced_prim == PIPE_PRIM_POINTS) {
      /* If the current prim type is POINTS and the current geometry shader
       * emits wide points, transform the shader to emulate wide points using
       * quads. NOTE: we don't do emulation of wide points in GS when
       * transform feedback is enabled.
       */
      if (gs != NULL && !gs->base.stream_output &&
          (gs->base.info.writes_psize || gs->wide_point)) {
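         /* If the bound GS is itself an emulation variant, go back to its
          * parent (the original shader) so the transformation is applied to
          * the untransformed token string.
          */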
         orig_gs = gs->base.parent ? gs->base.parent : &gs->base;
         new_gs = emulate_point_sprite(svga, orig_gs, orig_gs->tokens);
      }
      /* If there is no active geometry shader and the current vertex
       * shader emits a wide point, then create a new geometry shader
       * to emulate the wide point.
       */
      else if (gs == NULL && !vs->base.stream_output &&
               (svga->curr.rast->pointsize > 1.0 ||
                vs->base.info.writes_psize)) {
         new_gs = add_point_sprite_shader(svga);
      }
      else {
         /* use the user's GS */
         bind_gs_state(svga, svga->curr.user_gs);
      }
   }
   else if (svga->curr.gs != svga->curr.user_gs) {
      /* If the current primitive type is not POINTS, then make sure
       * we don't bind to any of the generated geometry shaders.
       */
      bind_gs_state(svga, svga->curr.user_gs);
   }

   (void) new_gs;   /* silence the unused var warning */

   return PIPE_OK;
}
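
/**
 * This tracked state is re-evaluated whenever the bound VS, FS, GS,
 * reduced primitive type, or rasterizer state changes (see the dirty
 * flags below), and it rebinds either the user geometry shader or a
 * generated emulation variant.
 */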
struct svga_tracked_state svga_need_tgsi_transform =
{
   "transform shader for optimization",
   (SVGA_NEW_VS |
    SVGA_NEW_FS |
    SVGA_NEW_GS |
    SVGA_NEW_REDUCED_PRIMITIVE |
    SVGA_NEW_RAST),
   update_tgsi_transform
};