/**************************************************************************
*
* Copyright 2012 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "pipe/p_state.h"
#include "util/u_debug.h"
#include "gallivm/lp_bld_type.h"
#include "gallivm/lp_bld_arit.h"
#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_logic.h"
#include "gallivm/lp_bld_swizzle.h"
#include "gallivm/lp_bld_flow.h"
#include "gallivm/lp_bld_debug.h"
#include "lp_bld_blend.h"
/**
* Is (a OP b) == (b OP a)?
*/
boolean
lp_build_blend_func_commutative(unsigned func)
{
switch (func) {
case PIPE_BLEND_ADD:
case PIPE_BLEND_MIN:
case PIPE_BLEND_MAX:
return TRUE;
case PIPE_BLEND_SUBTRACT:
case PIPE_BLEND_REVERSE_SUBTRACT:
return FALSE;
default:
assert(0);
return TRUE;
}
}


/**
 * Whether the blending functions are the reverse of each other.
 */
boolean
lp_build_blend_func_reverse(unsigned rgb_func, unsigned alpha_func)
{
   if (rgb_func == alpha_func)
      return FALSE;
   if (rgb_func == PIPE_BLEND_SUBTRACT && alpha_func == PIPE_BLEND_REVERSE_SUBTRACT)
      return TRUE;
   if (rgb_func == PIPE_BLEND_REVERSE_SUBTRACT && alpha_func == PIPE_BLEND_SUBTRACT)
      return TRUE;
   return FALSE;
}


/**
 * Whether the blending factors are complements of each other.
 */
static inline boolean
lp_build_blend_factor_complementary(unsigned src_factor, unsigned dst_factor)
{
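   /*
    * This relies on the PIPE_BLENDFACTOR_* encoding: the "one minus"
    * (INV_) variant of each factor differs from the base factor only in
    * the 0x10 bit, e.g.
    * PIPE_BLENDFACTOR_SRC_ALPHA ^ 0x10 == PIPE_BLENDFACTOR_INV_SRC_ALPHA
    * and PIPE_BLENDFACTOR_ONE ^ 0x10 == PIPE_BLENDFACTOR_ZERO.
    */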
   return dst_factor == (src_factor ^ 0x10);
}


/**
 * Combine the two blend terms with the given blend function.
 *
 * @sa http://www.opengl.org/sdk/docs/man/xhtml/glBlendEquationSeparate.xml
 */
LLVMValueRef
lp_build_blend_func(struct lp_build_context *bld,
                    unsigned func,
                    LLVMValueRef term1,
                    LLVMValueRef term2)
{
   switch (func) {
   case PIPE_BLEND_ADD:
      return lp_build_add(bld, term1, term2);
   case PIPE_BLEND_SUBTRACT:
      return lp_build_sub(bld, term1, term2);
   case PIPE_BLEND_REVERSE_SUBTRACT:
      return lp_build_sub(bld, term2, term1);
   case PIPE_BLEND_MIN:
      return lp_build_min(bld, term1, term2);
   case PIPE_BLEND_MAX:
      return lp_build_max(bld, term1, term2);
   default:
      assert(0);
      return bld->zero;
   }
}


/**
 * Performs optimisations and blending independent of SoA/AoS.
 *
 * @param func                the blend function
 * @param factor_src          PIPE_BLENDFACTOR_xxx
 * @param factor_dst          PIPE_BLENDFACTOR_xxx
 * @param src                 source rgba
 * @param dst                 dest rgba
 * @param src_factor          src factor computed value
 * @param dst_factor          dst factor computed value
 * @param not_alpha_dependent same factors across all channels of src/dst
 *
 * not_alpha_dependent should be:
 *    SoA: always true, as only one channel is processed at a time
 *    AoS: rgb_src_factor == alpha_src_factor && rgb_dst_factor == alpha_dst_factor
 *
 * Note that pretty much every optimisation here is only valid on non-unorm
 * targets: unorm values cannot exceed 1.0, so factorisation can change the
 * result, e.g. (0.9 * 0.9) + (0.9 * 0.9) != 0.9 * (0.9 + 0.9) because the
 * additions saturate at 1.0.
 */
LLVMValueRef
lp_build_blend(struct lp_build_context *bld,
               unsigned func,
               unsigned factor_src,
               unsigned factor_dst,
               LLVMValueRef src,
               LLVMValueRef dst,
               LLVMValueRef src_factor,
               LLVMValueRef dst_factor,
               boolean not_alpha_dependent,
               boolean optimise_only)
{
   LLVMValueRef result, src_term, dst_term;

   /* If we are not alpha dependent we can mess with the src/dst factors */
   if (not_alpha_dependent) {
      if (lp_build_blend_factor_complementary(factor_src, factor_dst)) {
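         /*
          * Complementary factors: dst_factor == 1 - src_factor (or vice
          * versa).  The factor with the smaller enum value is the
          * non-inverted one, since INV_* factors have the 0x10 bit set.
          * For ADD this is a linear interpolation:
          *    src * A + dst * (1 - A)  ==  lerp(A, dst, src)
          */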
         if (func == PIPE_BLEND_ADD) {
            if (factor_src < factor_dst) {
               return lp_build_lerp(bld, src_factor, dst, src, 0);
            } else {
               return lp_build_lerp(bld, dst_factor, src, dst, 0);
            }
         } else if (bld->type.floating && func == PIPE_BLEND_SUBTRACT) {
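            /*
             * Factor out the common multiplier (valid in floating point):
             *    src * A - dst * (1 - A)  ==  (src + dst) * A - dst
             *    src * (1 - B) - dst * B  ==  src - (src + dst) * B
             */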
            result = lp_build_add(bld, src, dst);
            if (factor_src < factor_dst) {
               result = lp_build_mul(bld, result, src_factor);
               return lp_build_sub(bld, result, dst);
            } else {
               result = lp_build_mul(bld, result, dst_factor);
               return lp_build_sub(bld, src, result);
            }
         } else if (bld->type.floating && func == PIPE_BLEND_REVERSE_SUBTRACT) {
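            /*
             * Same factorisation, with the terms swapped:
             *    dst * (1 - A) - src * A  ==  dst - (src + dst) * A
             *    dst * B - src * (1 - B)  ==  (src + dst) * B - src
             */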
            result = lp_build_add(bld, src, dst);
            if (factor_src < factor_dst) {
               result = lp_build_mul(bld, result, src_factor);
               return lp_build_sub(bld, dst, result);
            } else {
               result = lp_build_mul(bld, result, dst_factor);
               return lp_build_sub(bld, result, src);
            }
         }
      }
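
      /*
       * Identical factors: in floating point the factor distributes over
       * ADD, SUBTRACT and REVERSE_SUBTRACT, so compute (src OP dst) * factor
       * with a single multiply.
       */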
      if (bld->type.floating && factor_src == factor_dst) {
         if (func == PIPE_BLEND_ADD ||
             func == PIPE_BLEND_SUBTRACT ||
             func == PIPE_BLEND_REVERSE_SUBTRACT) {
            LLVMValueRef result;
            result = lp_build_blend_func(bld, func, src, dst);
            return lp_build_mul(bld, result, src_factor);
         }
      }
   }

   if (optimise_only)
      return NULL;

   src_term = lp_build_mul(bld, src, src_factor);
   dst_term = lp_build_mul(bld, dst, dst_factor);
   return lp_build_blend_func(bld, func, src_term, dst_term);
}
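
/*
 * Illustrative sketch (hypothetical caller; src, dst, src_alpha and
 * inv_src_alpha are assumed to have been built elsewhere): classic
 * non-premultiplied alpha blending,
 *
 *    res = lp_build_blend(&bld, PIPE_BLEND_ADD,
 *                         PIPE_BLENDFACTOR_SRC_ALPHA,
 *                         PIPE_BLENDFACTOR_INV_SRC_ALPHA,
 *                         src, dst, src_alpha, inv_src_alpha,
 *                         TRUE, FALSE);
 *
 * takes the complementary-factor path above and collapses to a single
 * lp_build_lerp() instead of two multiplies and an add.
 */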


void
lp_build_alpha_to_coverage(struct gallivm_state *gallivm,
                           struct lp_type type,
                           struct lp_build_mask_context *mask,
                           LLVMValueRef alpha,
                           boolean do_branch)
{
   struct lp_build_context bld;
   LLVMValueRef test;
   LLVMValueRef alpha_ref_value;

   lp_build_context_init(&bld, gallivm, type);
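
   /*
    * Simplified alpha-to-coverage: compare alpha against a fixed 0.5
    * reference value and remove fragments with alpha <= 0.5 from the
    * execution mask, instead of dithering coverage across samples.
    */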
   alpha_ref_value = lp_build_const_vec(gallivm, type, 0.5);
   test = lp_build_cmp(&bld, PIPE_FUNC_GREATER, alpha, alpha_ref_value);
   lp_build_name(test, "alpha_to_coverage");

   lp_build_mask_update(mask, test);

   if (do_branch)
      lp_build_mask_check(mask);
}