Diffstat (limited to 'src')
-rw-r--r--  src/gallium/auxiliary/util/u_sse.h            |  92
-rw-r--r--  src/gallium/drivers/llvmpipe/lp_setup_line.c  |  16
-rw-r--r--  src/gallium/drivers/llvmpipe/lp_setup_tri.c   | 104
3 files changed, 150 insertions(+), 62 deletions(-)
diff --git a/src/gallium/auxiliary/util/u_sse.h b/src/gallium/auxiliary/util/u_sse.h
index 7f8e5a1a3cf..cae4138ba01 100644
--- a/src/gallium/auxiliary/util/u_sse.h
+++ b/src/gallium/auxiliary/util/u_sse.h
@@ -166,14 +166,49 @@ _mm_shuffle_epi8(__m128i a, __m128i mask)
#endif /* !PIPE_ARCH_SSSE3 */
+/*
+ * Provide an SSE2 implementation of _mm_mul_epi32() in terms of
+ * _mm_mul_epu32().
+ *
+ * Basically, albeit surprising at first (and second, and third...) look,
+ * if a * b is done unsigned instead of signed, the signed result can be
+ * recovered by just subtracting b from the high 32 bits of the result
+ * if a is negative (and likewise subtracting a if b is negative).
+ * Modular arithmetic at its best!
+ *
+ * So for int32 a, b, in crude pseudo-code ("*" here denoting a widening mul):
+ *   fixupb = (signmask(b) & a) << 32ULL
+ *   fixupa = (signmask(a) & b) << 32ULL
+ *   a * b  = (unsigned)a * (unsigned)b - fixupb - fixupa
+ *          = (unsigned)a * (unsigned)b - (fixupb + fixupa)
+ *
+ * This computes both the lo (dwords 0/2) and hi (dwords 1/3) products
+ * at the same time, since that allows sharing the fixup computation.
+ */
+static inline __m128i
+mm_mullohi_epi32(const __m128i a, const __m128i b, __m128i *res13)
+{
+   __m128i a13, b13, mul02, mul13;
+   __m128i anegmask, bnegmask, fixup, fixup02, fixup13;
+   a13 = _mm_shuffle_epi32(a, _MM_SHUFFLE(2,3,0,1)); /* {a1, a0, a3, a2} */
+   b13 = _mm_shuffle_epi32(b, _MM_SHUFFLE(2,3,0,1)); /* {b1, b0, b3, b2} */
+   anegmask = _mm_srai_epi32(a, 31); /* per-dword sign mask of a */
+   bnegmask = _mm_srai_epi32(b, 31); /* per-dword sign mask of b */
+   fixup = _mm_add_epi32(_mm_and_si128(anegmask, b),
+                         _mm_and_si128(bnegmask, a));
+   mul02 = _mm_mul_epu32(a, b);     /* unsigned products of dwords 0/2 */
+   mul13 = _mm_mul_epu32(a13, b13); /* unsigned products of dwords 1/3 */
+   fixup02 = _mm_slli_epi64(fixup, 32);
+   fixup13 = _mm_and_si128(fixup, _mm_set_epi32(-1,0,-1,0));
+   *res13 = _mm_sub_epi64(mul13, fixup13);
+   return _mm_sub_epi64(mul02, fixup02);
+}
/* Provide an SSE2 implementation of _mm_mullo_epi32() in terms of
* _mm_mul_epu32().
*
- * I suspect this works fine for us because one of our operands is
- * always positive, but not sure that this can be used for general
- * signed integer multiplication.
+ * This always works regardless of the signs of the operands, since
+ * the high bits (which would be different) aren't used.
*
* This seems close enough to the speed of SSE4 and the real
* _mm_mullo_epi32() intrinsic as to not justify adding an sse4
@@ -188,6 +223,12 @@ static inline __m128i mm_mullo_epi32(const __m128i a, const __m128i b)
/* Interleave the results, either with shuffles or (slightly
* faster) direct bit operations:
+ * XXX: this might only be true for some cpus (in particular 65nm
+ * Core 2). On most cpus (including that Core 2, but not Nehalem...)
+ * using _mm_shuffle_ps/_mm_shuffle_epi32 might also be faster
+ * than using the 3 instructions below. But the logic ops should be fine
+ * as well; we can't have an optimal solution for all cpus (if anything,
+ * we should just use _mm_mullo_epi32() if sse41 is available...).
*/
#if 0
__m128i ba8 = _mm_shuffle_epi32(ba, 8);
@@ -214,17 +255,44 @@ transpose4_epi32(const __m128i * restrict a,
__m128i * restrict q,
__m128i * restrict r)
{
- __m128i t0 = _mm_unpacklo_epi32(*a, *b);
- __m128i t1 = _mm_unpacklo_epi32(*c, *d);
- __m128i t2 = _mm_unpackhi_epi32(*a, *b);
- __m128i t3 = _mm_unpackhi_epi32(*c, *d);
-
- *o = _mm_unpacklo_epi64(t0, t1);
- *p = _mm_unpackhi_epi64(t0, t1);
- *q = _mm_unpacklo_epi64(t2, t3);
- *r = _mm_unpackhi_epi64(t2, t3);
+ __m128i t0 = _mm_unpacklo_epi32(*a, *b);
+ __m128i t1 = _mm_unpacklo_epi32(*c, *d);
+ __m128i t2 = _mm_unpackhi_epi32(*a, *b);
+ __m128i t3 = _mm_unpackhi_epi32(*c, *d);
+
+ *o = _mm_unpacklo_epi64(t0, t1);
+ *p = _mm_unpackhi_epi64(t0, t1);
+ *q = _mm_unpacklo_epi64(t2, t3);
+ *r = _mm_unpackhi_epi64(t2, t3);
}
+
+/*
+ * Same as above, except the first two values are already interleaved
+ * (i.e. contain 64-bit values).
+ */
+static inline void
+transpose2_64_2_32(const __m128i * restrict a01,
+                   const __m128i * restrict a23,
+                   const __m128i * restrict c,
+                   const __m128i * restrict d,
+                   __m128i * restrict o,
+                   __m128i * restrict p,
+                   __m128i * restrict q,
+                   __m128i * restrict r)
+{
+   __m128i t0 = *a01;
+   __m128i t1 = _mm_unpacklo_epi32(*c, *d);
+   __m128i t2 = *a23;
+   __m128i t3 = _mm_unpackhi_epi32(*c, *d);
+
+   *o = _mm_unpacklo_epi64(t0, t1);
+   *p = _mm_unpackhi_epi64(t0, t1);
+   *q = _mm_unpacklo_epi64(t2, t3);
+   *r = _mm_unpackhi_epi64(t2, t3);
+}
+
+
#define SCALAR_EPI32(m, i) _mm_shuffle_epi32((m), _MM_SHUFFLE(i,i,i,i))
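
For reference, the fixup that mm_mullohi_epi32() relies on can be checked in
plain scalar C. This is only an illustrative sketch, not part of the patch;
the helper name signed_mul_from_unsigned() is made up for this example:

   #include <assert.h>
   #include <stdint.h>

   /* Reconstruct the signed 64-bit product of two int32 values from the
    * unsigned 32x32->64 product, using the same fixup terms as above. */
   static int64_t
   signed_mul_from_unsigned(int32_t a, int32_t b)
   {
      uint64_t umul = (uint64_t)(uint32_t)a * (uint32_t)b;
      /* fixupa: subtract b from the high 32 bits when a is negative */
      uint64_t fixupa = (uint64_t)(a < 0 ? (uint32_t)b : 0) << 32;
      /* fixupb: subtract a from the high 32 bits when b is negative */
      uint64_t fixupb = (uint64_t)(b < 0 ? (uint32_t)a : 0) << 32;
      /* relies on the usual two's complement wraparound of uint64_t */
      return (int64_t)(umul - fixupa - fixupb);
   }

   int main(void)
   {
      assert(signed_mul_from_unsigned(-3, 7) == -21);
      assert(signed_mul_from_unsigned(-40000, -50000) == 2000000000LL);
      assert(signed_mul_from_unsigned(12345, -67890) == -838102050LL);
      return 0;
   }
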
diff --git a/src/gallium/drivers/llvmpipe/lp_setup_line.c b/src/gallium/drivers/llvmpipe/lp_setup_line.c
index fac1cd61d77..a0de599c9c6 100644
--- a/src/gallium/drivers/llvmpipe/lp_setup_line.c
+++ b/src/gallium/drivers/llvmpipe/lp_setup_line.c
@@ -644,19 +644,25 @@ try_setup_line( struct lp_setup_context *setup,
line->inputs.layer = layer;
line->inputs.viewport_index = viewport_index;
+ /*
+ * XXX: this code is mostly identical to the one in lp_setup_tri, except it
+ * uses 4 planes instead of 3. Could share the code (including the SSE
+ * path; in fact we'd get the 4th plane for free).
+ * The only difference apart from storing the 4th plane would be a
+ * different shuffle for calculating dcdx/dcdy.
+ */
for (i = 0; i < 4; i++) {
- /* half-edge constants, will be interated over the whole render
+ /* half-edge constants, will be iterated over the whole render
* target.
*/
plane[i].c = IMUL64(plane[i].dcdx, x[i]) - IMUL64(plane[i].dcdy, y[i]);
-
- /* correct for top-left vs. bottom-left fill convention.
- */
+ /* correct for top-left vs. bottom-left fill convention.
+ */
if (plane[i].dcdx < 0) {
/* both fill conventions want this - adjust for left edges */
- plane[i].c++;
+ plane[i].c++;
}
else if (plane[i].dcdx == 0) {
if (setup->pixel_offset == 0) {
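
The half-edge constants above use a widening 64-bit multiply because, with
FIXED_ORDER subpixel bits, products of fixed-point coordinates and deltas can
exceed 32 bits on larger render targets. A rough sketch of the arithmetic,
assuming IMUL64 is the usual widening 32x32->64 signed multiply (its actual
definition lives elsewhere in the tree):

   #include <stdint.h>

   /* Assumed behaviour of IMUL64: widening 32x32 -> 64 bit signed multiply. */
   #define IMUL64(a, b) (((int64_t)(a)) * ((int64_t)(b)))

   /* With 8 subpixel bits a coordinate on a 16384-pixel-wide render target
    * already needs about 22 bits, and dcdx/dcdy are differences of such
    * coordinates, so dcdx * x can need well over 32 bits - hence 64-bit c. */
   int64_t half_edge_c(int32_t dcdx, int32_t dcdy, int32_t x, int32_t y)
   {
      return IMUL64(dcdx, x) - IMUL64(dcdy, y);
   }
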
diff --git a/src/gallium/drivers/llvmpipe/lp_setup_tri.c b/src/gallium/drivers/llvmpipe/lp_setup_tri.c
index a1631fdaee9..358da442ea7 100644
--- a/src/gallium/drivers/llvmpipe/lp_setup_tri.c
+++ b/src/gallium/drivers/llvmpipe/lp_setup_tri.c
@@ -390,26 +390,18 @@ do_triangle_ccw(struct lp_setup_context *setup,
plane = GET_PLANES(tri);
#if defined(PIPE_ARCH_SSE)
- /*
- * XXX this code is effectively disabled for all practical purposes,
- * as the allowed fb size is tiny if FIXED_ORDER is 8.
- */
- if (setup->fb.width <= MAX_FIXED_LENGTH32 &&
- setup->fb.height <= MAX_FIXED_LENGTH32 &&
- (bbox.x1 - bbox.x0) <= MAX_FIXED_LENGTH32 &&
- (bbox.y1 - bbox.y0) <= MAX_FIXED_LENGTH32) {
+ if (1) {
__m128i vertx, verty;
__m128i shufx, shufy;
- __m128i dcdx, dcdy, c;
- __m128i unused;
+ __m128i dcdx, dcdy;
+ __m128i cdx02, cdx13, cdy02, cdy13, c02, c13;
+ __m128i c01, c23, unused;
__m128i dcdx_neg_mask;
__m128i dcdy_neg_mask;
__m128i dcdx_zero_mask;
- __m128i top_left_flag;
- __m128i c_inc_mask, c_inc;
+ __m128i top_left_flag, c_dec;
__m128i eo, p0, p1, p2;
__m128i zero = _mm_setzero_si128();
- PIPE_ALIGN_VAR(16) int32_t temp_vec[4];
vertx = _mm_load_si128((__m128i *)position->x); /* vertex x coords */
verty = _mm_load_si128((__m128i *)position->y); /* vertex y coords */
@@ -426,48 +418,70 @@ do_triangle_ccw(struct lp_setup_context *setup,
top_left_flag = _mm_set1_epi32((setup->bottom_edge_rule == 0) ? ~0 : 0);
- c_inc_mask = _mm_or_si128(dcdx_neg_mask,
- _mm_and_si128(dcdx_zero_mask,
- _mm_xor_si128(dcdy_neg_mask,
- top_left_flag)));
-
- c_inc = _mm_srli_epi32(c_inc_mask, 31);
-
- c = _mm_sub_epi32(mm_mullo_epi32(dcdx, vertx),
- mm_mullo_epi32(dcdy, verty));
+ c_dec = _mm_or_si128(dcdx_neg_mask,
+ _mm_and_si128(dcdx_zero_mask,
+ _mm_xor_si128(dcdy_neg_mask,
+ top_left_flag)));
- c = _mm_add_epi32(c, c_inc);
+ /*
+ * 64-bit arithmetic.
+ * Note we need a _signed_ mul (_mm_mul_epi32), which mm_mullohi_epi32()
+ * emulates.
+ */
+ cdx02 = mm_mullohi_epi32(dcdx, vertx, &cdx13);
+ cdy02 = mm_mullohi_epi32(dcdy, verty, &cdy13);
+ c02 = _mm_sub_epi64(cdx02, cdy02);
+ c13 = _mm_sub_epi64(cdx13, cdy13);
+ c02 = _mm_sub_epi64(c02, _mm_shuffle_epi32(c_dec,
+                                            _MM_SHUFFLE(2,2,0,0)));
+ c13 = _mm_sub_epi64(c13, _mm_shuffle_epi32(c_dec,
+                                            _MM_SHUFFLE(3,3,1,1)));
+
+ /*
+ * Useful for very small fbs/tris (or fewer subpixel bits) only:
+ * c = _mm_sub_epi32(mm_mullo_epi32(dcdx, vertx),
+ * mm_mullo_epi32(dcdy, verty));
+ *
+ * c = _mm_sub_epi32(c, c_dec);
+ */
/* Scale up to match c:
*/
dcdx = _mm_slli_epi32(dcdx, FIXED_ORDER);
dcdy = _mm_slli_epi32(dcdy, FIXED_ORDER);
- /* Calculate trivial reject values:
+ /*
+ * Calculate trivial reject values:
+ * Note eo cannot overflow even if dcdx/dcdy already took up
+ * 31 bits (which they shouldn't). This is because eo
+ * is never negative (though if we rely on that we need to be careful...)
+ */
eo = _mm_sub_epi32(_mm_andnot_si128(dcdy_neg_mask, dcdy),
_mm_and_si128(dcdx_neg_mask, dcdx));
/* ei = _mm_sub_epi32(_mm_sub_epi32(dcdy, dcdx), eo); */
- /* Pointless transpose which gets undone immediately in
- * rasterization:
+ /*
+ * Pointless transpose which gets undone immediately in
+ * rasterization.
+ * It is actually difficult to do away with it - we would essentially
+ * need GET_PLANES_DX, GET_PLANES_DY etc., but the calculations
+ * for these would then need to depend on the number of planes.
+ * The transpose is quite special here due to c being 64-bit...
+ * The stores have to be unaligned (unless we made the plane size
+ * a multiple of 128 bits), and of course eo has to be stored separately...
+ */
- transpose4_epi32(&c, &dcdx, &dcdy, &eo,
- &p0, &p1, &p2, &unused);
-
-#define STORE_PLANE(plane, vec) do { \
- _mm_store_si128((__m128i *)&temp_vec, vec); \
- plane.c = (int64_t)temp_vec[0]; \
- plane.dcdx = temp_vec[1]; \
- plane.dcdy = temp_vec[2]; \
- plane.eo = temp_vec[3]; \
- } while(0)
-
- STORE_PLANE(plane[0], p0);
- STORE_PLANE(plane[1], p1);
- STORE_PLANE(plane[2], p2);
-#undef STORE_PLANE
+ c01 = _mm_unpacklo_epi64(c02, c13);
+ c23 = _mm_unpackhi_epi64(c02, c13);
+ transpose2_64_2_32(&c01, &c23, &dcdx, &dcdy,
+                    &p0, &p1, &p2, &unused);
+ _mm_storeu_si128((__m128i *)&plane[0], p0);
+ plane[0].eo = (uint32_t)_mm_cvtsi128_si32(eo);
+ _mm_storeu_si128((__m128i *)&plane[1], p1);
+ eo = _mm_shuffle_epi32(eo, _MM_SHUFFLE(3,2,0,1));
+ plane[1].eo = (uint32_t)_mm_cvtsi128_si32(eo);
+ _mm_storeu_si128((__m128i *)&plane[2], p2);
+ eo = _mm_shuffle_epi32(eo, _MM_SHUFFLE(0,0,0,2));
+ plane[2].eo = (uint32_t)_mm_cvtsi128_si32(eo);
} else
#elif defined(_ARCH_PWR8) && defined(PIPE_ARCH_LITTLE_ENDIAN)
/*
@@ -577,17 +591,17 @@ do_triangle_ccw(struct lp_setup_context *setup,
plane[2].dcdx = position->dy20;
for (i = 0; i < 3; i++) {
- /* half-edge constants, will be interated over the whole render
+ /* half-edge constants, will be iterated over the whole render
* target.
*/
plane[i].c = IMUL64(plane[i].dcdx, position->x[i]) -
- IMUL64(plane[i].dcdy, position->y[i]);
+ IMUL64(plane[i].dcdy, position->y[i]);
/* correct for top-left vs. bottom-left fill convention.
- */
+ */
if (plane[i].dcdx < 0) {
/* both fill conventions want this - adjust for left edges */
- plane[i].c++;
+ plane[i].c++;
}
else if (plane[i].dcdx == 0) {
if (setup->bottom_edge_rule == 0){
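
The three unaligned 16-byte stores in the SSE path of do_triangle_ccw() above
assume that each plane starts with the 64-bit c followed by the 32-bit dcdx
and dcdy, with eo written separately afterwards. A sketch of the layout being
relied on (the real struct is defined elsewhere in the driver; the field order
here is inferred from the stores in this patch, not copied from the header):

   #include <stdint.h>

   /* Assumed plane layout: one _mm_storeu_si128() covers exactly c, dcdx
    * and dcdy (8 + 4 + 4 = 16 bytes); eo is filled in separately, and the
    * trailing padding makes the struct 24 bytes, so consecutive planes are
    * not 16-byte aligned - hence the unaligned stores. */
   struct plane_layout {
      int64_t  c;     /* 64-bit half-edge constant */
      int32_t  dcdx;  /* per-pixel x step */
      int32_t  dcdy;  /* per-pixel y step */
      uint32_t eo;    /* trivial-reject offset, from _mm_cvtsi128_si32() */
   };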