author	Roland Scheidegger <[email protected]>	2018-01-16 17:55:00 +0100
committer	Roland Scheidegger <[email protected]>	2018-01-17 00:01:19 +0100
commit	b0413cfd8b84634db4a5bf57d550b21d0d2fa8f7 (patch)
tree	d64aa56e3313ae977bee718aeca90a175f416cbf
parent	1f462eaf394517dac98b0c41f09e995f2940fdb8 (diff)
draw: remove VSPLIT_CREATE_IDX macro
Just inline the little bit of code.

Reviewed-by: Jose Fonseca <[email protected]>
Reviewed-by: Brian Paul <[email protected]>
-rw-r--r--	src/gallium/auxiliary/draw/draw_pt_vsplit.c	23
1 file changed, 12 insertions(+), 11 deletions(-)
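
For context, the following is a minimal standalone sketch (not Mesa code) of the computation the removed VSPLIT_CREATE_IDX macro expanded to, and which the patch now inlines at each call site: compute the base element index with overflow clamping, fetch the element, then add the element bias. Here overflow_uadd_sketch, MAX_ELT_IDX_SKETCH and the plain array fetch are simplified stand-ins for Mesa's draw_overflow_uadd(), MAX_ELT_IDX and DRAW_GET_IDX().

/* Standalone sketch of the inlined index computation; stand-in names only. */
#include <stdio.h>

#define MAX_ELT_IDX_SKETCH 0xffffffffu

/* Clamp a + b to a sentinel value on unsigned overflow, mirroring the role
 * draw_overflow_uadd() plays in vsplit_get_base_idx(). */
static unsigned
overflow_uadd_sketch(unsigned a, unsigned b, unsigned restart)
{
   unsigned sum = a + b;
   return (sum < a) ? restart : sum;
}

static unsigned
create_idx_sketch(const unsigned char *elts, unsigned start,
                  unsigned fetch, int elt_bias)
{
   unsigned elt_idx = overflow_uadd_sketch(start, fetch, MAX_ELT_IDX_SKETCH);
   /* The final element index is just the element index plus the element bias. */
   return (unsigned)((int)elts[elt_idx] + elt_bias);
}

int
main(void)
{
   const unsigned char elts[] = { 10, 20, 30, 40 };
   printf("%u\n", create_idx_sketch(elts, 1, 2, 5)); /* prints 45 (40 + 5) */
   return 0;
}

The real code additionally checks the result against DRAW_MAX_FETCH_IDX before inserting it into the cache (since the cache is initialized to -1), as the hunks below show.
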
diff --git a/src/gallium/auxiliary/draw/draw_pt_vsplit.c b/src/gallium/auxiliary/draw/draw_pt_vsplit.c
index 3ff077b7602..653deab28cd 100644
--- a/src/gallium/auxiliary/draw/draw_pt_vsplit.c
+++ b/src/gallium/auxiliary/draw/draw_pt_vsplit.c
@@ -116,21 +116,15 @@ vsplit_get_base_idx(unsigned start, unsigned fetch)
return draw_overflow_uadd(start, fetch, MAX_ELT_IDX);
}
-/*
- * The final element index is just element index plus element bias.
- */
-#define VSPLIT_CREATE_IDX(elts, start, fetch, elt_bias) \
- unsigned elt_idx; \
- elt_idx = vsplit_get_base_idx(start, fetch); \
- elt_idx = (unsigned)((int)(DRAW_GET_IDX(elts, elt_idx)) + (int)elt_bias);
-
static inline void
vsplit_add_cache_ubyte(struct vsplit_frontend *vsplit, const ubyte *elts,
unsigned start, unsigned fetch, int elt_bias)
{
struct draw_context *draw = vsplit->draw;
- VSPLIT_CREATE_IDX(elts, start, fetch, elt_bias);
+ unsigned elt_idx;
+ elt_idx = vsplit_get_base_idx(start, fetch);
+ elt_idx = (unsigned)((int)(DRAW_GET_IDX(elts, elt_idx)) + elt_bias);
/* unlike the uint case this can only happen with elt_bias */
if (elt_bias && elt_idx == DRAW_MAX_FETCH_IDX && !vsplit->cache.has_max_fetch) {
unsigned hash = elt_idx % MAP_SIZE;
@@ -145,7 +139,9 @@ vsplit_add_cache_ushort(struct vsplit_frontend *vsplit, const ushort *elts,
unsigned start, unsigned fetch, int elt_bias)
{
struct draw_context *draw = vsplit->draw;
- VSPLIT_CREATE_IDX(elts, start, fetch, elt_bias);
+ unsigned elt_idx;
+ elt_idx = vsplit_get_base_idx(start, fetch);
+ elt_idx = (unsigned)((int)(DRAW_GET_IDX(elts, elt_idx)) + elt_bias);
/* unlike the uint case this can only happen with elt_bias */
if (elt_bias && elt_idx == DRAW_MAX_FETCH_IDX && !vsplit->cache.has_max_fetch) {
unsigned hash = elt_idx % MAP_SIZE;
@@ -165,7 +161,12 @@ vsplit_add_cache_uint(struct vsplit_frontend *vsplit, const uint *elts,
unsigned start, unsigned fetch, int elt_bias)
{
struct draw_context *draw = vsplit->draw;
- VSPLIT_CREATE_IDX(elts, start, fetch, elt_bias);
+ unsigned elt_idx;
+ /*
+ * The final element index is just element index plus element bias.
+ */
+ elt_idx = vsplit_get_base_idx(start, fetch);
+ elt_idx = (unsigned)((int)(DRAW_GET_IDX(elts, elt_idx)) + elt_bias);
/* Take care for DRAW_MAX_FETCH_IDX (since cache is initialized to -1). */
if (elt_idx == DRAW_MAX_FETCH_IDX && !vsplit->cache.has_max_fetch) {
unsigned hash = elt_idx % MAP_SIZE;