author     Eric Anholt <[email protected]>    2017-01-05 15:37:14 -0800
committer  Eric Anholt <[email protected]>    2017-01-05 17:19:54 -0800
commit     69da8c32c78cf67a55f32c71932d89b01154d49c (patch)
tree       74dd4234fa968aac39d79fa2b78aa4bb6d59f32e /src/gallium/drivers/vc4
parent     3a3a0d2d6cf37ff9d6e7f8ec9a6a78b24a353e08 (diff)
vc4: Rewrite T image handling based on calling the LT handler.
The T images are composed of effectively swizzled-around blocks of LT
(4x4 utile) images, so we can reduce the t_utile_address() calls by 16x
by calling into the simpler LT loop.

This also adds support for calling down with non-utile-aligned
coordinates, which will be part of lifting the utile alignment
requirement on our callers and avoiding the RMW on non-utile-aligned
stores.

Improves 1024x1024 TexSubImage by 2.55014% +/- 1.18584% (n=46)
Improves 1024x1024 GetTexImage by 2.242% +/- 0.880954% (n=32)
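For orientation, here is a minimal sketch of the coordinate split the new path relies on: a pixel is decomposed into a subtile-aligned utile coordinate (the part t_utile_address() still resolves) and an offset within the 4x4-utile subtile (the part delegated to the LT helpers). The utile geometry below assumes cpp = 4 (a 64-byte utile is 4x4 pixels); the driver derives the real values from vc4_utile_width()/vc4_utile_height().

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed geometry for cpp = 4: 4x4-pixel, 64-byte utiles; a
 * 1024-byte subtile is 4x4 utiles. */
#define UTILE_W 4
#define UTILE_H 4
#define STILE_W (4 * UTILE_W)
#define STILE_H (4 * UTILE_H)

int main(void)
{
        uint32_t x = 37, y = 21;        /* arbitrary pixel in a T image */

        /* Utile coordinate rounded down to the subtile grid; this is
         * what the rewritten code passes to t_utile_address(). */
        uint32_t utile_x = (x / UTILE_W) & ~0x3u;
        uint32_t utile_y = (y / UTILE_H) & ~0x3u;

        /* Remainder within the subtile; this becomes the pipe_box
         * handed to the LT load/store helpers. */
        uint32_t sub_x = x & (STILE_W - 1);
        uint32_t sub_y = y & (STILE_H - 1);

        printf("subtile origin (utiles): (%" PRIu32 ",%" PRIu32 "), "
               "offset inside it (pixels): (%" PRIu32 ",%" PRIu32 ")\n",
               utile_x, utile_y, sub_x, sub_y);
        return 0;
}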
Diffstat (limited to 'src/gallium/drivers/vc4')
-rw-r--r--  src/gallium/drivers/vc4/vc4_tiling.c | 109
1 file changed, 75 insertions(+), 34 deletions(-)
diff --git a/src/gallium/drivers/vc4/vc4_tiling.c b/src/gallium/drivers/vc4/vc4_tiling.c
index dab92752ff7..bf3861c7158 100644
--- a/src/gallium/drivers/vc4/vc4_tiling.c
+++ b/src/gallium/drivers/vc4/vc4_tiling.c
@@ -186,7 +186,10 @@ t_utile_address(uint32_t utile_x, uint32_t utile_y,
odd_stile_map[stile_index] :
even_stile_map[stile_index]);
- uint32_t utile_offset = 64 * ((utile_y & 3) * 4 + (utile_x & 3));
+ /* This function no longer handles the utile offset within a subtile.
+ * Walking subtiles is the job of the LT image handler.
+ */
+ assert(!(utile_x & 3) && !(utile_y & 3));
#if 0
fprintf(stderr, "utile %d,%d -> %d + %d + %d (stride %d,%d) = %d\n",
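The removed utile_offset line is the crux of the rewrite: within a subtile, utiles sit in plain raster order, which is exactly the LT layout, so the inner 4x4 walk can be delegated to the LT handler. A small illustrative check of that equivalence (constants taken from the removed line, not from the driver headers):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        for (uint32_t uy = 0; uy < 4; uy++) {
                for (uint32_t ux = 0; ux < 4; ux++) {
                        /* The removed line: utile offset within its
                         * 1024-byte subtile. */
                        uint32_t t_inner = 64 * ((uy & 3) * 4 + (ux & 3));

                        /* The same utile addressed as a 4-utile-wide
                         * raster-order (LT) image of 64-byte utiles. */
                        uint32_t lt = (uy * 4 + ux) * 64;

                        assert(t_inner == lt);
                }
        }
        return 0;
}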
@@ -196,29 +199,70 @@ t_utile_address(uint32_t utile_x, uint32_t utile_y,
tile_offset + stile_offset + utile_offset);
#endif
- return tile_offset + stile_offset + utile_offset;
+ return tile_offset + stile_offset;
}
-static void
-vc4_load_t_image(void *dst, uint32_t dst_stride,
- void *src, uint32_t src_stride,
- int cpp, const struct pipe_box *box)
+/**
+ * Loads or stores a T texture image by breaking it down into subtiles
+ * (1024-byte, 4x4-utile) sub-images that we can use the LT tiling functions
+ * on.
+ */
+static inline void
+vc4_t_image_helper(void *gpu, uint32_t gpu_stride,
+ void *cpu, uint32_t cpu_stride,
+ int cpp, const struct pipe_box *box,
+ bool to_cpu)
{
uint32_t utile_w = vc4_utile_width(cpp);
uint32_t utile_h = vc4_utile_height(cpp);
- uint32_t utile_stride = src_stride / cpp / utile_w;
- uint32_t xstart = box->x / utile_w;
- uint32_t ystart = box->y / utile_h;
-
- for (uint32_t y = 0; y < box->height / utile_h; y++) {
- for (int x = 0; x < box->width / utile_w; x++) {
- vc4_load_utile(dst + (y * utile_h * dst_stride +
- x * utile_w * cpp),
- src + t_utile_address(xstart + x,
- ystart + y,
- utile_stride),
- dst_stride, cpp);
+ uint32_t utile_w_shift = ffs(utile_w) - 1;
+ uint32_t utile_h_shift = ffs(utile_h) - 1;
+ uint32_t stile_w = 4 * utile_w;
+ uint32_t stile_h = 4 * utile_h;
+ assert(stile_w * stile_h * cpp == 1024);
+ uint32_t utile_stride = gpu_stride / cpp / utile_w;
+ uint32_t x1 = box->x;
+ uint32_t y1 = box->y;
+ uint32_t x2 = box->x + box->width;
+ uint32_t y2 = box->y + box->height;
+ struct pipe_box partial_box;
+ uint32_t gpu_lt_stride = stile_w * cpp;
+
+ for (uint32_t y = y1; y < y2; y = align(y + 1, stile_h)) {
+ partial_box.y = y & (stile_h - 1);
+ partial_box.height = MIN2(y2 - y, stile_h - partial_box.y);
+
+ uint32_t cpu_offset = 0;
+ for (uint32_t x = x1; x < x2; x = align(x + 1, stile_w)) {
+ partial_box.x = x & (stile_w - 1);
+ partial_box.width = MIN2(x2 - x,
+ stile_w - partial_box.x);
+
+ /* The dst offset we want is the start of this
+ * subtile
+ */
+ uint32_t gpu_offset =
+ t_utile_address((x >> utile_w_shift) & ~0x3,
+ (y >> utile_h_shift) & ~0x3,
+ utile_stride);
+
+ if (to_cpu) {
+ vc4_load_lt_image(cpu + cpu_offset,
+ cpu_stride,
+ gpu + gpu_offset,
+ gpu_lt_stride,
+ cpp, &partial_box);
+ } else {
+ vc4_store_lt_image(gpu + gpu_offset,
+ gpu_lt_stride,
+ cpu + cpu_offset,
+ cpu_stride,
+ cpp, &partial_box);
+ }
+
+ cpu_offset += partial_box.width * cpp;
}
+ cpu += cpu_stride * partial_box.height;
}
}
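One subtlety in the loop above: y advances by align(y + 1, stile_h), which jumps from any position, aligned or not, to the next subtile boundary, so the first iteration covers a possibly partial subtile and every later one starts on a boundary. A standalone sketch of that stepping, with align() written out as the power-of-two round-up the driver's util headers provide:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Power-of-two round-up, matching what align() does in Mesa's
 * util headers. */
static uint32_t align_pot(uint32_t v, uint32_t a)
{
        return (v + a - 1) & ~(a - 1);
}

#define MIN2(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        const uint32_t stile_h = 16;    /* subtile height in pixels, cpp = 4 */
        uint32_t y1 = 5, y2 = 40;       /* unaligned box covering rows [5, 40) */

        /* Mirrors the y loop in vc4_t_image_helper(): the first
         * iteration handles a partial subtile, every later one starts
         * on a subtile boundary. */
        for (uint32_t y = y1; y < y2; y = align_pot(y + 1, stile_h)) {
                uint32_t box_y = y & (stile_h - 1);
                uint32_t box_h = MIN2(y2 - y, stile_h - box_y);

                printf("subtile row %" PRIu32 ": rows %" PRIu32 "..%" PRIu32
                       " within it\n", y / stile_h, box_y, box_y + box_h - 1);
        }
        return 0;
}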
@@ -227,22 +271,19 @@ vc4_store_t_image(void *dst, uint32_t dst_stride,
void *src, uint32_t src_stride,
int cpp, const struct pipe_box *box)
{
- uint32_t utile_w = vc4_utile_width(cpp);
- uint32_t utile_h = vc4_utile_height(cpp);
- uint32_t utile_stride = dst_stride / cpp / utile_w;
- uint32_t xstart = box->x / utile_w;
- uint32_t ystart = box->y / utile_h;
-
- for (uint32_t y = 0; y < box->height / utile_h; y++) {
- for (int x = 0; x < box->width / utile_w; x++) {
- vc4_store_utile(dst + t_utile_address(xstart + x,
- ystart + y,
- utile_stride),
- src + (y * utile_h * src_stride +
- x * utile_w * cpp),
- src_stride, cpp);
- }
- }
+ vc4_t_image_helper(dst, dst_stride,
+ src, src_stride,
+ cpp, box, false);
+}
+
+static void
+vc4_load_t_image(void *dst, uint32_t dst_stride,
+ void *src, uint32_t src_stride,
+ int cpp, const struct pipe_box *box)
+{
+ vc4_t_image_helper(src, src_stride,
+ dst, dst_stride,
+ cpp, box, true);
}
/**
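Finally, a hedged sketch of what the rewrite buys a TexSubImage-style caller. struct pipe_box here is a pared-down stand-in for the Gallium type (the helpers only read x, y, width, and height), the box is deliberately non-utile-aligned (the case the helper itself now accepts), and the subtile footprint math quantifies the commit's 16x claim:

#include <stdio.h>

/* Pared-down stand-in for Gallium's struct pipe_box. */
struct pipe_box {
        int x, y, width, height;
};

int main(void)
{
        /* An update region as a caller might pass to
         * vc4_store_t_image(). */
        struct pipe_box box = { .x = 37, .y = 21, .width = 100, .height = 50 };

        /* Subtile footprint for cpp = 4 (16x16-pixel subtiles): the
         * rewritten path calls t_utile_address() once per touched
         * subtile rather than once per 4x4-pixel utile. */
        const int stile_w = 16, stile_h = 16;
        int cols = (box.x + box.width + stile_w - 1) / stile_w - box.x / stile_w;
        int rows = (box.y + box.height + stile_h - 1) / stile_h - box.y / stile_h;

        printf("box touches %d x %d = %d subtile(s); the old per-utile "
               "loop called t_utile_address() up to 16x as often\n",
               cols, rows, cols * rows);
        return 0;
}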