Diffstat (limited to 'src/panfrost')
-rw-r--r--  src/panfrost/encoder/pan_encoder.h     6
-rw-r--r--  src/panfrost/encoder/pan_tiler.c     148
-rw-r--r--  src/panfrost/include/panfrost-job.h   10
-rw-r--r--  src/panfrost/pandecode/decode.c       55
4 files changed, 93 insertions, 126 deletions
diff --git a/src/panfrost/encoder/pan_encoder.h b/src/panfrost/encoder/pan_encoder.h
index 8aa2df7240b..ceff2e949de 100644
--- a/src/panfrost/encoder/pan_encoder.h
+++ b/src/panfrost/encoder/pan_encoder.h
@@ -56,14 +56,14 @@ panfrost_pack_work_groups_fused(
/* Tiler structure size computation */
unsigned
-panfrost_tiler_header_size(unsigned width, unsigned height, unsigned mask);
+panfrost_tiler_header_size(unsigned width, unsigned height, unsigned mask, bool hierarchy);
unsigned
-panfrost_tiler_full_size(unsigned width, unsigned height, unsigned mask);
+panfrost_tiler_full_size(unsigned width, unsigned height, unsigned mask, bool hierarchy);
unsigned
panfrost_choose_hierarchy_mask(
unsigned width, unsigned height,
- unsigned vertex_count);
+ unsigned vertex_count, bool hierarchy);
#endif
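
A minimal usage sketch of the updated interface (not part of the patch; the caller-side names fb_width, fb_height, vertex_count and has_hierarchy are assumptions, while the three prototypes are the ones declared above):

    /* Sketch only: drive the three entry points above with the new flag */
    unsigned mask = panfrost_choose_hierarchy_mask(fb_width, fb_height,
                                                   vertex_count, has_hierarchy);
    unsigned header_size = panfrost_tiler_header_size(fb_width, fb_height,
                                                      mask, has_hierarchy);
    unsigned full_size = panfrost_tiler_full_size(fb_width, fb_height,
                                                  mask, has_hierarchy);
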
diff --git a/src/panfrost/encoder/pan_tiler.c b/src/panfrost/encoder/pan_tiler.c
index 98ef6827a80..fc42724a1e5 100644
--- a/src/panfrost/encoder/pan_tiler.c
+++ b/src/panfrost/encoder/pan_tiler.c
@@ -218,13 +218,6 @@
/* Likewise, each tile per level has 512 bytes of body */
#define FULL_BYTES_PER_TILE 0x200
-/* Absent any geometry, the minimum size of the header */
-#define MINIMUM_HEADER_SIZE 0x200
-
-/* Mask of valid hierarchy levels: one bit for each level from min...max
- * inclusive */
-#define HIERARCHY_MASK (((MAX_TILE_SIZE / MIN_TILE_SIZE) << 1) - 1)
-
/* If the width-x-height framebuffer is divided into tile_size-x-tile_size
* tiles, how many tiles are there? Rounding up in each direction. For the
* special case of tile_size=16, this aligns with the usual Midgard count.
@@ -233,108 +226,86 @@
 * a fixed tile size (not any of a number of power-of-twos) */
static unsigned
-pan_tile_count(unsigned width, unsigned height, unsigned tile_size)
+pan_tile_count(unsigned width, unsigned height, unsigned tile_width, unsigned tile_height)
{
- unsigned aligned_width = ALIGN_POT(width, tile_size);
- unsigned aligned_height = ALIGN_POT(height, tile_size);
+ unsigned aligned_width = ALIGN_POT(width, tile_width);
+ unsigned aligned_height = ALIGN_POT(height, tile_height);
- unsigned tile_count_x = aligned_width / tile_size;
- unsigned tile_count_y = aligned_height / tile_size;
+ unsigned tile_count_x = aligned_width / tile_width;
+ unsigned tile_count_y = aligned_height / tile_height;
return tile_count_x * tile_count_y;
}
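
For illustration, a standalone sketch (not part of the patch) of the rounding-up arithmetic above, assuming a 1920x1080 framebuffer and 16x16 tiles:

    /* ALIGN_POT rounds each dimension up to the tile size before dividing:
     * 1920 is already a multiple of 16, 1080 rounds up to 1088 */
    unsigned tile_count_x = (1920 + 15) / 16;              /* 120 */
    unsigned tile_count_y = (1080 + 15) / 16;              /*  68 */
    unsigned tile_count   = tile_count_x * tile_count_y;   /* 8160 */
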
/* For `masked_count` of the smallest tile sizes masked out, computes the
* size of the polygon list header. We iterate the tile sizes (16x16 through
- * 2048x2048, if nothing is masked; (16*2^masked_count)x(16*2^masked_count)
- * through 2048x2048 more generally. For each tile size, we figure out how many
- * tiles there are at this hierarchy level and therefore many bytes this level
- * is, leaving us with a byte count for each level. We then just sum up the
- * byte counts across the levels to find a byte count for all levels. */
+ * 2048x2048). For each tile size, we figure out how many tiles there are at
+ * this hierarchy level and therefore how many bytes this level is, leaving us
+ * a byte count for each level. We then just sum up the byte counts across the
+ * levels to find a byte count for all levels. */
static unsigned
-panfrost_raw_segment_size(
+panfrost_hierarchy_size(
unsigned width,
unsigned height,
- unsigned masked_count,
- unsigned end_level,
+ unsigned mask,
unsigned bytes_per_tile)
{
unsigned size = PROLOGUE_SIZE;
- /* Normally we start at 16x16 tiles (MIN_TILE_SHIFT), but we add more
- * if anything is masked off */
-
- unsigned start_level = MIN_TILE_SHIFT + masked_count;
+ /* Iterate hierarchy levels */
- /* Iterate hierarchy levels / tile sizes */
+ for (unsigned b = 0; b < (MAX_TILE_SHIFT - MIN_TILE_SHIFT); ++b) {
+ /* Check if this level is enabled */
+ if (!(mask & (1 << b)))
+ continue;
- for (unsigned i = start_level; i <= end_level; ++i) {
/* Shift from a level to a tile size */
- unsigned tile_size = (1 << i);
+ unsigned tile_size = (1 << b) * MIN_TILE_SIZE;
- unsigned tile_count = pan_tile_count(width, height, tile_size);
+ unsigned tile_count = pan_tile_count(width, height, tile_size, tile_size);
unsigned level_count = bytes_per_tile * tile_count;
size += level_count;
}
/* This size will be used as an offset, so ensure it's aligned */
- return ALIGN_POT(size, 512);
+ return ALIGN_POT(size, 0x200);
}
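
For illustration, a standalone sketch (not part of the patch) of the summation above, assuming a 256x256 framebuffer, mask 0x3 (the 16x16 and 32x32 levels enabled) and FULL_BYTES_PER_TILE:

    unsigned level0 = (256 / 16) * (256 / 16);      /* 256 tiles at 16x16 */
    unsigned level1 = (256 / 32) * (256 / 32);      /*  64 tiles at 32x32 */
    unsigned body   = (level0 + level1) * 0x200;    /* 320 * 0x200 = 0x28000 */
    /* The function returns PROLOGUE_SIZE + body, aligned up to 0x200 */
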
-/* Given a hierarchy mask and a framebuffer size, compute the size of one of
- * the segments (header or body) */
+/* Implement the formula:
+ *
+ * 0x200 + bytes_per_tile * ceil(W / w) * ceil(H / h)
+ *
+ * rounding down the answer to the nearest 0x200. This is used to compute both
+ * header and body sizes for GPUs without hierarchical tiling. Essentially,
+ * computing a single hierarchy level, since there isn't any hierarchy!
+ */
static unsigned
-panfrost_segment_size(
- unsigned width, unsigned height,
- unsigned mask, unsigned bytes_per_tile)
+panfrost_flat_size(unsigned width, unsigned height, unsigned dim, unsigned bytes_per_tile)
{
- /* The tiler-disabled case should have been handled by the caller */
- assert(mask);
+ /* First, extract the tile dimensions */
- /* Some levels are enabled. Ensure that only smaller levels are
- * disabled and there are no gaps. Theoretically the hardware is more
- * flexible, but there's no known reason to use other configurations
- * and this keeps the code simple. Since we know the 0x80 or 0x100 bit
- * is set, ctz(mask) will return the number of masked off levels. */
+ unsigned tw = (1 << (dim & 0b111)) * 8;
+ unsigned th = (1 << ((dim & (0b111 << 6)) >> 6)) * 8;
- unsigned masked_count = __builtin_ctz(mask);
+ /* tile_count is ceil(W/w) * ceil(H/h) */
+ unsigned raw = pan_tile_count(width, height, tw, th) * bytes_per_tile;
- assert(mask & (0x80 | 0x100));
- assert(((mask >> masked_count) & ((mask >> masked_count) + 1)) == 0);
-
- /* Figure out the top level */
- unsigned unused_count = __builtin_clz(mask);
- unsigned top_bit = ((8 * sizeof(mask)) - 1) - unused_count;
-
- /* We don't have bits for nonexistant levels below 16x16 */
- unsigned top_level = top_bit + 4;
-
- /* Everything looks good. Use the number of trailing zeroes we found to
- * figure out how many smaller levels are disabled to compute the
- * actual header size */
-
- return panfrost_raw_segment_size(width, height,
- masked_count, top_level, bytes_per_tile);
+ /* Round down and add offset */
+ return 0x200 + ((raw / 0x200) * 0x200);
}
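
As a standalone sketch (not part of the patch) of the formula above, take a 600x400 framebuffer, a decoded tile size that works out to 16x16, and FULL_BYTES_PER_TILE:

    unsigned tiles = ((600 + 15) / 16) * ((400 + 15) / 16); /* 38 * 25 = 950 */
    unsigned raw   = tiles * 0x200;                         /* 0x76C00 */
    unsigned size  = 0x200 + ((raw / 0x200) * 0x200);       /* 0x76E00 */
    /* The round-down is a no-op here because FULL_BYTES_PER_TILE is itself
     * a multiple of 0x200; it only matters for smaller bytes-per-tile values */
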
-
/* Given a hierarchy mask and a framebuffer size, compute the header size */
unsigned
-panfrost_tiler_header_size(unsigned width, unsigned height, unsigned mask)
+panfrost_tiler_header_size(unsigned width, unsigned height, unsigned mask, bool hierarchy)
{
- mask &= HIERARCHY_MASK;
-
- /* If no hierarchy levels are enabled, that means there is no geometry
- * for the tiler to process, so use a minimum size. Used for clears */
-
- if (mask == 0x00)
- return MINIMUM_HEADER_SIZE;
-
- return panfrost_segment_size(width, height, mask, HEADER_BYTES_PER_TILE);
+ if (hierarchy)
+ return panfrost_hierarchy_size(width, height, mask, HEADER_BYTES_PER_TILE);
+ else
+ return panfrost_flat_size(width, height, mask, HEADER_BYTES_PER_TILE);
}
/* The combined header/body is sized similarly (but it is significantly
@@ -343,14 +314,38 @@ panfrost_tiler_header_size(unsigned width, unsigned height, unsigned mask)
*/
unsigned
-panfrost_tiler_full_size(unsigned width, unsigned height, unsigned mask)
+panfrost_tiler_full_size(unsigned width, unsigned height, unsigned mask, bool hierarchy)
+{
+ if (hierarchy)
+ return panfrost_hierarchy_size(width, height, mask, FULL_BYTES_PER_TILE);
+ else
+ return panfrost_flat_size(width, height, mask, FULL_BYTES_PER_TILE);
+}
+
+/* On GPUs without hierarchical tiling, we choose a tile size directly and
+ * stuff it into the field otherwise known as hierarchy mask (not a mask). */
+
+static unsigned
+panfrost_choose_tile_size(
+ unsigned width, unsigned height, unsigned vertex_count)
{
- mask &= HIERARCHY_MASK;
+ /* Figure out the ideal tile size. Eventually a heuristic should be
+ * used for this */
+
+ unsigned best_w = 16;
+ unsigned best_h = 16;
+
+ /* Clamp so there are fewer than 64 tiles in each direction */
- if (mask == 0x00)
- return MINIMUM_HEADER_SIZE;
+ best_w = MAX2(best_w, util_next_power_of_two(width / 63));
+ best_h = MAX2(best_h, util_next_power_of_two(height / 63));
- return panfrost_segment_size(width, height, mask, FULL_BYTES_PER_TILE);
+ /* We have our ideal tile size, so encode */
+
+ unsigned exp_w = util_logbase2(best_w / 16);
+ unsigned exp_h = util_logbase2(best_h / 16);
+
+ return exp_w | (exp_h << 6);
}
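
For illustration, a standalone sketch (not part of the patch) of the encoding above for a hypothetical 4096x2160 framebuffer, using the same util helpers as the code:

    unsigned best_w = MAX2(16u, util_next_power_of_two(4096 / 63)); /* 128 */
    unsigned best_h = MAX2(16u, util_next_power_of_two(2160 / 63)); /*  64 */
    unsigned dim    = util_logbase2(best_w / 16)
                    | (util_logbase2(best_h / 16) << 6);            /* 0x83 */
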
/* In the future, a heuristic to choose a tiler hierarchy mask would go here.
@@ -362,13 +357,16 @@ panfrost_tiler_full_size(unsigned width, unsigned height, unsigned mask)
unsigned
panfrost_choose_hierarchy_mask(
unsigned width, unsigned height,
- unsigned vertex_count)
+ unsigned vertex_count, bool hierarchy)
{
/* If there is no geometry, we don't bother enabling anything */
if (!vertex_count)
return 0x00;
+ if (!hierarchy)
+ return panfrost_choose_tile_size(width, height, vertex_count);
+
/* Otherwise, default everything on. TODO: Proper tests */
return 0xFF;
diff --git a/src/panfrost/include/panfrost-job.h b/src/panfrost/include/panfrost-job.h
index 06a45d186d3..ff0d9aa9036 100644
--- a/src/panfrost/include/panfrost-job.h
+++ b/src/panfrost/include/panfrost-job.h
@@ -1392,9 +1392,17 @@ struct mali_payload_fragment {
/* See pan_tiler.c for derivation */
#define MALI_HIERARCHY_MASK ((1 << 9) - 1)
-/* Flag disabling the tiler for clear-only jobs */
+/* Flag disabling the tiler for clear-only jobs, with
+ * hierarchical tiling */
#define MALI_TILER_DISABLED (1 << 12)
+/* Flag selecting userspace-generated polygon list, for clear-only jobs without
+ * hierarchical tiling. */
+#define MALI_TILER_USER 0xFFF
+
+/* Absent any geometry, the minimum size of the polygon list header */
+#define MALI_TILER_MINIMUM_HEADER_SIZE 0x200
+
struct midgard_tiler_descriptor {
/* Size of the entire polygon list; see pan_tiler.c for the
* computation. It's based on hierarchical tiling */
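
A hedged sketch (not part of this patch) of how a driver might consume the two clear-only mechanisms defined above; `t` stands for a struct midgard_tiler_descriptor and `hierarchy` mirrors the flag threaded through pan_tiler.c:

    if (hierarchy) {
            /* Hierarchical tiling: the tiler can simply be switched off */
            t.hierarchy_mask |= MALI_TILER_DISABLED;
    } else {
            /* No hierarchy: point the hardware at a userspace-generated,
             * minimal polygon list instead */
            t.hierarchy_mask = MALI_TILER_USER;
            t.polygon_list_size = MALI_TILER_MINIMUM_HEADER_SIZE;
    }
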
diff --git a/src/panfrost/pandecode/decode.c b/src/panfrost/pandecode/decode.c
index 847a0d7e7b0..89992a76a79 100644
--- a/src/panfrost/pandecode/decode.c
+++ b/src/panfrost/pandecode/decode.c
@@ -513,7 +513,8 @@ pandecode_midgard_tiler_descriptor(
const struct midgard_tiler_descriptor *t,
unsigned width,
unsigned height,
- bool is_fragment)
+ bool is_fragment,
+ bool has_hierarchy)
{
pandecode_log(".tiler = {\n");
pandecode_indent++;
@@ -546,8 +547,8 @@ pandecode_midgard_tiler_descriptor(
/* Now that we've sanity checked, we'll try to calculate the sizes
* ourselves for comparison */
- unsigned ref_header = panfrost_tiler_header_size(width, height, t->hierarchy_mask);
- unsigned ref_size = panfrost_tiler_full_size(width, height, t->hierarchy_mask);
+ unsigned ref_header = panfrost_tiler_header_size(width, height, t->hierarchy_mask, has_hierarchy);
+ unsigned ref_size = panfrost_tiler_full_size(width, height, t->hierarchy_mask, has_hierarchy);
if (!((ref_header == body_offset) && (ref_size == t->polygon_list_size))) {
pandecode_msg("XXX: bad polygon list size (expected %d / 0x%x)\n",
@@ -630,44 +631,6 @@ pandecode_midgard_tiler_descriptor(
pandecode_log("}\n");
}
-static void
-pandecode_midgard_tiler_descriptor_0x20(
- const struct midgard_tiler_descriptor *t)
-{
- pandecode_log(".tiler = {\n");
- pandecode_indent++;
-
- pandecode_prop("hierarchy_mask = 0x%" PRIx16, t->hierarchy_mask);
- pandecode_prop("flags = 0x%" PRIx16, t->flags);
- MEMORY_PROP(t, polygon_list);
- MEMORY_PROP(t, polygon_list_body);
- pandecode_prop("polygon_list_size = 0x%x", t->polygon_list_size);
- MEMORY_PROP(t, heap_start);
- MEMORY_PROP(t, heap_end);
-
- /* We've never seen weights used in practice, but we know from the
- * kernel these fields are there */
-
- bool nonzero_weights = false;
-
- for (unsigned w = 0; w < ARRAY_SIZE(t->weights); ++w) {
- nonzero_weights |= t->weights[w] != 0x0;
- }
-
- if (nonzero_weights) {
- pandecode_log(".weights = {");
-
- for (unsigned w = 0; w < ARRAY_SIZE(t->weights); ++w) {
- pandecode_log("%d, ", t->weights[w]);
- }
-
- pandecode_log("},");
- }
-
- pandecode_indent--;
- pandecode_log("}\n");
-}
-
/* Information about the framebuffer passed back for
* additional analysis */
@@ -792,11 +755,9 @@ pandecode_sfbd(uint64_t gpu_va, int job_no, bool is_fragment, unsigned gpu_id)
MEMORY_PROP(s, unknown_address_0);
const struct midgard_tiler_descriptor t = s->tiler;
- if (gpu_id == 0x0720 || gpu_id == 0x0820 || gpu_id == 0x0830)
- /* These ones don't have an "Advanced Tiling Unit" */
- pandecode_midgard_tiler_descriptor_0x20(&t);
- else
- pandecode_midgard_tiler_descriptor(&t, s->width + 1, s->height + 1, is_fragment);
+
+ bool has_hierarchy = !(gpu_id == 0x0720 || gpu_id == 0x0820 || gpu_id == 0x0830);
+ pandecode_midgard_tiler_descriptor(&t, s->width + 1, s->height + 1, is_fragment, has_hierarchy);
pandecode_indent--;
pandecode_log("};\n");
@@ -1157,7 +1118,7 @@ pandecode_mfbd_bfr(uint64_t gpu_va, int job_no, bool is_fragment)
pandecode_prop("unknown2 = 0x%x", fb->unknown2);
MEMORY_PROP(fb, scratchpad);
const struct midgard_tiler_descriptor t = fb->tiler;
- pandecode_midgard_tiler_descriptor(&t, fb->width1 + 1, fb->height1 + 1, is_fragment);
+ pandecode_midgard_tiler_descriptor(&t, fb->width1 + 1, fb->height1 + 1, is_fragment, true);
if (fb->zero3 || fb->zero4) {
pandecode_msg("XXX: framebuffer zeros tripped\n");