author    Eric Anholt <[email protected]>    2020-05-29 16:49:43 -0700
committer Eric Anholt <[email protected]>    2020-06-05 13:43:30 -0700
commit    0bacb280a886905310c9b30c5af234c32ff582dc (patch)
tree      e6a22d4394c7fbee3a3ed4841dbc0d27fb367747 /src/freedreno
parent    e349f502792e927a1acdeaf00e591878bd18c837 (diff)
freedreno/ir3: Handle cases where we decide not to lower UBO 0 loads.
We advertise 4096 vec4s of GL uniform storage, but the HW can only store
512 vec4s in the const buffer.

Closes: #3049
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5273>
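As background (not part of the commit; the limits are the ones quoted in the commit message, and 16 bytes per vec4 is assumed), the size mismatch works out roughly as follows:

/* Illustrative arithmetic only; the limits come from the commit message,
 * not from ir3 headers. */
#include <stdio.h>

int main(void)
{
   const unsigned advertised_vec4s = 4096; /* GL uniform storage we advertise */
   const unsigned hw_const_vec4s = 512;    /* vec4s the HW const buffer holds */

   printf("advertised uniform storage: %u bytes\n", advertised_vec4s * 16); /* 65536 */
   printf("HW const buffer:            %u bytes\n", hw_const_vec4s * 16);   /* 8192 */

   /* An indirect load that can touch the whole default uniform block may
    * therefore not fit in the const file, so the lowering pass has to be
    * able to decide not to lower it. */
   return 0;
}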
Diffstat (limited to 'src/freedreno')
-rw-r--r--  src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c  78
1 file changed, 39 insertions(+), 39 deletions(-)
diff --git a/src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c b/src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c
index d7dd9b912c8..449d908a290 100644
--- a/src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c
+++ b/src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c
@@ -27,16 +27,29 @@
#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"
+static bool
+range_is_gl_uniforms(struct ir3_ubo_range *r)
+{
+ return !r->bindless && r->block == 0;
+}
+
static inline struct ir3_ubo_range
-get_ubo_load_range(nir_intrinsic_instr *instr, uint32_t alignment)
+get_ubo_load_range(nir_shader *nir, nir_intrinsic_instr *instr, uint32_t alignment)
{
struct ir3_ubo_range r;
- int offset = nir_src_as_uint(instr->src[1]);
- const int bytes = nir_intrinsic_dest_components(instr) * 4;
+ if (nir_src_is_const(instr->src[1])) {
+ int offset = nir_src_as_uint(instr->src[1]);
+ const int bytes = nir_intrinsic_dest_components(instr) * 4;
- r.start = ROUND_DOWN_TO(offset, alignment * 16);
- r.end = ALIGN(offset + bytes, alignment * 16);
+ r.start = ROUND_DOWN_TO(offset, alignment * 16);
+ r.end = ALIGN(offset + bytes, alignment * 16);
+ } else {
+ /* The other valid place to call this is on the GL default uniform block */
+ assert(nir_src_as_uint(instr->src[0]) == 0);
+ r.start = 0;
+ r.end = ALIGN(nir->num_uniforms * 16, alignment * 16);
+ }
return r;
}
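Purely as an illustration of the rounding get_ubo_load_range() applies in the constant-offset case (the macros below are local stand-ins with the same semantics as the util/u_math.h ones, assuming a power-of-two granule; they are not the ir3 definitions):

#define ROUND_DOWN_TO(v, a) ((v) & ~((a) - 1))
#define ALIGN(v, a)         (((v) + (a) - 1) & ~((a) - 1))

/* A 16-byte (vec4) load at byte offset 36 with alignment == 1, i.e. a
 * 16-byte granule, ends up with the range [32, 64). */
static void range_example(void)
{
   const unsigned alignment = 1;
   const unsigned offset = 36, bytes = 16;
   const unsigned start = ROUND_DOWN_TO(offset, alignment * 16); /* 32 */
   const unsigned end = ALIGN(offset + bytes, alignment * 16);   /* 64 */
   (void)start;
   (void)end;
}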
@@ -87,30 +100,21 @@ static void
gather_ubo_ranges(nir_shader *nir, nir_intrinsic_instr *instr,
struct ir3_ubo_analysis_state *state, uint32_t alignment)
{
- struct ir3_ubo_range *old_r = get_existing_range(instr, state, true);
- if (!old_r)
+ if (ir3_shader_debug & IR3_DBG_NOUBOOPT)
return;
- if (!nir_src_is_const(instr->src[1])) {
- if (!old_r->bindless && old_r->block == 0) {
- /* If this is an indirect on UBO 0, we'll still lower it back to
- * load_uniform. Set the range to cover all of UBO 0.
- */
- old_r->start = 0;
- old_r->end = ALIGN(nir->num_uniforms * 16, alignment * 16);
- }
-
+ struct ir3_ubo_range *old_r = get_existing_range(instr, state, true);
+ if (!old_r)
return;
- }
- const struct ir3_ubo_range r = get_ubo_load_range(instr, alignment);
-
- /* if UBO lowering is disabled, we still want to lower block 0
- * (which is normal uniforms):
+ /* We don't know how to get the size of UBOs being indirected on, other
+ * than on the GL uniforms where we have some other shader_info data.
*/
- if ((old_r->bindless || old_r->block != 0) && (ir3_shader_debug & IR3_DBG_NOUBOOPT))
+ if (!nir_src_is_const(instr->src[1]) && !range_is_gl_uniforms(old_r))
return;
+ const struct ir3_ubo_range r = get_ubo_load_range(nir, instr, alignment);
+
if (r.start < old_r->start)
old_r->start = r.start;
if (old_r->end < r.end)
@@ -212,24 +216,20 @@ lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
return;
}
- if (range->bindless || range->block > 0) {
- /* We don't lower dynamic array indexing either, but we definitely should.
- * We don't have a good way of determining the range of the dynamic
- * access, so for now just fall back to pulling.
- */
- if (!nir_src_is_const(instr->src[1])) {
- track_ubo_use(instr, b, num_ubos);
- return;
- }
+ /* We don't have a good way of determining the range of the dynamic
+ * access in general, so for now just fall back to pulling.
+ */
+ if (!nir_src_is_const(instr->src[1]) && !range_is_gl_uniforms(range))
+ return;
- /* After gathering the UBO access ranges, we limit the total
- * upload. Reject if we're now outside the range.
- */
- const struct ir3_ubo_range r = get_ubo_load_range(instr, alignment);
- if (!(range->start <= r.start && r.end <= range->end)) {
- track_ubo_use(instr, b, num_ubos);
- return;
- }
+ /* After gathering the UBO access ranges, we limit the total
+ * upload. Don't lower if this load is outside the range.
+ */
+ const struct ir3_ubo_range r = get_ubo_load_range(b->shader,
+ instr, alignment);
+ if (!(range->start <= r.start && r.end <= range->end)) {
+ track_ubo_use(instr, b, num_ubos);
+ return;
}
nir_ssa_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
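The check that decides between lowering and falling back to a pull via track_ubo_use() reduces to a range-inclusion test; a small standalone sketch of that predicate (helper name is hypothetical):

#include <stdbool.h>

/* True when the load's byte range [r_start, r_end) lies entirely inside the
 * range chosen for const upload; otherwise the load stays a regular UBO pull. */
static bool load_fits_upload_range(unsigned r_start, unsigned r_end,
                                   unsigned range_start, unsigned range_end)
{
   return range_start <= r_start && r_end <= range_end;
}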