diff options
author | Alyssa Rosenzweig <[email protected]> | 2019-07-16 14:40:32 -0700 |
---|---|---|
committer | Alyssa Rosenzweig <[email protected]> | 2019-07-22 08:20:34 -0700 |
commit | 7e052d933270876f4c3200e05b5b66515589fd1e (patch) | |
tree | d533ad3f6efd58cdbfe69f08c6566ed316722e4a /src | |
parent | 3174bc9972fa812989c2bbb4be8d0651024d84f2 (diff) |
pan/midgard: Remove "aliasing"
It was a crazy idea that didn't pan out. We're better served by a good
copyprop pass. It's also unused now.
Signed-off-by: Alyssa Rosenzweig <[email protected]>
Diffstat (limited to 'src')
-rw-r--r-- | src/panfrost/midgard/compiler.h | 14 | ||||
-rw-r--r-- | src/panfrost/midgard/midgard_compile.c | 82 |
2 files changed, 0 insertions, 96 deletions
diff --git a/src/panfrost/midgard/compiler.h b/src/panfrost/midgard/compiler.h index 73ec4b56fb3..3a8732657e5 100644 --- a/src/panfrost/midgard/compiler.h +++ b/src/panfrost/midgard/compiler.h @@ -218,20 +218,6 @@ typedef struct compiler_context { /* Constants which have been loaded, for later inlining */ struct hash_table_u64 *ssa_constants; - /* SSA values / registers which have been aliased. Naively, these - * demand a fmov output; instead, we alias them in a later pass to - * avoid the wasted op. - * - * A note on encoding: to avoid dynamic memory management here, rather - * than mapping to a pointer, we map to the source index; the key - * itself is just the destination index. */ - - struct hash_table_u64 *ssa_to_alias; - struct set *leftover_ssa_to_alias; - - /* Actual SSA-to-register for RA */ - struct hash_table_u64 *ssa_to_register; - /* Mapping of hashes computed from NIR indices to the sequential temp indices ultimately used in MIR */ struct hash_table_u64 *hash_to_temp; int temp_count; diff --git a/src/panfrost/midgard/midgard_compile.c b/src/panfrost/midgard/midgard_compile.c index 37436e133fc..d7914b2d05b 100644 --- a/src/panfrost/midgard/midgard_compile.c +++ b/src/panfrost/midgard/midgard_compile.c @@ -518,26 +518,6 @@ optimise_nir(nir_shader *nir) NIR_PASS(progress, nir, nir_opt_dce); } -/* Front-half of aliasing the SSA slots, merely by inserting the flag in the - * appropriate hash table. Intentional off-by-one to avoid confusing NULL with - * r0. 
See the comments in compiler_context */ - -static void -alias_ssa(compiler_context *ctx, int dest, int src) -{ - _mesa_hash_table_u64_insert(ctx->ssa_to_alias, dest + 1, (void *) ((uintptr_t) src + 1)); - _mesa_set_add(ctx->leftover_ssa_to_alias, (void *) (uintptr_t) (dest + 1)); -} - -/* ...or undo it, after which the original index will be used (dummy move should be emitted alongside this) */ - -static void -unalias_ssa(compiler_context *ctx, int dest) -{ - _mesa_hash_table_u64_remove(ctx->ssa_to_alias, dest + 1); - /* TODO: Remove from leftover or no? */ -} - /* Do not actually emit a load; instead, cache the constant for inlining */ static void @@ -1555,11 +1535,6 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr, int reg = ctx->texture_op_count & 1; int in_reg = reg, out_reg = reg; - /* Make room for the reg */ - - if (ctx->texture_index[reg] > -1) - unalias_ssa(ctx, ctx->texture_index[reg]); - int texture_index = instr->texture_index; int sampler_index = texture_index; @@ -1990,32 +1965,6 @@ embedded_to_inline_constant(compiler_context *ctx) } } -/* Map normal SSA sources to other SSA sources / fixed registers (like - * uniforms) */ - -static void -map_ssa_to_alias(compiler_context *ctx, int *ref) -{ - /* Sign is used quite deliberately for unused */ - if (*ref < 0) - return; - - unsigned int alias = (uintptr_t) _mesa_hash_table_u64_search(ctx->ssa_to_alias, *ref + 1); - - if (alias) { - /* Remove entry in leftovers to avoid a redundant fmov */ - - struct set_entry *leftover = _mesa_set_search(ctx->leftover_ssa_to_alias, ((void *) (uintptr_t) (*ref + 1))); - - if (leftover) - _mesa_set_remove(ctx->leftover_ssa_to_alias, leftover); - - /* Assign the alias map */ - *ref = alias - 1; - return; - } -} - /* Basic dead code elimination on the MIR itself, which cleans up e.g. 
the * texture pipeline */ @@ -2212,32 +2161,6 @@ midgard_opt_pos_propagate(compiler_context *ctx, midgard_block *block) return progress; } -/* If there are leftovers after the below pass, emit actual fmov - * instructions for the slow-but-correct path */ - -static void -emit_leftover_move(compiler_context *ctx) -{ - set_foreach(ctx->leftover_ssa_to_alias, leftover) { - int base = ((uintptr_t) leftover->key) - 1; - int mapped = base; - - map_ssa_to_alias(ctx, &mapped); - EMIT(mov, mapped, blank_alu_src, base); - } -} - -static void -actualise_ssa_to_alias(compiler_context *ctx) -{ - mir_foreach_instr(ctx, ins) { - map_ssa_to_alias(ctx, &ins->ssa_args.src0); - map_ssa_to_alias(ctx, &ins->ssa_args.src1); - } - - emit_leftover_move(ctx); -} - static void emit_fragment_epilogue(compiler_context *ctx) { @@ -2288,9 +2211,6 @@ emit_block(compiler_context *ctx, nir_block *block) inline_alu_constants(ctx); embedded_to_inline_constant(ctx); - /* Perform heavylifting for aliasing */ - actualise_ssa_to_alias(ctx); - /* Append fragment shader epilogue (value writeout) */ if (ctx->stage == MESA_SHADER_FRAGMENT) { if (block == nir_impl_last_block(ctx->func->impl)) { @@ -2503,10 +2423,8 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl /* Initialize at a global (not block) level hash tables */ ctx->ssa_constants = _mesa_hash_table_u64_create(NULL); - ctx->ssa_to_alias = _mesa_hash_table_u64_create(NULL); ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL); ctx->sysval_to_id = _mesa_hash_table_u64_create(NULL); - ctx->leftover_ssa_to_alias = _mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal); /* Record the varying mapping for the command stream's bookkeeping */ |