summary refs log tree commit diff stats
path: root/src/compiler/nir
diff options
context:
space:
mode:
Diffstat (limited to 'src/compiler/nir')
-rw-r--r--  src/compiler/nir/README                           |  2
-rw-r--r--  src/compiler/nir/nir.c                            |  2
-rw-r--r--  src/compiler/nir/nir.h                            | 14
-rw-r--r--  src/compiler/nir/nir_builder.h                    |  2
-rw-r--r--  src/compiler/nir/nir_clone.c                      |  8
-rw-r--r--  src/compiler/nir/nir_control_flow.c               |  2
-rw-r--r--  src/compiler/nir/nir_control_flow.h               |  8
-rw-r--r--  src/compiler/nir/nir_from_ssa.c                   | 10
-rw-r--r--  src/compiler/nir/nir_loop_analyze.c               |  4
-rw-r--r--  src/compiler/nir/nir_lower_clip.c                 |  6
-rw-r--r--  src/compiler/nir/nir_lower_drawpixels.c           |  4
-rw-r--r--  src/compiler/nir/nir_lower_io.c                   |  2
-rw-r--r--  src/compiler/nir/nir_lower_io_types.c             |  2
-rw-r--r--  src/compiler/nir/nir_lower_tex.c                  |  2
-rw-r--r--  src/compiler/nir/nir_lower_wpos_center.c          |  2
-rw-r--r--  src/compiler/nir/nir_lower_wpos_ytransform.c      |  2
-rw-r--r--  src/compiler/nir/nir_move_vec_src_uses_to_dest.c  |  2
-rw-r--r--  src/compiler/nir/nir_opt_cse.c                    |  2
-rw-r--r--  src/compiler/nir/nir_opt_peephole_select.c        |  2
-rw-r--r--  src/compiler/nir/nir_opt_remove_phis.c            |  4
-rw-r--r--  src/compiler/nir/nir_search_helpers.h             |  4
21 files changed, 43 insertions, 43 deletions
diff --git a/src/compiler/nir/README b/src/compiler/nir/README
index 2c81db9db61..7b312a37c30 100644
--- a/src/compiler/nir/README
+++ b/src/compiler/nir/README
@@ -1,5 +1,5 @@
New IR, or NIR, is an IR for Mesa intended to sit below GLSL IR and Mesa IR.
-Its design inherits from the various IR's that Mesa has used in the past, as
+Its design inherits from the various IRs that Mesa has used in the past, as
well as Direct3D assembly, and it includes a few new ideas as well. It is a
flat (in terms of using instructions instead of expressions), typeless IR,
similar to TGSI and Mesa IR. It also supports SSA (although it doesn't require
diff --git a/src/compiler/nir/nir.c b/src/compiler/nir/nir.c
index 937b6300624..43fa60f0b6e 100644
--- a/src/compiler/nir/nir.c
+++ b/src/compiler/nir/nir.c
@@ -345,7 +345,7 @@ nir_block_create(nir_shader *shader)
_mesa_key_pointer_equal);
block->imm_dom = NULL;
/* XXX maybe it would be worth it to defer allocation? This
- * way it doesn't get allocated for shader ref's that never run
+ * way it doesn't get allocated for shader refs that never run
* nir_calc_dominance? For example, state-tracker creates an
* initial IR, clones that, runs appropriate lowering pass, passes
* to driver which does common lowering/opt, and then stores ref
diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index 405e6739436..c1294543dd0 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -368,13 +368,13 @@ typedef struct nir_register {
*/
bool is_packed;
- /** set of nir_src's where this register is used (read from) */
+ /** set of nir_srcs where this register is used (read from) */
struct list_head uses;
- /** set of nir_dest's where this register is defined (written to) */
+ /** set of nir_dests where this register is defined (written to) */
struct list_head defs;
- /** set of nir_if's where this register is used as a condition */
+ /** set of nir_ifs where this register is used as a condition */
struct list_head if_uses;
} nir_register;
@@ -453,10 +453,10 @@ typedef struct nir_ssa_def {
nir_instr *parent_instr;
- /** set of nir_instr's where this register is used (read from) */
+ /** set of nir_instrs where this register is used (read from) */
struct list_head uses;
- /** set of nir_if's where this register is used as a condition */
+ /** set of nir_ifs where this register is used as a condition */
struct list_head if_uses;
uint8_t num_components;
@@ -1422,7 +1422,7 @@ typedef struct {
typedef struct {
nir_instr instr;
- /* A list of nir_parallel_copy_entry's. The sources of all of the
+ /* A list of nir_parallel_copy_entries. The sources of all of the
* entries are copied to the corresponding destinations "in parallel".
* In other words, if we have two entries: a -> b and b -> a, the values
* get swapped.
@@ -1506,7 +1506,7 @@ typedef struct nir_block {
unsigned num_dom_children;
struct nir_block **dom_children;
- /* Set of nir_block's on the dominance frontier of this block */
+ /* Set of nir_blocks on the dominance frontier of this block */
struct set *dom_frontier;
/*
diff --git a/src/compiler/nir/nir_builder.h b/src/compiler/nir/nir_builder.h
index a4f15b6d335..7dbf8efbb34 100644
--- a/src/compiler/nir/nir_builder.h
+++ b/src/compiler/nir/nir_builder.h
@@ -495,7 +495,7 @@ nir_ssa_for_src(nir_builder *build, nir_src src, int num_components)
}
/**
- * Similar to nir_ssa_for_src(), but for alu src's, respecting the
+ * Similar to nir_ssa_for_src(), but for alu srcs, respecting the
* nir_alu_src's swizzle.
*/
static inline nir_ssa_def *
diff --git a/src/compiler/nir/nir_clone.c b/src/compiler/nir/nir_clone.c
index a0ba8f7a4a0..e2204c4e72a 100644
--- a/src/compiler/nir/nir_clone.c
+++ b/src/compiler/nir/nir_clone.c
@@ -140,7 +140,7 @@ nir_constant_clone(const nir_constant *c, nir_variable *nvar)
return nc;
}
-/* NOTE: for cloning nir_variable's, bypass nir_variable_create to avoid
+/* NOTE: for cloning nir_variables, bypass nir_variable_create to avoid
* having to deal with locals and globals separately:
*/
nir_variable *
@@ -185,7 +185,7 @@ clone_var_list(clone_state *state, struct exec_list *dst,
}
}
-/* NOTE: for cloning nir_register's, bypass nir_global/local_reg_create()
+/* NOTE: for cloning nir_registers, bypass nir_global/local_reg_create()
* to avoid having to deal with locals and globals separately:
*/
static nir_register *
@@ -724,7 +724,7 @@ clone_function(clone_state *state, const nir_function *fxn, nir_shader *ns)
/* At first glance, it looks like we should clone the function_impl here.
* However, call instructions need to be able to reference at least the
- * function and those will get processed as we clone the function_impl's.
+ * function and those will get processed as we clone the function_impls.
* We stop here and do function_impls as a second pass.
*/
@@ -752,7 +752,7 @@ nir_shader_clone(void *mem_ctx, const nir_shader *s)
clone_function(&state, fxn, ns);
/* Only after all functions are cloned can we clone the actual function
- * implementations. This is because nir_call_instr's need to reference the
+ * implementations. This is because nir_call_instrs need to reference the
* functions of other functions and we don't know what order the functions
* will have in the list.
*/
diff --git a/src/compiler/nir/nir_control_flow.c b/src/compiler/nir/nir_control_flow.c
index d33819d56da..1622b35a6c9 100644
--- a/src/compiler/nir/nir_control_flow.c
+++ b/src/compiler/nir/nir_control_flow.c
@@ -203,7 +203,7 @@ split_block_beginning(nir_block *block)
}
/* Any phi nodes must stay part of the new block, or else their
- * sourcse will be messed up. This will reverse the order of the phi's, but
+ * sources will be messed up. This will reverse the order of the phis, but
* order shouldn't matter.
*/
nir_foreach_instr_safe(instr, block) {
diff --git a/src/compiler/nir/nir_control_flow.h b/src/compiler/nir/nir_control_flow.h
index a487eb04660..2ea460e5df3 100644
--- a/src/compiler/nir/nir_control_flow.h
+++ b/src/compiler/nir/nir_control_flow.h
@@ -36,11 +36,11 @@ extern "C" {
/** NIR Control Flow Modification
*
- * This file contains various API's that make modifying control flow in NIR,
+ * This file contains various APIs that make modifying control flow in NIR,
* while maintaining the invariants checked by the validator, much easier.
* There are two parts to this:
*
- * 1. Inserting control flow (if's and loops) in various places, for creating
+ * 1. Inserting control flow (ifs and loops) in various places, for creating
* IR either from scratch or as part of some lowering pass.
* 2. Taking existing pieces of the IR and either moving them around or
* deleting them.
@@ -93,12 +93,12 @@ nir_cf_node_insert_end(struct exec_list *list, nir_cf_node *node)
* predecessors:
*
* 1) After an if statement, if neither branch ends in a jump.
- * 2) After a loop, if there are multiple break's.
+ * 2) After a loop, if there are multiple breaks.
* 3) At the beginning of a loop.
*
* For #1, the phi node is considered to be part of the if, and for #2 and
* #3 the phi node is considered to be part of the loop. This allows us to
- * keep phi's intact, but it means that phi nodes cannot be separated from
+ * keep phis intact, but it means that phi nodes cannot be separated from
* the control flow they come from. For example, extracting an if without
* extracting all the phi nodes after it is not allowed, and neither is
* extracting only some of the phi nodes at the beginning of a block. It
diff --git a/src/compiler/nir/nir_from_ssa.c b/src/compiler/nir/nir_from_ssa.c
index a8d3e648974..27e94f823b0 100644
--- a/src/compiler/nir/nir_from_ssa.c
+++ b/src/compiler/nir/nir_from_ssa.c
@@ -71,7 +71,7 @@ ssa_def_dominates(nir_ssa_def *a, nir_ssa_def *b)
* Each SSA definition is associated with a merge_node and the association
* is represented by a combination of a hash table and the "def" parameter
* in the merge_node structure. The merge_set stores a linked list of
- * merge_node's in dominence order of the ssa definitions. (Since the
+ * merge_nodes in dominance order of the ssa definitions. (Since the
* liveness analysis pass indexes the SSA values in dominence order for us,
* this is an easy thing to keep up.) It is assumed that no pair of the
* nodes in a given set interfere. Merging two sets or checking for
@@ -313,7 +313,7 @@ isolate_phi_nodes_block(nir_block *block, void *dead_ctx)
last_phi_instr = instr;
}
- /* If we don't have any phi's, then there's nothing for us to do. */
+ /* If we don't have any phis, then there's nothing for us to do. */
if (last_phi_instr == NULL)
return true;
@@ -558,7 +558,7 @@ emit_copy(nir_builder *b, nir_src src, nir_src dest_src)
nir_builder_instr_insert(b, &mov->instr);
}
-/* Resolves a single parallel copy operation into a sequence of mov's
+/* Resolves a single parallel copy operation into a sequence of movs
*
* This is based on Algorithm 1 from "Revisiting Out-of-SSA Translation for
* Correctness, Code Quality, and Efficiency" by Boissinot et. al..
@@ -851,10 +851,10 @@ place_phi_read(nir_shader *shader, nir_register *reg,
nir_instr_insert(nir_after_block_before_jump(block), &mov->instr);
}
-/** Lower all of the phi nodes in a block to imov's to and from a register
+/** Lower all of the phi nodes in a block to imovs to and from a register
*
* This provides a very quick-and-dirty out-of-SSA pass that you can run on a
- * single block to convert all of it's phis to a register and some imov's.
+ * single block to convert all of its phis to a register and some imovs.
* The code that is generated, while not optimal for actual codegen in a
* back-end, is easy to generate, correct, and will turn into the same set of
* phis after you call regs_to_ssa and do some copy propagation.
diff --git a/src/compiler/nir/nir_loop_analyze.c b/src/compiler/nir/nir_loop_analyze.c
index 6afad9e6033..84da035052d 100644
--- a/src/compiler/nir/nir_loop_analyze.c
+++ b/src/compiler/nir/nir_loop_analyze.c
@@ -218,7 +218,7 @@ compute_induction_information(loop_info_state *state)
*/
assert(!var->in_control_flow && var->type != invariant);
- /* We are only interested in checking phi's for the basic induction
+ /* We are only interested in checking phis for the basic induction
* variable case as its simple to detect. All basic induction variables
* have a phi node
*/
@@ -707,7 +707,7 @@ static void
get_loop_info(loop_info_state *state, nir_function_impl *impl)
{
/* Initialize all variables to "outside_loop". This also marks defs
- * invariant and constant if they are nir_instr_type_load_const's
+ * invariant and constant if they are nir_instr_type_load_consts
*/
nir_foreach_block(block, impl) {
nir_foreach_instr(instr, block)
diff --git a/src/compiler/nir/nir_lower_clip.c b/src/compiler/nir/nir_lower_clip.c
index 62540ac11d9..8268e4128f6 100644
--- a/src/compiler/nir/nir_lower_clip.c
+++ b/src/compiler/nir/nir_lower_clip.c
@@ -31,7 +31,7 @@
/* Generates the lowering code for user-clip-planes, generating CLIPDIST
* from UCP[n] + CLIPVERTEX or POSITION. Additionally, an optional pass
- * for fragment shaders to insert conditional kill's based on the inter-
+ * for fragment shaders to insert conditional kills based on the inter-
* polated CLIPDIST
*
* NOTE: should be run after nir_lower_outputs_to_temporaries() (or at
@@ -163,7 +163,7 @@ lower_clip_vs(nir_function_impl *impl, unsigned ucp_enables,
* should be only a single predecessor block to end_block, which
* makes the perfect place to insert the clipdist calculations.
*
- * NOTE: in case of early return's, these would have to be lowered
+ * NOTE: in case of early returns, these would have to be lowered
* to jumps to end_block predecessor in a previous pass. Not sure
* if there is a good way to sanity check this, but for now the
* users of this pass don't support sub-routines.
@@ -193,7 +193,7 @@ lower_clip_vs(nir_function_impl *impl, unsigned ucp_enables,
nir_metadata_preserve(impl, nir_metadata_dominance);
}
-/* ucp_enables is bitmask of enabled ucp's. Actual ucp values are
+/* ucp_enables is bitmask of enabled ucps. Actual ucp values are
* passed in to shader via user_clip_plane system-values
*/
void
diff --git a/src/compiler/nir/nir_lower_drawpixels.c b/src/compiler/nir/nir_lower_drawpixels.c
index 51c52d038c7..e221fd5ce0f 100644
--- a/src/compiler/nir/nir_lower_drawpixels.c
+++ b/src/compiler/nir/nir_lower_drawpixels.c
@@ -214,11 +214,11 @@ lower_drawpixels_block(lower_drawpixels_state *state, nir_block *block)
nir_variable *var = dvar->var;
if (var->data.location == VARYING_SLOT_COL0) {
- /* gl_Color should not have array/struct deref's: */
+ /* gl_Color should not have array/struct derefs: */
assert(dvar->deref.child == NULL);
lower_color(state, intr);
} else if (var->data.location == VARYING_SLOT_TEX0) {
- /* gl_TexCoord should not have array/struct deref's: */
+ /* gl_TexCoord should not have array/struct derefs: */
assert(dvar->deref.child == NULL);
lower_texcoord(state, intr);
}
diff --git a/src/compiler/nir/nir_lower_io.c b/src/compiler/nir/nir_lower_io.c
index 7dff26b9c0f..1156b80ea8f 100644
--- a/src/compiler/nir/nir_lower_io.c
+++ b/src/compiler/nir/nir_lower_io.c
@@ -49,7 +49,7 @@ nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
nir_foreach_variable(var, var_list) {
/*
- * UBO's have their own address spaces, so don't count them towards the
+ * UBOs have their own address spaces, so don't count them towards the
* number of global uniforms
*/
if ((var->data.mode == nir_var_uniform || var->data.mode == nir_var_shader_storage) &&
diff --git a/src/compiler/nir/nir_lower_io_types.c b/src/compiler/nir/nir_lower_io_types.c
index 6b29f819632..fb84b185be3 100644
--- a/src/compiler/nir/nir_lower_io_types.c
+++ b/src/compiler/nir/nir_lower_io_types.c
@@ -68,7 +68,7 @@ get_new_var(struct lower_io_types_state *state, nir_variable *var,
nvar->data = var->data;
nvar->data.location += off;
- /* nir_variable_create is too clever for it's own good: */
+ /* nir_variable_create is too clever for its own good: */
exec_node_remove(&nvar->node);
exec_node_self_link(&nvar->node); /* no delinit() :-( */
diff --git a/src/compiler/nir/nir_lower_tex.c b/src/compiler/nir/nir_lower_tex.c
index 70054679955..352d1499bc8 100644
--- a/src/compiler/nir/nir_lower_tex.c
+++ b/src/compiler/nir/nir_lower_tex.c
@@ -645,7 +645,7 @@ swizzle_result(nir_builder *b, nir_tex_instr *tex, const uint8_t swizzle[4])
if (swizzle[0] < 4 && swizzle[1] < 4 &&
swizzle[2] < 4 && swizzle[3] < 4) {
unsigned swiz[4] = { swizzle[0], swizzle[1], swizzle[2], swizzle[3] };
- /* We have no 0's or 1's, just emit a swizzling MOV */
+ /* We have no 0s or 1s, just emit a swizzling MOV */
swizzled = nir_swizzle(b, &tex->dest.ssa, swiz, 4, false);
} else {
nir_ssa_def *srcs[4];
diff --git a/src/compiler/nir/nir_lower_wpos_center.c b/src/compiler/nir/nir_lower_wpos_center.c
index 7b70af30146..5a70848eb88 100644
--- a/src/compiler/nir/nir_lower_wpos_center.c
+++ b/src/compiler/nir/nir_lower_wpos_center.c
@@ -71,7 +71,7 @@ lower_wpos_center_block(nir_builder *b, nir_block *block)
if (var->data.mode == nir_var_shader_in &&
var->data.location == VARYING_SLOT_POS) {
- /* gl_FragCoord should not have array/struct deref's: */
+ /* gl_FragCoord should not have array/struct derefs: */
assert(dvar->deref.child == NULL);
add_half_to_fragcoord(b, intr);
progress = true;
diff --git a/src/compiler/nir/nir_lower_wpos_ytransform.c b/src/compiler/nir/nir_lower_wpos_ytransform.c
index f211c733d0d..873d259d7fc 100644
--- a/src/compiler/nir/nir_lower_wpos_ytransform.c
+++ b/src/compiler/nir/nir_lower_wpos_ytransform.c
@@ -304,7 +304,7 @@ lower_wpos_ytransform_block(lower_wpos_ytransform_state *state, nir_block *block
if (var->data.mode == nir_var_shader_in &&
var->data.location == VARYING_SLOT_POS) {
- /* gl_FragCoord should not have array/struct deref's: */
+ /* gl_FragCoord should not have array/struct derefs: */
assert(dvar->deref.child == NULL);
lower_fragcoord(state, intr);
} else if (var->data.mode == nir_var_system_value &&
diff --git a/src/compiler/nir/nir_move_vec_src_uses_to_dest.c b/src/compiler/nir/nir_move_vec_src_uses_to_dest.c
index 76917752ca4..5ad17b8507d 100644
--- a/src/compiler/nir/nir_move_vec_src_uses_to_dest.c
+++ b/src/compiler/nir/nir_move_vec_src_uses_to_dest.c
@@ -41,7 +41,7 @@
* ssa_2 = fadd(ssa_1.x, ssa_1.y)
*
* While this is "worse" because it adds a bunch of unneeded dependencies, it
- * actually makes it much easier for vec4-based backends to coalesce the MOV's
+ * actually makes it much easier for vec4-based backends to coalesce the MOVs
* that result from the vec4 operation because it doesn't have to worry about
* quite as many reads.
*/
diff --git a/src/compiler/nir/nir_opt_cse.c b/src/compiler/nir/nir_opt_cse.c
index 71953b81bf7..db6bb9a4a22 100644
--- a/src/compiler/nir/nir_opt_cse.c
+++ b/src/compiler/nir/nir_opt_cse.c
@@ -33,7 +33,7 @@
*/
/*
- * Visits and CSE's the given block and all its descendants in the dominance
+ * Visits and CSEs the given block and all its descendants in the dominance
* tree recursively. Note that the instr_set is guaranteed to only ever
* contain instructions that dominate the current block.
*/
diff --git a/src/compiler/nir/nir_opt_peephole_select.c b/src/compiler/nir/nir_opt_peephole_select.c
index 87a8ee0fb0e..4ca4f80d788 100644
--- a/src/compiler/nir/nir_opt_peephole_select.c
+++ b/src/compiler/nir/nir_opt_peephole_select.c
@@ -128,7 +128,7 @@ block_check_for_allowed_instrs(nir_block *block, unsigned *count, bool alu_ok)
if (!list_empty(&mov->dest.dest.ssa.if_uses))
return false;
- /* The only uses of this definition must be phi's in the successor */
+ /* The only uses of this definition must be phis in the successor */
nir_foreach_use(use, &mov->dest.dest.ssa) {
if (use->parent_instr->type != nir_instr_type_phi ||
use->parent_instr->block != block->successors[0])
diff --git a/src/compiler/nir/nir_opt_remove_phis.c b/src/compiler/nir/nir_opt_remove_phis.c
index acaa6e1911c..b20ff729156 100644
--- a/src/compiler/nir/nir_opt_remove_phis.c
+++ b/src/compiler/nir/nir_opt_remove_phis.c
@@ -115,11 +115,11 @@ remove_phis_block(nir_block *block, nir_builder *b)
assert(def != NULL);
if (mov) {
- /* If the sources were all mov's from the same source with the same
+ /* If the sources were all movs from the same source with the same
* swizzle, then we can't just pick a random move because it may not
* dominate the phi node. Instead, we need to emit our own move after
* the phi which uses the shared source, and rewrite uses of the phi
- * to use the move instead. This is ok, because while the mov's may
+ * to use the move instead. This is ok, because while the movs may
* not all dominate the phi node, their shared source does.
*/
diff --git a/src/compiler/nir/nir_search_helpers.h b/src/compiler/nir/nir_search_helpers.h
index ddaff52311a..faa3bdfd12b 100644
--- a/src/compiler/nir/nir_search_helpers.h
+++ b/src/compiler/nir/nir_search_helpers.h
@@ -41,7 +41,7 @@ is_pos_power_of_two(nir_alu_instr *instr, unsigned src, unsigned num_components,
{
nir_const_value *val = nir_src_as_const_value(instr->src[src].src);
- /* only constant src's: */
+ /* only constant srcs: */
if (!val)
return false;
@@ -71,7 +71,7 @@ is_neg_power_of_two(nir_alu_instr *instr, unsigned src, unsigned num_components,
{
nir_const_value *val = nir_src_as_const_value(instr->src[src].src);
- /* only constant src's: */
+ /* only constant srcs: */
if (!val)
return false;