author     Matt Turner <[email protected]>    2018-12-10 14:49:49 -0800
committer  Matt Turner <[email protected]>    2019-01-09 16:42:41 -0800
commit     7e4e9da90d8b4a000357efa5c0e0027fd00c28b9 (patch)
tree       7e131c90eda0402e445420a5a1e5abf863310d27 /src
parent     2b801b66686ec893d1335c971724deb5a624c76f (diff)
intel/compiler: Prevent warnings in the following patch
The next patch replaces an unsigned bitfield with a plain unsigned, which triggers gcc to begin warning on signed/unsigned comparisons. Keeping this patch separate from the actual move preserves bisectability and avoids introducing any additional warnings in the interim.

Reviewed-by: Kenneth Graunke <[email protected]>
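For context, a minimal sketch of the warning class this patch heads off (hypothetical names, compiled with -Wall -Wextra; this is not code from the patch): once a register number is a plain unsigned, gcc's -Wsign-compare flags comparisons against signed loop indices, which is why the series switches counters to unsigned and uses ~0u in place of -1 as the "invalid" sentinel.

    /* Hypothetical illustration only, not part of the patch. */
    static unsigned
    first_identity_entry(const unsigned *remap, unsigned count)
    {
       for (int i = 0; i < count; i++) {   /* -Wsign-compare: 'int' vs 'unsigned' */
          if (remap[i] == (unsigned)i)
             return i;
       }
       return ~0u;                         /* sentinel replacing the old -1 */
    }

    /* Making the index unsigned, as the patch does throughout, is warning-free: */
    static unsigned
    first_identity_entry_fixed(const unsigned *remap, unsigned count)
    {
       for (unsigned i = 0; i < count; i++) {
          if (remap[i] == i)
             return i;
       }
       return ~0u;
    }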
Diffstat (limited to 'src')
-rw-r--r--  src/intel/compiler/brw_fs.cpp                    18
-rw-r--r--  src/intel/compiler/brw_fs.h                       2
-rw-r--r--  src/intel/compiler/brw_fs_nir.cpp                 4
-rw-r--r--  src/intel/compiler/brw_fs_reg_allocate.cpp        2
-rw-r--r--  src/intel/compiler/brw_fs_register_coalesce.cpp   8
-rw-r--r--  src/intel/compiler/brw_schedule_instructions.cpp  6
-rw-r--r--  src/intel/compiler/brw_vec4.cpp                  12
-rw-r--r--  src/intel/compiler/brw_vec4.h                     2
-rw-r--r--  src/intel/compiler/brw_vec4_reg_allocate.cpp     12
-rw-r--r--  src/intel/compiler/brw_vec4_visitor.cpp           4
-rw-r--r--  src/intel/compiler/gen6_gs_visitor.cpp            4
11 files changed, 38 insertions, 36 deletions
diff --git a/src/intel/compiler/brw_fs.cpp b/src/intel/compiler/brw_fs.cpp
index eb4fc327d5c..06a4b645650 100644
--- a/src/intel/compiler/brw_fs.cpp
+++ b/src/intel/compiler/brw_fs.cpp
@@ -2874,8 +2874,8 @@ fs_visitor::opt_register_renaming()
bool progress = false;
int depth = 0;
- int remap[alloc.count];
- memset(remap, -1, sizeof(int) * alloc.count);
+ unsigned remap[alloc.count];
+ memset(remap, ~0u, sizeof(unsigned) * alloc.count);
foreach_block_and_inst(block, fs_inst, inst, cfg) {
if (inst->opcode == BRW_OPCODE_IF || inst->opcode == BRW_OPCODE_DO) {
@@ -2888,20 +2888,20 @@ fs_visitor::opt_register_renaming()
/* Rewrite instruction sources. */
for (int i = 0; i < inst->sources; i++) {
if (inst->src[i].file == VGRF &&
- remap[inst->src[i].nr] != -1 &&
+ remap[inst->src[i].nr] != ~0u &&
remap[inst->src[i].nr] != inst->src[i].nr) {
inst->src[i].nr = remap[inst->src[i].nr];
progress = true;
}
}
- const int dst = inst->dst.nr;
+ const unsigned dst = inst->dst.nr;
if (depth == 0 &&
inst->dst.file == VGRF &&
alloc.sizes[inst->dst.nr] * REG_SIZE == inst->size_written &&
!inst->is_partial_write()) {
- if (remap[dst] == -1) {
+ if (remap[dst] == ~0u) {
remap[dst] = dst;
} else {
remap[dst] = alloc.allocate(regs_written(inst));
@@ -2909,7 +2909,7 @@ fs_visitor::opt_register_renaming()
progress = true;
}
} else if (inst->dst.file == VGRF &&
- remap[dst] != -1 &&
+ remap[dst] != ~0u &&
remap[dst] != dst) {
inst->dst.nr = remap[dst];
progress = true;
@@ -2920,7 +2920,7 @@ fs_visitor::opt_register_renaming()
invalidate_live_intervals();
for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
- if (delta_xy[i].file == VGRF && remap[delta_xy[i].nr] != -1) {
+ if (delta_xy[i].file == VGRF && remap[delta_xy[i].nr] != ~0u) {
delta_xy[i].nr = remap[delta_xy[i].nr];
}
}
@@ -3608,7 +3608,7 @@ void
fs_visitor::insert_gen4_post_send_dependency_workarounds(bblock_t *block, fs_inst *inst)
{
int write_len = regs_written(inst);
- int first_write_grf = inst->dst.nr;
+ unsigned first_write_grf = inst->dst.nr;
bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
assert(write_len < (int)sizeof(needs_dep) - 1);
@@ -4783,7 +4783,7 @@ lower_sampler_logical_send_gen7(const fs_builder &bld, fs_inst *inst, opcode op,
bld.MOV(sources[length++], min_lod);
}
- int mlen;
+ unsigned mlen;
if (reg_width == 2)
mlen = length * reg_width - header_size;
else
diff --git a/src/intel/compiler/brw_fs.h b/src/intel/compiler/brw_fs.h
index d141a9237df..68287bcdcea 100644
--- a/src/intel/compiler/brw_fs.h
+++ b/src/intel/compiler/brw_fs.h
@@ -119,7 +119,7 @@ public:
void setup_payload_interference(struct ra_graph *g, int payload_reg_count,
int first_payload_node);
int choose_spill_reg(struct ra_graph *g);
- void spill_reg(int spill_reg);
+ void spill_reg(unsigned spill_reg);
void split_virtual_grfs();
bool compact_virtual_grfs();
void assign_constant_locations();
diff --git a/src/intel/compiler/brw_fs_nir.cpp b/src/intel/compiler/brw_fs_nir.cpp
index fe3dff016ad..dc4f1d4879c 100644
--- a/src/intel/compiler/brw_fs_nir.cpp
+++ b/src/intel/compiler/brw_fs_nir.cpp
@@ -1840,7 +1840,7 @@ fs_visitor::emit_gs_control_data_bits(const fs_reg &vertex_count)
}
/* Store the control data bits in the message payload and send it. */
- int mlen = 2;
+ unsigned mlen = 2;
if (channel_mask.file != BAD_FILE)
mlen += 4; /* channel masks, plus 3 extra copies of the data */
if (per_slot_offset.file != BAD_FILE)
@@ -1848,7 +1848,7 @@ fs_visitor::emit_gs_control_data_bits(const fs_reg &vertex_count)
fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
fs_reg *sources = ralloc_array(mem_ctx, fs_reg, mlen);
- int i = 0;
+ unsigned i = 0;
sources[i++] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
if (per_slot_offset.file != BAD_FILE)
sources[i++] = per_slot_offset;
diff --git a/src/intel/compiler/brw_fs_reg_allocate.cpp b/src/intel/compiler/brw_fs_reg_allocate.cpp
index 73b8b7841f5..678afe6bab4 100644
--- a/src/intel/compiler/brw_fs_reg_allocate.cpp
+++ b/src/intel/compiler/brw_fs_reg_allocate.cpp
@@ -912,7 +912,7 @@ fs_visitor::choose_spill_reg(struct ra_graph *g)
}
void
-fs_visitor::spill_reg(int spill_reg)
+fs_visitor::spill_reg(unsigned spill_reg)
{
int size = alloc.sizes[spill_reg];
unsigned int spill_offset = last_scratch;
diff --git a/src/intel/compiler/brw_fs_register_coalesce.cpp b/src/intel/compiler/brw_fs_register_coalesce.cpp
index 952276faed8..4fe6773da54 100644
--- a/src/intel/compiler/brw_fs_register_coalesce.cpp
+++ b/src/intel/compiler/brw_fs_register_coalesce.cpp
@@ -158,7 +158,7 @@ fs_visitor::register_coalesce()
int src_size = 0;
int channels_remaining = 0;
- int src_reg = -1, dst_reg = -1;
+ unsigned src_reg = ~0u, dst_reg = ~0u;
int dst_reg_offset[MAX_VGRF_SIZE];
fs_inst *mov[MAX_VGRF_SIZE];
int dst_var[MAX_VGRF_SIZE];
@@ -221,7 +221,7 @@ fs_visitor::register_coalesce()
if (dst_reg_offset[i] != dst_reg_offset[0] + i) {
/* Registers are out-of-order. */
can_coalesce = false;
- src_reg = -1;
+ src_reg = ~0u;
break;
}
@@ -231,7 +231,7 @@ fs_visitor::register_coalesce()
if (!can_coalesce_vars(live_intervals, cfg, inst,
dst_var[i], src_var[i])) {
can_coalesce = false;
- src_reg = -1;
+ src_reg = ~0u;
break;
}
}
@@ -278,7 +278,7 @@ fs_visitor::register_coalesce()
MAX2(live_intervals->end[dst_var[i]],
live_intervals->end[src_var[i]]);
}
- src_reg = -1;
+ src_reg = ~0u;
}
if (progress) {
diff --git a/src/intel/compiler/brw_schedule_instructions.cpp b/src/intel/compiler/brw_schedule_instructions.cpp
index f29671859cb..95e4b873501 100644
--- a/src/intel/compiler/brw_schedule_instructions.cpp
+++ b/src/intel/compiler/brw_schedule_instructions.cpp
@@ -430,7 +430,7 @@ schedule_node::set_latency_gen7(bool is_haswell)
class instruction_scheduler {
public:
instruction_scheduler(backend_shader *s, int grf_count,
- int hw_reg_count, int block_count,
+ unsigned hw_reg_count, int block_count,
instruction_scheduler_mode mode)
{
this->bs = s;
@@ -511,7 +511,7 @@ public:
bool post_reg_alloc;
int instructions_to_schedule;
int grf_count;
- int hw_reg_count;
+ unsigned hw_reg_count;
int reg_pressure;
int block_idx;
exec_list instructions;
@@ -665,7 +665,7 @@ fs_instruction_scheduler::setup_liveness(cfg_t *cfg)
int payload_last_use_ip[hw_reg_count];
v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);
- for (int i = 0; i < hw_reg_count; i++) {
+ for (unsigned i = 0; i < hw_reg_count; i++) {
if (payload_last_use_ip[i] == -1)
continue;
diff --git a/src/intel/compiler/brw_vec4.cpp b/src/intel/compiler/brw_vec4.cpp
index 8cbbdd4ec70..4489c682d01 100644
--- a/src/intel/compiler/brw_vec4.cpp
+++ b/src/intel/compiler/brw_vec4.cpp
@@ -409,7 +409,7 @@ vec4_visitor::opt_vector_float()
bool progress = false;
foreach_block(block, cfg) {
- int last_reg = -1, last_offset = -1;
+ unsigned last_reg = ~0u, last_offset = ~0u;
enum brw_reg_file last_reg_file = BAD_FILE;
uint8_t imm[4] = { 0 };
@@ -442,7 +442,7 @@ vec4_visitor::opt_vector_float()
need_type = BRW_REGISTER_TYPE_F;
}
} else {
- last_reg = -1;
+ last_reg = ~0u;
}
/* If this wasn't a MOV, or the destination register doesn't match,
@@ -470,7 +470,7 @@ vec4_visitor::opt_vector_float()
}
inst_count = 0;
- last_reg = -1;
+ last_reg = ~0u;
writemask = 0;
dest_type = BRW_REGISTER_TYPE_F;
@@ -1397,8 +1397,10 @@ vec4_visitor::opt_register_coalesce()
* in the register instead.
*/
if (to_mrf && scan_inst->mlen > 0) {
- if (inst->dst.nr >= scan_inst->base_mrf &&
- inst->dst.nr < scan_inst->base_mrf + scan_inst->mlen) {
+ unsigned start = scan_inst->base_mrf;
+ unsigned end = scan_inst->base_mrf + scan_inst->mlen;
+
+ if (inst->dst.nr >= start && inst->dst.nr < end) {
break;
}
} else {
diff --git a/src/intel/compiler/brw_vec4.h b/src/intel/compiler/brw_vec4.h
index 8ef0b5319b0..4b24e2a2db8 100644
--- a/src/intel/compiler/brw_vec4.h
+++ b/src/intel/compiler/brw_vec4.h
@@ -132,7 +132,7 @@ public:
bool reg_allocate();
void evaluate_spill_costs(float *spill_costs, bool *no_spill);
int choose_spill_reg(struct ra_graph *g);
- void spill_reg(int spill_reg);
+ void spill_reg(unsigned spill_reg);
void move_grf_array_access_to_scratch();
void move_uniform_array_access_to_pull_constants();
void move_push_constants_to_pull_constants();
diff --git a/src/intel/compiler/brw_vec4_reg_allocate.cpp b/src/intel/compiler/brw_vec4_reg_allocate.cpp
index bbad46ee6c0..3e3791574db 100644
--- a/src/intel/compiler/brw_vec4_reg_allocate.cpp
+++ b/src/intel/compiler/brw_vec4_reg_allocate.cpp
@@ -502,18 +502,18 @@ vec4_visitor::choose_spill_reg(struct ra_graph *g)
}
void
-vec4_visitor::spill_reg(int spill_reg_nr)
+vec4_visitor::spill_reg(unsigned spill_reg_nr)
{
assert(alloc.sizes[spill_reg_nr] == 1 || alloc.sizes[spill_reg_nr] == 2);
- unsigned int spill_offset = last_scratch;
+ unsigned spill_offset = last_scratch;
last_scratch += alloc.sizes[spill_reg_nr];
/* Generate spill/unspill instructions for the objects being spilled. */
- int scratch_reg = -1;
+ unsigned scratch_reg = ~0u;
foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
- for (unsigned int i = 0; i < 3; i++) {
+ for (unsigned i = 0; i < 3; i++) {
if (inst->src[i].file == VGRF && inst->src[i].nr == spill_reg_nr) {
- if (scratch_reg == -1 ||
+ if (scratch_reg == ~0u ||
!can_use_scratch_for_source(inst, i, scratch_reg)) {
/* We need to unspill anyway so make sure we read the full vec4
* in any case. This way, the cached register can be reused
@@ -529,7 +529,7 @@ vec4_visitor::spill_reg(int spill_reg_nr)
dst_reg(temp), inst->src[i], spill_offset);
temp.offset = inst->src[i].offset;
}
- assert(scratch_reg != -1);
+ assert(scratch_reg != ~0u);
inst->src[i].nr = scratch_reg;
}
}
diff --git a/src/intel/compiler/brw_vec4_visitor.cpp b/src/intel/compiler/brw_vec4_visitor.cpp
index aea64c5b54d..30b86ec302b 100644
--- a/src/intel/compiler/brw_vec4_visitor.cpp
+++ b/src/intel/compiler/brw_vec4_visitor.cpp
@@ -1337,8 +1337,8 @@ vec4_visitor::emit_urb_slot(dst_reg reg, int varying)
}
}
-static int
-align_interleaved_urb_mlen(const struct gen_device_info *devinfo, int mlen)
+static unsigned
+align_interleaved_urb_mlen(const struct gen_device_info *devinfo, unsigned mlen)
{
if (devinfo->gen >= 6) {
/* URB data written (does not include the message header reg) must
diff --git a/src/intel/compiler/gen6_gs_visitor.cpp b/src/intel/compiler/gen6_gs_visitor.cpp
index c9571cce6fd..2742fbacfa1 100644
--- a/src/intel/compiler/gen6_gs_visitor.cpp
+++ b/src/intel/compiler/gen6_gs_visitor.cpp
@@ -274,8 +274,8 @@ gen6_gs_visitor::emit_urb_write_header(int mrf)
emit(GS_OPCODE_SET_DWORD_2, dst_reg(MRF, mrf), flags_data);
}
-static int
-align_interleaved_urb_mlen(int mlen)
+static unsigned
+align_interleaved_urb_mlen(unsigned mlen)
{
/* URB data written (does not include the message header reg) must
* be a multiple of 256 bits, or 2 VS registers. See vol5c.5,