author     Matt Turner <[email protected]>   2015-11-02 11:26:16 -0800
committer  Matt Turner <[email protected]>   2015-11-19 11:12:24 -0800
commit     3ccc41ecfc5e9345a1c291748d8840984f7413ae (patch)
tree       5c9bcb8a010e1741053b079e7b0a8314874107d4 /src/mesa/drivers
parent     c15a407eb49d3b26bdbf039816636adb184c276a (diff)
i965/fs: Replace fs_reg(imm) constructors with brw_imm_*().
Cuts 10k of .text, of which only 776 bytes are the fs_reg constructor
implementations themselves.
text data bss dec hex filename
5204535 214112 27784 5446431 531b1f i965_dri.so before
5193977 214112 27784 5435873 52f1e1 i965_dri.so after
Reviewed-by: Emil Velikov <[email protected]>
Reviewed-by: Kenneth Graunke <[email protected]>
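The shape of the change is mechanical: every call site that built an immediate operand through one of the fs_reg(float), fs_reg(int32_t), fs_reg(uint32_t) or vector-float constructors now calls the corresponding brw_imm_*() helper (brw_imm_f(), brw_imm_d(), brw_imm_ud(), ...) instead, and the result is adopted through the fs_reg(struct brw_reg) constructor that remains. Two representative hunks, excerpted from the diff below:

    -   bld.ADD(wpos, this->pixel_x, fs_reg(0.5f));
    +   bld.ADD(wpos, this->pixel_x, brw_imm_f(0.5f));

    -   inst->src[0] = fs_reg(0u);
    +   inst->src[0] = brw_imm_ud(0u);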
Diffstat (limited to 'src/mesa/drivers')
 src/mesa/drivers/dri/i965/brw_blorp_blit_eu.cpp        |   2
 src/mesa/drivers/dri/i965/brw_fs.cpp                   | 100
 src/mesa/drivers/dri/i965/brw_fs_builder.h             |   4
 src/mesa/drivers/dri/i965/brw_fs_combine_constants.cpp |   2
 src/mesa/drivers/dri/i965/brw_fs_nir.cpp               | 150
 src/mesa/drivers/dri/i965/brw_fs_surface_builder.cpp   |  49
 src/mesa/drivers/dri/i965/brw_fs_visitor.cpp           |  42
 src/mesa/drivers/dri/i965/brw_ir_fs.h                  |   5
 src/mesa/drivers/dri/i965/test_fs_cmod_propagation.cpp |  30
 9 files changed, 167 insertions(+), 217 deletions(-)
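The reason the .text savings (≈10 KB) dwarf the 776 bytes of deleted constructor code is that the brw_imm_*() helpers are small static inline functions that fill in a plain struct brw_reg immediate, so the compiler can materialize each constant in place instead of emitting a call to an out-of-line fs_reg constructor at every call site. As a rough illustration, the helpers in brw_reg.h have approximately this shape (paraphrased sketch, not the exact Mesa source; field names and details may differ):

    /* Paraphrased sketch: build an IMM-file brw_reg holding a float. */
    static inline struct brw_reg
    brw_imm_f(float f)
    {
       struct brw_reg imm = brw_imm_reg(BRW_REGISTER_TYPE_F); /* file = IMM */
       imm.f = f;
       return imm;
    }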
diff --git a/src/mesa/drivers/dri/i965/brw_blorp_blit_eu.cpp b/src/mesa/drivers/dri/i965/brw_blorp_blit_eu.cpp index 5308d175416..e684bdbb72c 100644 --- a/src/mesa/drivers/dri/i965/brw_blorp_blit_eu.cpp +++ b/src/mesa/drivers/dri/i965/brw_blorp_blit_eu.cpp @@ -85,7 +85,7 @@ brw_blorp_eu_emitter::emit_texture_lookup(const struct brw_reg &dst, unsigned msg_length) { fs_inst *inst = new (mem_ctx) fs_inst(op, 16, dst, brw_message_reg(base_mrf), - fs_reg(0u)); + brw_imm_ud(0u)); inst->base_mrf = base_mrf; inst->mlen = msg_length; diff --git a/src/mesa/drivers/dri/i965/brw_fs.cpp b/src/mesa/drivers/dri/i965/brw_fs.cpp index 72a21587a4f..e9c990d4308 100644 --- a/src/mesa/drivers/dri/i965/brw_fs.cpp +++ b/src/mesa/drivers/dri/i965/brw_fs.cpp @@ -186,7 +186,7 @@ fs_visitor::VARYING_PULL_CONSTANT_LOAD(const fs_builder &bld, * the redundant ones. */ fs_reg vec4_offset = vgrf(glsl_type::int_type); - bld.ADD(vec4_offset, varying_offset, fs_reg(const_offset & ~3)); + bld.ADD(vec4_offset, varying_offset, brw_imm_ud(const_offset & ~3)); int scale = 1; if (devinfo->gen == 4 && bld.dispatch_width() == 8) { @@ -374,54 +374,6 @@ fs_reg::fs_reg() this->file = BAD_FILE; } -/** Immediate value constructor. */ -fs_reg::fs_reg(float f) -{ - init(); - this->file = IMM; - this->type = BRW_REGISTER_TYPE_F; - this->stride = 0; - this->f = f; -} - -/** Immediate value constructor. */ -fs_reg::fs_reg(int32_t i) -{ - init(); - this->file = IMM; - this->type = BRW_REGISTER_TYPE_D; - this->stride = 0; - this->d = i; -} - -/** Immediate value constructor. */ -fs_reg::fs_reg(uint32_t u) -{ - init(); - this->file = IMM; - this->type = BRW_REGISTER_TYPE_UD; - this->stride = 0; - this->ud = u; -} - -/** Vector float immediate value constructor. */ -fs_reg::fs_reg(uint8_t vf[4]) -{ - init(); - this->file = IMM; - this->type = BRW_REGISTER_TYPE_VF; - memcpy(&this->ud, vf, sizeof(unsigned)); -} - -/** Vector float immediate value constructor. */ -fs_reg::fs_reg(uint8_t vf0, uint8_t vf1, uint8_t vf2, uint8_t vf3) -{ - init(); - this->file = IMM; - this->type = BRW_REGISTER_TYPE_VF; - this->ud = (vf0 << 0) | (vf1 << 8) | (vf2 << 16) | (vf3 << 24); -} - fs_reg::fs_reg(struct brw_reg reg) : backend_reg(reg) { @@ -590,7 +542,7 @@ fs_visitor::emit_shader_time_end() fs_reg reset = shader_end_time; reset.set_smear(2); set_condmod(BRW_CONDITIONAL_Z, - ibld.AND(ibld.null_reg_ud(), reset, fs_reg(1u))); + ibld.AND(ibld.null_reg_ud(), reset, brw_imm_ud(1u))); ibld.IF(BRW_PREDICATE_NORMAL); fs_reg start = shader_start_time; @@ -605,11 +557,11 @@ fs_visitor::emit_shader_time_end() * is 2 cycles. Remove that overhead, so I can forget about that when * trying to determine the time taken for single instructions. 
*/ - cbld.ADD(diff, diff, fs_reg(-2u)); + cbld.ADD(diff, diff, brw_imm_ud(-2u)); SHADER_TIME_ADD(cbld, 0, diff); - SHADER_TIME_ADD(cbld, 1, fs_reg(1u)); + SHADER_TIME_ADD(cbld, 1, brw_imm_ud(1u)); ibld.emit(BRW_OPCODE_ELSE); - SHADER_TIME_ADD(cbld, 2, fs_reg(1u)); + SHADER_TIME_ADD(cbld, 2, brw_imm_ud(1u)); ibld.emit(BRW_OPCODE_ENDIF); } @@ -619,7 +571,7 @@ fs_visitor::SHADER_TIME_ADD(const fs_builder &bld, fs_reg value) { int index = shader_time_index * 3 + shader_time_subindex; - fs_reg offset = fs_reg(index * SHADER_TIME_STRIDE); + struct brw_reg offset = brw_imm_d(index * SHADER_TIME_STRIDE); fs_reg payload; if (dispatch_width == 8) @@ -1032,7 +984,7 @@ fs_visitor::emit_fragcoord_interpolation(bool pixel_center_integer, if (pixel_center_integer) { bld.MOV(wpos, this->pixel_x); } else { - bld.ADD(wpos, this->pixel_x, fs_reg(0.5f)); + bld.ADD(wpos, this->pixel_x, brw_imm_f(0.5f)); } wpos = offset(wpos, bld, 1); @@ -1048,7 +1000,7 @@ fs_visitor::emit_fragcoord_interpolation(bool pixel_center_integer, offset += key->drawable_height - 1.0f; } - bld.ADD(wpos, pixel_y, fs_reg(offset)); + bld.ADD(wpos, pixel_y, brw_imm_f(offset)); } wpos = offset(wpos, bld, 1); @@ -1225,7 +1177,7 @@ fs_visitor::emit_frontfacing_interpolation() fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W)); g0.negate = true; - bld.ASR(*reg, g0, fs_reg(15)); + bld.ASR(*reg, g0, brw_imm_d(15)); } else { /* Bit 31 of g1.6 is 0 if the polygon is front facing. We want to create * a boolean result from this (1/true or 0/false). @@ -1240,7 +1192,7 @@ fs_visitor::emit_frontfacing_interpolation() fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D)); g1_6.negate = true; - bld.ASR(*reg, g1_6, fs_reg(31)); + bld.ASR(*reg, g1_6, brw_imm_d(31)); } return reg; @@ -1257,7 +1209,7 @@ fs_visitor::compute_sample_position(fs_reg dst, fs_reg int_sample_pos) /* Convert int_sample_pos to floating point */ bld.MOV(dst, int_sample_pos); /* Scale to the range [0, 1] */ - bld.MUL(dst, dst, fs_reg(1 / 16.0f)); + bld.MUL(dst, dst, brw_imm_f(1 / 16.0f)); } else { /* From ARB_sample_shading specification: @@ -1265,7 +1217,7 @@ fs_visitor::compute_sample_position(fs_reg dst, fs_reg int_sample_pos) * rasterization is disabled, gl_SamplePosition will always be * (0.5, 0.5). */ - bld.MOV(dst, fs_reg(0.5f)); + bld.MOV(dst, brw_imm_f(0.5f)); } } @@ -1360,8 +1312,8 @@ fs_visitor::emit_sampleid_setup() abld.exec_all().group(1, 0) .AND(t1, fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)), - fs_reg(sspi_mask)); - abld.exec_all().group(1, 0).SHR(t1, t1, fs_reg(5)); + brw_imm_ud(sspi_mask)); + abld.exec_all().group(1, 0).SHR(t1, t1, brw_imm_d(5)); /* This works for both SIMD8 and SIMD16 */ abld.exec_all().group(4, 0) @@ -1376,7 +1328,7 @@ fs_visitor::emit_sampleid_setup() * "When rendering to a non-multisample buffer, or if multisample * rasterization is disabled, gl_SampleID will always be zero." */ - abld.MOV(*reg, fs_reg(0)); + abld.MOV(*reg, brw_imm_d(0)); } return reg; @@ -2047,16 +1999,16 @@ fs_visitor::demote_pull_constants() /* Generate a pull load into dst. 
*/ if (inst->src[i].reladdr) { VARYING_PULL_CONSTANT_LOAD(ibld, dst, - fs_reg(index), + brw_imm_ud(index), *inst->src[i].reladdr, pull_index); inst->src[i].reladdr = NULL; inst->src[i].stride = 1; } else { const fs_builder ubld = ibld.exec_all().group(8, 0); - fs_reg offset = fs_reg((unsigned)(pull_index * 4) & ~15); + struct brw_reg offset = brw_imm_ud((unsigned)(pull_index * 4) & ~15); ubld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, - dst, fs_reg(index), offset); + dst, brw_imm_ud(index), offset); inst->src[i].set_smear(pull_index & 3); } brw_mark_surface_used(prog_data, index); @@ -2748,7 +2700,7 @@ fs_visitor::eliminate_find_live_channel() case SHADER_OPCODE_FIND_LIVE_CHANNEL: if (depth == 0) { inst->opcode = BRW_OPCODE_MOV; - inst->src[0] = fs_reg(0u); + inst->src[0] = brw_imm_ud(0u); inst->sources = 1; inst->force_writemask_all = true; progress = true; @@ -3660,7 +3612,7 @@ lower_sampler_logical_send_gen4(const fs_builder &bld, fs_inst *inst, opcode op, (has_lod || shadow_c.file != BAD_FILE || (op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8))) { for (unsigned i = coord_components; i < 3; i++) - bld.MOV(offset(msg_end, bld, i), fs_reg(0.0f)); + bld.MOV(offset(msg_end, bld, i), brw_imm_f(0.0f)); msg_end = offset(msg_end, bld, 3 - coord_components); } @@ -3717,7 +3669,7 @@ lower_sampler_logical_send_gen4(const fs_builder &bld, fs_inst *inst, opcode op, /* There's no plain shadow compare message, so we use shadow * compare with a bias of 0.0. */ - bld.MOV(msg_end, fs_reg(0.0f)); + bld.MOV(msg_end, brw_imm_f(0.0f)); msg_end = offset(msg_end, bld, 1); } @@ -3811,7 +3763,7 @@ lower_sampler_logical_send_gen5(const fs_builder &bld, fs_inst *inst, opcode op, case SHADER_OPCODE_TXF_CMS: msg_lod = offset(msg_coords, bld, 3); /* lod */ - bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), fs_reg(0u)); + bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), brw_imm_ud(0u)); /* sample index */ bld.MOV(retype(offset(msg_lod, bld, 1), BRW_REGISTER_TYPE_UD), sample_index); msg_end = offset(msg_lod, bld, 2); @@ -3891,7 +3843,7 @@ lower_sampler_logical_send_gen7(const fs_builder &bld, fs_inst *inst, opcode op, if (bld.shader->stage != MESA_SHADER_FRAGMENT && op == SHADER_OPCODE_TEX) { op = SHADER_OPCODE_TXL; - lod = fs_reg(0.0f); + lod = brw_imm_f(0.0f); } /* Set up the LOD info */ @@ -4102,7 +4054,7 @@ emit_surface_header(const fs_builder &bld, const fs_reg &sample_mask) { fs_builder ubld = bld.exec_all().group(8, 0); const fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_UD); - ubld.MOV(dst, fs_reg(0)); + ubld.MOV(dst, brw_imm_d(0)); ubld.MOV(component(dst, 7), sample_mask); return dst; } @@ -4244,7 +4196,7 @@ fs_visitor::lower_logical_sends() case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL: lower_surface_logical_send(ibld, inst, SHADER_OPCODE_TYPED_SURFACE_READ, - fs_reg(0xffff)); + brw_imm_d(0xffff)); break; case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL: @@ -5233,7 +5185,7 @@ fs_visitor::run_gs() */ if (gs_compile->control_data_header_size_bits <= 32) { const fs_builder abld = bld.annotate("initialize control data bits"); - abld.MOV(this->control_data_bits, fs_reg(0u)); + abld.MOV(this->control_data_bits, brw_imm_ud(0u)); } } diff --git a/src/mesa/drivers/dri/i965/brw_fs_builder.h b/src/mesa/drivers/dri/i965/brw_fs_builder.h index 22b2f22073f..dd3c383a17d 100644 --- a/src/mesa/drivers/dri/i965/brw_fs_builder.h +++ b/src/mesa/drivers/dri/i965/brw_fs_builder.h @@ -225,7 +225,7 @@ namespace brw { sample_mask_reg() const { if (shader->stage != MESA_SHADER_FRAGMENT) { - return src_reg(0xffff); + return 
brw_imm_d(0xffff); } else if (((brw_wm_prog_data *)shader->stage_prog_data)->uses_kill) { return brw_flag_reg(0, 1); } else { @@ -548,7 +548,7 @@ namespace brw { const dst_reg x_times_one_minus_a = vgrf(dst.type); MUL(y_times_a, y, a); - ADD(one_minus_a, negate(a), src_reg(1.0f)); + ADD(one_minus_a, negate(a), brw_imm_f(1.0f)); MUL(x_times_one_minus_a, x, src_reg(one_minus_a)); return ADD(dst, src_reg(x_times_one_minus_a), src_reg(y_times_a)); } diff --git a/src/mesa/drivers/dri/i965/brw_fs_combine_constants.cpp b/src/mesa/drivers/dri/i965/brw_fs_combine_constants.cpp index 0c115f50748..c3ad7ad4771 100644 --- a/src/mesa/drivers/dri/i965/brw_fs_combine_constants.cpp +++ b/src/mesa/drivers/dri/i965/brw_fs_combine_constants.cpp @@ -279,7 +279,7 @@ fs_visitor::opt_combine_constants() imm->block->last_non_control_flow_inst()->next); const fs_builder ibld = bld.at(imm->block, n).exec_all().group(1, 0); - ibld.MOV(reg, fs_reg(imm->val)); + ibld.MOV(reg, brw_imm_f(imm->val)); imm->nr = reg.nr; imm->subreg_offset = reg.subreg_offset; diff --git a/src/mesa/drivers/dri/i965/brw_fs_nir.cpp b/src/mesa/drivers/dri/i965/brw_fs_nir.cpp index ebdcb3a4246..8364bbfc0f4 100644 --- a/src/mesa/drivers/dri/i965/brw_fs_nir.cpp +++ b/src/mesa/drivers/dri/i965/brw_fs_nir.cpp @@ -208,7 +208,7 @@ emit_system_values_block(nir_block *block, void *void_visitor) const fs_builder abld = v->bld.annotate("gl_InvocationID", NULL); fs_reg g1(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD)); fs_reg iid = abld.vgrf(BRW_REGISTER_TYPE_UD, 1); - abld.SHR(iid, g1, fs_reg(27u)); + abld.SHR(iid, g1, brw_imm_ud(27u)); *reg = iid; } break; @@ -454,7 +454,7 @@ fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr, tmp.subreg_offset = 2; tmp.stride = 2; - fs_inst *or_inst = bld.OR(tmp, g0, fs_reg(0x3f80)); + fs_inst *or_inst = bld.OR(tmp, g0, brw_imm_d(0x3f80)); or_inst->src[1].type = BRW_REGISTER_TYPE_UW; tmp.type = BRW_REGISTER_TYPE_D; @@ -479,9 +479,9 @@ fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr, g1_6.negate = true; } - bld.OR(tmp, g1_6, fs_reg(0x3f800000)); + bld.OR(tmp, g1_6, brw_imm_d(0x3f800000)); } - bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, fs_reg(0xbf800000)); + bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, brw_imm_d(0xbf800000)); return true; } @@ -594,14 +594,14 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not * zero. */ - bld.CMP(bld.null_reg_f(), op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ); + bld.CMP(bld.null_reg_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ); fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD); op[0].type = BRW_REGISTER_TYPE_UD; result.type = BRW_REGISTER_TYPE_UD; - bld.AND(result_int, op[0], fs_reg(0x80000000u)); + bld.AND(result_int, op[0], brw_imm_ud(0x80000000u)); - inst = bld.OR(result_int, result_int, fs_reg(0x3f800000u)); + inst = bld.OR(result_int, result_int, brw_imm_ud(0x3f800000u)); inst->predicate = BRW_PREDICATE_NORMAL; if (instr->dest.saturate) { inst = bld.MOV(result, result); @@ -615,9 +615,9 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) * -> non-negative val generates 0x00000000. * Predicated OR sets 1 if val is positive. 
*/ - bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_G); - bld.ASR(result, op[0], fs_reg(31)); - inst = bld.OR(result, result, fs_reg(1)); + bld.CMP(bld.null_reg_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_G); + bld.ASR(result, op[0], brw_imm_d(31)); + inst = bld.OR(result, result, brw_imm_d(1)); inst->predicate = BRW_PREDICATE_NORMAL; break; @@ -665,21 +665,21 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) case nir_op_fddy: if (fs_key->high_quality_derivatives) { inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0], - fs_reg(fs_key->render_to_fbo)); + brw_imm_d(fs_key->render_to_fbo)); } else { inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0], - fs_reg(fs_key->render_to_fbo)); + brw_imm_d(fs_key->render_to_fbo)); } inst->saturate = instr->dest.saturate; break; case nir_op_fddy_fine: inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0], - fs_reg(fs_key->render_to_fbo)); + brw_imm_d(fs_key->render_to_fbo)); inst->saturate = instr->dest.saturate; break; case nir_op_fddy_coarse: inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0], - fs_reg(fs_key->render_to_fbo)); + brw_imm_d(fs_key->render_to_fbo)); inst->saturate = instr->dest.saturate; break; @@ -828,10 +828,10 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) break; case nir_op_f2b: - bld.CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ); + bld.CMP(result, op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ); break; case nir_op_i2b: - bld.CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ); + bld.CMP(result, op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ); break; case nir_op_ftrunc: @@ -931,9 +931,9 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then * subtract the result from 31 to convert the MSB count into an LSB count. 
*/ - bld.CMP(bld.null_reg_d(), result, fs_reg(-1), BRW_CONDITIONAL_NZ); + bld.CMP(bld.null_reg_d(), result, brw_imm_d(-1), BRW_CONDITIONAL_NZ); - inst = bld.ADD(result, result, fs_reg(31)); + inst = bld.ADD(result, result, brw_imm_d(31)); inst->predicate = BRW_PREDICATE_NORMAL; inst->src[0].negate = true; break; @@ -986,7 +986,7 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) if (optimize_frontfacing_ternary(instr, result)) return; - bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_NZ); + bld.CMP(bld.null_reg_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ); inst = bld.SEL(result, op[1], op[2]); inst->predicate = BRW_PREDICATE_NORMAL; break; @@ -1001,7 +1001,7 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) if (devinfo->gen <= 5 && (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) { fs_reg masked = vgrf(glsl_type::int_type); - bld.AND(masked, result, fs_reg(1)); + bld.AND(masked, result, brw_imm_d(1)); masked.negate = true; bld.MOV(retype(result, BRW_REGISTER_TYPE_D), masked); } @@ -1014,7 +1014,7 @@ fs_visitor::nir_emit_load_const(const fs_builder &bld, fs_reg reg = bld.vgrf(BRW_REGISTER_TYPE_D, instr->def.num_components); for (unsigned i = 0; i < instr->def.num_components; i++) - bld.MOV(offset(reg, bld, i), fs_reg(instr->value.i[i])); + bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value.i[i])); nir_ssa_values[instr->def.index] = reg; } @@ -1042,7 +1042,7 @@ fs_reg_for_nir_reg(fs_visitor *v, nir_register *nir_reg, reg.reladdr = new(v->mem_ctx) fs_reg(v->vgrf(glsl_type::int_type)); v->bld.MUL(*reg.reladdr, v->get_nir_src(*indirect), - fs_reg(multiplier)); + brw_imm_d(multiplier)); } return reg; @@ -1108,12 +1108,12 @@ fs_visitor::get_nir_image_deref(const nir_deref_var *deref) */ bld.emit_minmax(tmp, retype(get_nir_src(deref_array->indirect), BRW_REGISTER_TYPE_UD), - fs_reg(size - base - 1), BRW_CONDITIONAL_L); + brw_imm_ud(size - base - 1), BRW_CONDITIONAL_L); } else { bld.MOV(tmp, get_nir_src(deref_array->indirect)); } - bld.MUL(tmp, tmp, fs_reg(element_size)); + bld.MUL(tmp, tmp, brw_imm_ud(element_size)); if (image.reladdr) bld.ADD(*image.reladdr, *image.reladdr, tmp); else @@ -1232,7 +1232,7 @@ intexp2(const fs_builder &bld, const fs_reg &x) fs_reg result = bld.vgrf(x.type, 1); fs_reg one = bld.vgrf(x.type, 1); - bld.MOV(one, retype(fs_reg(1), one.type)); + bld.MOV(one, retype(brw_imm_d(1), one.type)); bld.SHL(result, one, x); return result; } @@ -1285,7 +1285,7 @@ fs_visitor::emit_gs_end_primitive(const nir_src &vertex_count_nir_src) /* control_data_bits |= 1 << ((vertex_count - 1) % 32) */ fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1); - abld.ADD(prev_count, vertex_count, fs_reg(0xffffffffu)); + abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu)); fs_reg mask = intexp2(abld, prev_count); /* Note: we're relying on the fact that the GEN SHL instruction only pays * attention to the lower 5 bits of its second source argument, so on this @@ -1356,26 +1356,26 @@ fs_visitor::emit_gs_control_data_bits(const fs_reg &vertex_count) if (opcode != SHADER_OPCODE_URB_WRITE_SIMD8) { fs_reg dword_index = bld.vgrf(BRW_REGISTER_TYPE_UD, 1); fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1); - abld.ADD(prev_count, vertex_count, fs_reg(0xffffffffu)); + abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu)); unsigned log2_bits_per_vertex = _mesa_fls(gs_compile->control_data_bits_per_vertex); - abld.SHR(dword_index, prev_count, fs_reg(6u - log2_bits_per_vertex)); + abld.SHR(dword_index, 
prev_count, brw_imm_ud(6u - log2_bits_per_vertex)); if (per_slot_offset.file != BAD_FILE) { /* Set the per-slot offset to dword_index / 4, so that we'll write to * the appropriate OWord within the control data header. */ - abld.SHR(per_slot_offset, dword_index, fs_reg(2u)); + abld.SHR(per_slot_offset, dword_index, brw_imm_ud(2u)); } /* Set the channel masks to 1 << (dword_index % 4), so that we'll * write to the appropriate DWORD within the OWORD. */ fs_reg channel = bld.vgrf(BRW_REGISTER_TYPE_UD, 1); - fwa_bld.AND(channel, dword_index, fs_reg(3u)); + fwa_bld.AND(channel, dword_index, brw_imm_ud(3u)); channel_mask = intexp2(fwa_bld, channel); /* Then the channel masks need to be in bits 23:16. */ - fwa_bld.SHL(channel_mask, channel_mask, fs_reg(16u)); + fwa_bld.SHL(channel_mask, channel_mask, brw_imm_ud(16u)); } /* Store the control data bits in the message payload and send it. */ @@ -1435,11 +1435,11 @@ fs_visitor::set_gs_stream_control_data_bits(const fs_reg &vertex_count, /* reg::sid = stream_id */ fs_reg sid = bld.vgrf(BRW_REGISTER_TYPE_UD, 1); - abld.MOV(sid, fs_reg(stream_id)); + abld.MOV(sid, brw_imm_ud(stream_id)); /* reg:shift_count = 2 * (vertex_count - 1) */ fs_reg shift_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1); - abld.SHL(shift_count, vertex_count, fs_reg(1u)); + abld.SHL(shift_count, vertex_count, brw_imm_ud(1u)); /* Note: we're relying on the fact that the GEN SHL instruction only pays * attention to the lower 5 bits of its second source argument, so on this @@ -1510,14 +1510,14 @@ fs_visitor::emit_gs_vertex(const nir_src &vertex_count_nir_src, */ fs_inst *inst = abld.AND(bld.null_reg_d(), vertex_count, - fs_reg(32u / gs_compile->control_data_bits_per_vertex - 1u)); + brw_imm_ud(32u / gs_compile->control_data_bits_per_vertex - 1u)); inst->conditional_mod = BRW_CONDITIONAL_Z; abld.IF(BRW_PREDICATE_NORMAL); /* If vertex_count is 0, then no control data bits have been * accumulated yet, so we can skip emitting them. */ - abld.CMP(bld.null_reg_d(), vertex_count, fs_reg(0u), + abld.CMP(bld.null_reg_d(), vertex_count, brw_imm_ud(0u), BRW_CONDITIONAL_NEQ); abld.IF(BRW_PREDICATE_NORMAL); emit_gs_control_data_bits(vertex_count); @@ -1530,7 +1530,7 @@ fs_visitor::emit_gs_vertex(const nir_src &vertex_count_nir_src, * effect of any call to EndPrimitive() that the shader may have * made before outputting its first vertex. 
*/ - inst = abld.MOV(this->control_data_bits, fs_reg(0u)); + inst = abld.MOV(this->control_data_bits, brw_imm_ud(0u)); inst->force_writemask_all = true; abld.emit(BRW_OPCODE_ENDIF); } @@ -1613,7 +1613,7 @@ fs_visitor::emit_gs_input_load(const fs_reg &dst, /* sequence = <7, 6, 5, 4, 3, 2, 1, 0> */ bld.MOV(sequence, fs_reg(brw_imm_v(0x76543210))); /* channel_offsets = 4 * sequence = <28, 24, 20, 16, 12, 8, 4, 0> */ - bld.SHL(channel_offsets, sequence, fs_reg(2u)); + bld.SHL(channel_offsets, sequence, brw_imm_ud(2u)); /* Convert vertex_index to bytes (multiply by 32) */ bld.SHL(vertex_offset_bytes, retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD), @@ -1627,7 +1627,7 @@ fs_visitor::emit_gs_input_load(const fs_reg &dst, bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle, fs_reg(brw_vec8_grf(first_icp_handle, 0)), fs_reg(icp_offset_bytes), - fs_reg(nir->info.gs.vertices_in * REG_SIZE)); + brw_imm_ud(nir->info.gs.vertices_in * REG_SIZE)); } fs_inst *inst; @@ -1797,7 +1797,7 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld, fs_inst *cmp; if (instr->intrinsic == nir_intrinsic_discard_if) { cmp = bld.CMP(bld.null_reg_f(), get_nir_src(instr->src[0]), - fs_reg(0), BRW_CONDITIONAL_Z); + brw_imm_d(0), BRW_CONDITIONAL_Z); } else { fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW)); @@ -1845,7 +1845,7 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld, FS_OPCODE_INTERPOLATE_AT_CENTROID, dst_xy, fs_reg(), /* src */ - fs_reg(0u), + brw_imm_ud(0u), interpolation); break; @@ -1859,7 +1859,7 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld, FS_OPCODE_INTERPOLATE_AT_SAMPLE, dst_xy, fs_reg(), /* src */ - fs_reg(msg_data), + brw_imm_ud(msg_data), interpolation); } else { const fs_reg sample_src = retype(get_nir_src(instr->src[0]), @@ -1868,7 +1868,8 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld, if (nir_src_is_dynamically_uniform(instr->src[0])) { const fs_reg sample_id = bld.emit_uniformize(sample_src); const fs_reg msg_data = vgrf(glsl_type::uint_type); - bld.exec_all().group(1, 0).SHL(msg_data, sample_id, fs_reg(4u)); + bld.exec_all().group(1, 0) + .SHL(msg_data, sample_id, brw_imm_ud(4u)); emit_pixel_interpolater_send(bld, FS_OPCODE_INTERPOLATE_AT_SAMPLE, dst_xy, @@ -1894,7 +1895,8 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld, sample_src, sample_id, BRW_CONDITIONAL_EQ); const fs_reg msg_data = vgrf(glsl_type::uint_type); - bld.exec_all().group(1, 0).SHL(msg_data, sample_id, fs_reg(4u)); + bld.exec_all().group(1, 0) + .SHL(msg_data, sample_id, brw_imm_ud(4u)); fs_inst *inst = emit_pixel_interpolater_send(bld, FS_OPCODE_INTERPOLATE_AT_SAMPLE, @@ -1925,7 +1927,7 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld, FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET, dst_xy, fs_reg(), /* src */ - fs_reg(off_x | (off_y << 4)), + brw_imm_ud(off_x | (off_y << 4)), interpolation); } else { fs_reg src = vgrf(glsl_type::ivec2_type); @@ -1933,7 +1935,7 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld, BRW_REGISTER_TYPE_F); for (int i = 0; i < 2; i++) { fs_reg temp = vgrf(glsl_type::float_type); - bld.MUL(temp, offset(offset_src, bld, i), fs_reg(16.0f)); + bld.MUL(temp, offset(offset_src, bld, i), brw_imm_f(16.0f)); fs_reg itemp = vgrf(glsl_type::int_type); bld.MOV(itemp, temp); /* float to int */ @@ -1953,7 +1955,7 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld, * FRAGMENT_INTERPOLATION_OFFSET_BITS" */ set_condmod(BRW_CONDITIONAL_L, - bld.SEL(offset(src, bld, i), itemp, fs_reg(7))); + bld.SEL(offset(src, bld, i), itemp, 
brw_imm_d(7))); } const enum opcode opcode = FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET; @@ -1961,7 +1963,7 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld, opcode, dst_xy, src, - fs_reg(0u), + brw_imm_ud(0u), interpolation); } break; @@ -2021,14 +2023,14 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld, cs_prog_data->uses_num_work_groups = true; - fs_reg surf_index = fs_reg(surface); + fs_reg surf_index = brw_imm_ud(surface); brw_mark_surface_used(prog_data, surface); /* Read the 3 GLuint components of gl_NumWorkGroups */ for (unsigned i = 0; i < 3; i++) { fs_reg read_result = emit_untyped_read(bld, surf_index, - fs_reg(i << 2), + brw_imm_ud(i << 2), 1 /* dims */, 1 /* size */, BRW_PREDICATE_NONE); read_result.type = dest.type; @@ -2068,16 +2070,16 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr /* Emit a surface read or atomic op. */ switch (instr->intrinsic) { case nir_intrinsic_atomic_counter_read: - tmp = emit_untyped_read(bld, fs_reg(surface), offset, 1, 1); + tmp = emit_untyped_read(bld, brw_imm_ud(surface), offset, 1, 1); break; case nir_intrinsic_atomic_counter_inc: - tmp = emit_untyped_atomic(bld, fs_reg(surface), offset, fs_reg(), + tmp = emit_untyped_atomic(bld, brw_imm_ud(surface), offset, fs_reg(), fs_reg(), 1, 1, BRW_AOP_INC); break; case nir_intrinsic_atomic_counter_dec: - tmp = emit_untyped_atomic(bld, fs_reg(surface), offset, fs_reg(), + tmp = emit_untyped_atomic(bld, brw_imm_ud(surface), offset, fs_reg(), fs_reg(), 1, 1, BRW_AOP_PREDEC); break; @@ -2219,14 +2221,14 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr for (unsigned c = 0; c < info->dest_components; ++c) { if ((int)c >= type->coordinate_components()) { bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c), - fs_reg(1)); + brw_imm_d(1)); } else if (c == 1 && is_1d_array_image) { bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c), offset(size, bld, 2)); } else if (c == 2 && is_cube_array_image) { bld.emit(SHADER_OPCODE_INT_QUOTIENT, offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c), - offset(size, bld, c), fs_reg(6)); + offset(size, bld, c), brw_imm_d(6)); } else { bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c), offset(size, bld, c)); @@ -2238,7 +2240,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr case nir_intrinsic_image_samples: /* The driver does not support multi-sampled images. */ - bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), fs_reg(1)); + bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), brw_imm_d(1)); break; case nir_intrinsic_load_uniform_indirect: @@ -2269,7 +2271,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr if (const_index) { const unsigned index = stage_prog_data->binding_table.ubo_start + const_index->u[0]; - surf_index = fs_reg(index); + surf_index = brw_imm_ud(index); brw_mark_surface_used(prog_data, index); } else { /* The block index is not a constant. Evaluate the index expression @@ -2278,7 +2280,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr */ surf_index = vgrf(glsl_type::uint_type); bld.ADD(surf_index, get_nir_src(instr->src[0]), - fs_reg(stage_prog_data->binding_table.ubo_start)); + brw_imm_ud(stage_prog_data->binding_table.ubo_start)); surf_index = bld.emit_uniformize(surf_index); /* Assume this may touch any UBO. 
It would be nice to provide @@ -2294,7 +2296,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr fs_reg base_offset = vgrf(glsl_type::int_type); bld.SHR(base_offset, retype(get_nir_src(instr->src[1]), BRW_REGISTER_TYPE_D), - fs_reg(2)); + brw_imm_d(2)); unsigned vec4_offset = instr->const_index[0] / 4; for (int i = 0; i < instr->num_components; i++) @@ -2304,7 +2306,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr fs_reg packed_consts = vgrf(glsl_type::float_type); packed_consts.type = dest.type; - fs_reg const_offset_reg((unsigned) instr->const_index[0] & ~15); + struct brw_reg const_offset_reg = brw_imm_ud(instr->const_index[0] & ~15); bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts, surf_index, const_offset_reg); @@ -2336,12 +2338,12 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr if (const_uniform_block) { unsigned index = stage_prog_data->binding_table.ssbo_start + const_uniform_block->u[0]; - surf_index = fs_reg(index); + surf_index = brw_imm_ud(index); brw_mark_surface_used(prog_data, index); } else { surf_index = vgrf(glsl_type::uint_type); bld.ADD(surf_index, get_nir_src(instr->src[0]), - fs_reg(stage_prog_data->binding_table.ssbo_start)); + brw_imm_ud(stage_prog_data->binding_table.ssbo_start)); /* Assume this may touch any UBO. It would be nice to provide * a tighter bound, but the array information is already lowered away. @@ -2356,7 +2358,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr if (has_indirect) { offset_reg = get_nir_src(instr->src[1]); } else { - offset_reg = fs_reg(instr->const_index[0]); + offset_reg = brw_imm_ud(instr->const_index[0]); } /* Read the vector */ @@ -2407,12 +2409,12 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr if (const_uniform_block) { unsigned index = stage_prog_data->binding_table.ssbo_start + const_uniform_block->u[0]; - surf_index = fs_reg(index); + surf_index = brw_imm_ud(index); brw_mark_surface_used(prog_data, index); } else { surf_index = vgrf(glsl_type::uint_type); bld.ADD(surf_index, get_nir_src(instr->src[1]), - fs_reg(stage_prog_data->binding_table.ssbo_start)); + brw_imm_ud(stage_prog_data->binding_table.ssbo_start)); brw_mark_surface_used(prog_data, stage_prog_data->binding_table.ssbo_start + @@ -2436,12 +2438,12 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr fs_reg offset_reg; if (!has_indirect) { - offset_reg = fs_reg(instr->const_index[0] + 4 * first_component); + offset_reg = brw_imm_ud(instr->const_index[0] + 4 * first_component); } else { offset_reg = vgrf(glsl_type::uint_type); bld.ADD(offset_reg, retype(get_nir_src(instr->src[2]), BRW_REGISTER_TYPE_UD), - fs_reg(4 * first_component)); + brw_imm_ud(4 * first_component)); } emit_untyped_write(bld, surf_index, offset_reg, @@ -2512,7 +2514,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr int reg_width = dispatch_width / 8; /* Set LOD = 0 */ - fs_reg source = fs_reg(0); + fs_reg source = brw_imm_d(0); int mlen = 1 * reg_width; @@ -2531,7 +2533,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr BRW_REGISTER_TYPE_UD); const unsigned index = prog_data->binding_table.ssbo_start + ssbo_index; fs_inst *inst = bld.emit(FS_OPCODE_GET_BUFFER_SIZE, buffer_size, - src_payload, fs_reg(index)); + src_payload, brw_imm_ud(index)); inst->header_size = 0; inst->mlen = mlen; inst->regs_written = 
regs_written; @@ -2560,12 +2562,12 @@ fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld, if (const_surface) { unsigned surf_index = stage_prog_data->binding_table.ssbo_start + const_surface->u[0]; - surface = fs_reg(surf_index); + surface = brw_imm_ud(surf_index); brw_mark_surface_used(prog_data, surf_index); } else { surface = vgrf(glsl_type::uint_type); bld.ADD(surface, get_nir_src(instr->src[0]), - fs_reg(stage_prog_data->binding_table.ssbo_start)); + brw_imm_ud(stage_prog_data->binding_table.ssbo_start)); /* Assume this may touch any SSBO. This is the same we do for other * UBO/SSBO accesses with non-constant surface. @@ -2597,7 +2599,7 @@ void fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr) { unsigned sampler = instr->sampler_index; - fs_reg sampler_reg(sampler); + fs_reg sampler_reg(brw_imm_ud(sampler)); int gather_component = instr->component; @@ -2676,7 +2678,7 @@ fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr) /* Emit code to evaluate the actual indexing expression */ sampler_reg = vgrf(glsl_type::uint_type); - bld.ADD(sampler_reg, src, fs_reg(sampler)); + bld.ADD(sampler_reg, src, brw_imm_ud(sampler)); sampler_reg = bld.emit_uniformize(sampler_reg); break; } @@ -2691,14 +2693,14 @@ fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr) key_tex->compressed_multisample_layout_mask & (1 << sampler)) { mcs = emit_mcs_fetch(coordinate, instr->coord_components, sampler_reg); } else { - mcs = fs_reg(0u); + mcs = brw_imm_ud(0u); } } for (unsigned i = 0; i < 3; i++) { if (instr->const_offset[i] != 0) { assert(offset_components == 0); - tex_offset = fs_reg(brw_texture_offset(instr->const_offset, 3)); + tex_offset = brw_imm_ud(brw_texture_offset(instr->const_offset, 3)); break; } } diff --git a/src/mesa/drivers/dri/i965/brw_fs_surface_builder.cpp b/src/mesa/drivers/dri/i965/brw_fs_surface_builder.cpp index 534d8490cdf..45694ec0894 100644 --- a/src/mesa/drivers/dri/i965/brw_fs_surface_builder.cpp +++ b/src/mesa/drivers/dri/i965/brw_fs_surface_builder.cpp @@ -44,7 +44,7 @@ namespace brw { */ const fs_reg usurface = bld.emit_uniformize(surface); const fs_reg srcs[] = { - addr, src, usurface, fs_reg(dims), fs_reg(arg) + addr, src, usurface, brw_imm_ud(dims), brw_imm_ud(arg) }; const fs_reg dst = bld.vgrf(BRW_REGISTER_TYPE_UD, rsize); fs_inst *inst = bld.emit(opcode, dst, srcs, ARRAY_SIZE(srcs)); @@ -330,7 +330,7 @@ namespace { * messages causes a hang on IVB and VLV. */ set_predicate(pred, - bld.CMP(bld.null_reg_ud(), stride, fs_reg(4), + bld.CMP(bld.null_reg_ud(), stride, brw_imm_d(4), BRW_CONDITIONAL_G)); return BRW_PREDICATE_NORMAL; @@ -361,7 +361,7 @@ namespace { */ bld.CMP(bld.null_reg_ud(), retype(size, BRW_REGISTER_TYPE_UD), - fs_reg(0), BRW_CONDITIONAL_NZ); + brw_imm_d(0), BRW_CONDITIONAL_NZ); return BRW_PREDICATE_NORMAL; } else { @@ -438,7 +438,7 @@ namespace { * FINISHME: Factor out this frequently recurring pattern into a * helper function. */ - const fs_reg srcs[] = { addr, fs_reg(0), offset(addr, bld, 1) }; + const fs_reg srcs[] = { addr, brw_imm_d(0), offset(addr, bld, 1) }; const fs_reg dst = bld.vgrf(addr.type, dims); bld.LOAD_PAYLOAD(dst, srcs, dims, 0); return dst; @@ -488,7 +488,7 @@ namespace { bld.ADD(offset(addr, bld, c), offset(off, bld, c), (c < dims ? offset(retype(coord, BRW_REGISTER_TYPE_UD), bld, c) : - fs_reg(0))); + fs_reg(brw_imm_d(0)))); /* The layout of 3-D textures in memory is sort-of like a tiling * format. 
At each miplevel, the slices are arranged in rows of @@ -515,7 +515,7 @@ namespace { /* Decompose z into a major (tmp.y) and a minor (tmp.x) * index. */ - bld.BFE(offset(tmp, bld, 0), offset(tile, bld, 2), fs_reg(0), + bld.BFE(offset(tmp, bld, 0), offset(tile, bld, 2), brw_imm_d(0), offset(retype(coord, BRW_REGISTER_TYPE_UD), bld, 2)); bld.SHR(offset(tmp, bld, 1), offset(retype(coord, BRW_REGISTER_TYPE_UD), bld, 2), @@ -549,7 +549,7 @@ namespace { for (unsigned c = 0; c < 2; ++c) { /* Calculate the minor x and y indices. */ bld.BFE(offset(minor, bld, c), offset(tile, bld, c), - fs_reg(0), offset(addr, bld, c)); + brw_imm_d(0), offset(addr, bld, c)); /* Calculate the major x and y indices. */ bld.SHR(offset(major, bld, c), @@ -595,7 +595,7 @@ namespace { /* XOR tmp.x and tmp.y with bit 6 of the memory address. */ bld.XOR(tmp, tmp, offset(tmp, bld, 1)); - bld.AND(tmp, tmp, fs_reg(1 << 6)); + bld.AND(tmp, tmp, brw_imm_d(1 << 6)); bld.XOR(dst, dst, tmp); } @@ -647,7 +647,7 @@ namespace { const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD); /* Shift each component left to the correct bitfield position. */ - bld.SHL(tmp, offset(src, bld, c), fs_reg(shifts[c] % 32)); + bld.SHL(tmp, offset(src, bld, c), brw_imm_ud(shifts[c] % 32)); /* Add everything up. */ if (seen[shifts[c] / 32]) { @@ -679,13 +679,13 @@ namespace { /* Shift left to discard the most significant bits. */ bld.SHL(offset(dst, bld, c), offset(src, bld, shifts[c] / 32), - fs_reg(32 - shifts[c] % 32 - widths[c])); + brw_imm_ud(32 - shifts[c] % 32 - widths[c])); /* Shift back to the least significant bits using an arithmetic * shift to get sign extension on signed types. */ bld.ASR(offset(dst, bld, c), - offset(dst, bld, c), fs_reg(32 - widths[c])); + offset(dst, bld, c), brw_imm_ud(32 - widths[c])); } } @@ -709,13 +709,13 @@ namespace { if (widths[c]) { /* Clamp to the maximum value. */ bld.emit_minmax(offset(dst, bld, c), offset(src, bld, c), - fs_reg((int)scale(widths[c] - s)), + brw_imm_d((int)scale(widths[c] - s)), BRW_CONDITIONAL_L); /* Clamp to the minimum value. */ if (is_signed) bld.emit_minmax(offset(dst, bld, c), offset(dst, bld, c), - fs_reg(-(int)scale(widths[c] - s) - 1), + brw_imm_d(-(int)scale(widths[c] - s) - 1), BRW_CONDITIONAL_GE); } } @@ -741,12 +741,12 @@ namespace { /* Divide by the normalization constants. */ bld.MUL(offset(dst, bld, c), offset(dst, bld, c), - fs_reg(1.0f / scale(widths[c] - s))); + brw_imm_f(1.0f / scale(widths[c] - s))); /* Clamp to the minimum value. */ if (is_signed) bld.emit_minmax(offset(dst, bld, c), - offset(dst, bld, c), fs_reg(-1.0f), + offset(dst, bld, c), brw_imm_f(-1.0f), BRW_CONDITIONAL_GE); } } @@ -771,10 +771,10 @@ namespace { /* Clamp the normalized floating-point argument. */ if (is_signed) { bld.emit_minmax(offset(fdst, bld, c), offset(src, bld, c), - fs_reg(-1.0f), BRW_CONDITIONAL_GE); + brw_imm_f(-1.0f), BRW_CONDITIONAL_GE); bld.emit_minmax(offset(fdst, bld, c), offset(fdst, bld, c), - fs_reg(1.0f), BRW_CONDITIONAL_L); + brw_imm_f(1.0f), BRW_CONDITIONAL_L); } else { set_saturate(true, bld.MOV(offset(fdst, bld, c), offset(src, bld, c))); @@ -782,7 +782,7 @@ namespace { /* Multiply by the normalization constants. */ bld.MUL(offset(fdst, bld, c), offset(fdst, bld, c), - fs_reg((float)scale(widths[c] - s))); + brw_imm_f((float)scale(widths[c] - s))); /* Convert to integer. 
*/ bld.RNDE(offset(fdst, bld, c), offset(fdst, bld, c)); @@ -814,7 +814,7 @@ namespace { */ if (widths[c] < 16) bld.SHL(offset(dst, bld, c), - offset(dst, bld, c), fs_reg(15 - widths[c])); + offset(dst, bld, c), brw_imm_ud(15 - widths[c])); /* Convert to 32-bit floating point. */ bld.F16TO32(offset(fdst, bld, c), offset(dst, bld, c)); @@ -842,7 +842,7 @@ namespace { /* Clamp to the minimum value. */ if (widths[c] < 16) bld.emit_minmax(offset(fdst, bld, c), offset(fdst, bld, c), - fs_reg(0.0f), BRW_CONDITIONAL_GE); + brw_imm_f(0.0f), BRW_CONDITIONAL_GE); /* Convert to 16-bit floating-point. */ bld.F32TO16(offset(dst, bld, c), offset(fdst, bld, c)); @@ -855,7 +855,7 @@ namespace { */ if (widths[c] < 16) bld.SHR(offset(dst, bld, c), offset(dst, bld, c), - fs_reg(15 - widths[c])); + brw_imm_ud(15 - widths[c])); } } @@ -874,7 +874,8 @@ namespace { for (unsigned c = 0; c < 4; ++c) bld.MOV(offset(dst, bld, c), - widths[c] ? offset(src, bld, c) : fs_reg(pad[c])); + widths[c] ? offset(src, bld, c) + : fs_reg(brw_imm_ud(pad[c]))); return dst; } @@ -939,7 +940,7 @@ namespace brw { /* An out of bounds surface access should give zero as result. */ for (unsigned c = 0; c < size; ++c) set_predicate(pred, bld.SEL(offset(tmp, bld, c), - offset(tmp, bld, c), fs_reg(0))); + offset(tmp, bld, c), brw_imm_d(0))); } /* Set the register type to D instead of UD if the data type is @@ -1122,7 +1123,7 @@ namespace brw { /* An unbound surface access should give zero as result. */ if (rsize) - set_predicate(pred, bld.SEL(tmp, tmp, fs_reg(0))); + set_predicate(pred, bld.SEL(tmp, tmp, brw_imm_d(0))); return tmp; } diff --git a/src/mesa/drivers/dri/i965/brw_fs_visitor.cpp b/src/mesa/drivers/dri/i965/brw_fs_visitor.cpp index a7bd9cea7af..d97fcf33b62 100644 --- a/src/mesa/drivers/dri/i965/brw_fs_visitor.cpp +++ b/src/mesa/drivers/dri/i965/brw_fs_visitor.cpp @@ -165,7 +165,7 @@ fs_visitor::rescale_texcoord(fs_reg coordinate, int coord_components, chan = offset(chan, bld, i); set_condmod(BRW_CONDITIONAL_GE, - bld.emit(BRW_OPCODE_SEL, chan, chan, fs_reg(0.0f))); + bld.emit(BRW_OPCODE_SEL, chan, chan, brw_imm_f(0.0f))); /* Our parameter comes in as 1.0/width or 1.0/height, * because that's what people normally want for doing @@ -203,7 +203,7 @@ fs_visitor::emit_mcs_fetch(const fs_reg &coordinate, unsigned components, const fs_reg dest = vgrf(glsl_type::uvec4_type); const fs_reg srcs[] = { coordinate, fs_reg(), fs_reg(), fs_reg(), fs_reg(), fs_reg(), - sampler, fs_reg(), fs_reg(components), fs_reg(0) + sampler, fs_reg(), brw_imm_ud(components), brw_imm_d(0) }; fs_inst *inst = bld.emit(SHADER_OPCODE_TXF_MCS_LOGICAL, dest, srcs, ARRAY_SIZE(srcs)); @@ -244,7 +244,7 @@ fs_visitor::emit_texture(ir_texture_opcode op, this->result = res; for (int i=0; i<4; i++) { - bld.MOV(res, fs_reg(swiz == SWIZZLE_ZERO ? 0.0f : 1.0f)); + bld.MOV(res, brw_imm_f(swiz == SWIZZLE_ZERO ? 0.0f : 1.0f)); res = offset(res, bld, 1); } return; @@ -256,7 +256,7 @@ fs_visitor::emit_texture(ir_texture_opcode op, * pass a valid LOD argument. 
*/ assert(lod.file == BAD_FILE); - lod = fs_reg(0u); + lod = brw_imm_ud(0u); } if (coordinate.file != BAD_FILE) { @@ -274,7 +274,7 @@ fs_visitor::emit_texture(ir_texture_opcode op, const fs_reg srcs[] = { coordinate, shadow_c, lod, lod2, sample_index, mcs, sampler_reg, offset_value, - fs_reg(coord_components), fs_reg(grad_components) + brw_imm_d(coord_components), brw_imm_d(grad_components) }; enum opcode opcode; @@ -336,7 +336,7 @@ fs_visitor::emit_texture(ir_texture_opcode op, if (op == ir_txs && is_cube_array) { fs_reg depth = offset(dst, bld, 2); fs_reg fixed_depth = vgrf(glsl_type::int_type); - bld.emit(SHADER_OPCODE_INT_QUOTIENT, fixed_depth, depth, fs_reg(6)); + bld.emit(SHADER_OPCODE_INT_QUOTIENT, fixed_depth, depth, brw_imm_d(6)); fs_reg *fixed_payload = ralloc_array(mem_ctx, fs_reg, inst->regs_written); int components = inst->regs_written / (inst->exec_size / 8); @@ -367,7 +367,7 @@ fs_visitor::emit_gen6_gather_wa(uint8_t wa, fs_reg dst) for (int i = 0; i < 4; i++) { fs_reg dst_f = retype(dst, BRW_REGISTER_TYPE_F); /* Convert from UNORM to UINT */ - bld.MUL(dst_f, dst_f, fs_reg((float)((1 << width) - 1))); + bld.MUL(dst_f, dst_f, brw_imm_f((1 << width) - 1)); bld.MOV(dst, dst_f); if (wa & WA_SIGN) { @@ -375,8 +375,8 @@ fs_visitor::emit_gen6_gather_wa(uint8_t wa, fs_reg dst) * shifting the sign bit into place, then shifting back * preserving sign. */ - bld.SHL(dst, dst, fs_reg(32 - width)); - bld.ASR(dst, dst, fs_reg(32 - width)); + bld.SHL(dst, dst, brw_imm_d(32 - width)); + bld.ASR(dst, dst, brw_imm_d(32 - width)); } dst = offset(dst, bld, 1); @@ -440,9 +440,9 @@ fs_visitor::swizzle_result(ir_texture_opcode op, int dest_components, l = offset(l, bld, i); if (swiz == SWIZZLE_ZERO) { - bld.MOV(l, fs_reg(0.0f)); + bld.MOV(l, brw_imm_f(0.0f)); } else if (swiz == SWIZZLE_ONE) { - bld.MOV(l, fs_reg(1.0f)); + bld.MOV(l, brw_imm_f(1.0f)); } else { bld.MOV(l, offset(orig_val, bld, GET_SWZ(key_tex->swizzles[sampler], i))); @@ -462,7 +462,7 @@ fs_visitor::emit_dummy_fs() const float color[4] = { 1.0, 0.0, 1.0, 0.0 }; for (int i = 0; i < 4; i++) { bld.MOV(fs_reg(MRF, 2 + i * reg_width, BRW_REGISTER_TYPE_F), - fs_reg(color[i])); + brw_imm_f(color[i])); } fs_inst *write; @@ -681,7 +681,7 @@ fs_visitor::emit_alpha_test() fs_reg color = offset(outputs[0], bld, 3); /* f0.1 &= func(color, ref) */ - cmp = abld.CMP(bld.null_reg_f(), color, fs_reg(key->alpha_test_ref), + cmp = abld.CMP(bld.null_reg_f(), color, brw_imm_f(key->alpha_test_ref), cond_for_alpha_func(key->alpha_test_func)); } cmp->predicate = BRW_PREDICATE_NORMAL; @@ -714,7 +714,7 @@ fs_visitor::emit_single_fb_write(const fs_builder &bld, const fs_reg sources[] = { color0, color1, src0_alpha, src_depth, dst_depth, src_stencil, - sample_mask, fs_reg(components) + sample_mask, brw_imm_ud(components) }; assert(ARRAY_SIZE(sources) - 1 == FB_WRITE_LOGICAL_SRC_COMPONENTS); fs_inst *write = bld.emit(FS_OPCODE_FB_WRITE_LOGICAL, fs_reg(), @@ -948,12 +948,12 @@ fs_visitor::emit_urb_writes(const fs_reg &gs_vertex_count) fs_reg offset; if (gs_vertex_count.file == IMM) { - per_slot_offsets = fs_reg(output_vertex_size_owords * - gs_vertex_count.ud); + per_slot_offsets = brw_imm_ud(output_vertex_size_owords * + gs_vertex_count.ud); } else { per_slot_offsets = vgrf(glsl_type::int_type); bld.MUL(per_slot_offsets, gs_vertex_count, - fs_reg(output_vertex_size_owords)); + brw_imm_ud(output_vertex_size_owords)); } } @@ -976,7 +976,7 @@ fs_visitor::emit_urb_writes(const fs_reg &gs_vertex_count) } fs_reg zero(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD); - 
bld.MOV(zero, fs_reg(0u)); + bld.MOV(zero, brw_imm_ud(0u)); sources[length++] = zero; if (vue_map->slots_valid & VARYING_BIT_LAYER) @@ -1036,7 +1036,7 @@ fs_visitor::emit_urb_writes(const fs_reg &gs_vertex_count) for (unsigned i = 0; i < output_components[varying]; i++) sources[length++] = offset(this->outputs[varying], bld, i); for (unsigned i = output_components[varying]; i < 4; i++) - sources[length++] = fs_reg(0); + sources[length++] = brw_imm_d(0); } break; } @@ -1113,11 +1113,11 @@ fs_visitor::emit_barrier() const fs_builder pbld = bld.exec_all().group(8, 0); /* Clear the message payload */ - pbld.MOV(payload, fs_reg(0u)); + pbld.MOV(payload, brw_imm_ud(0u)); /* Copy bits 27:24 of r0.2 (barrier id) to the message payload reg.2 */ fs_reg r0_2 = fs_reg(retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD)); - pbld.AND(component(payload, 2), r0_2, fs_reg(0x0f000000u)); + pbld.AND(component(payload, 2), r0_2, brw_imm_ud(0x0f000000u)); /* Emit a gateway "barrier" message using the payload we set up, followed * by a wait instruction. diff --git a/src/mesa/drivers/dri/i965/brw_ir_fs.h b/src/mesa/drivers/dri/i965/brw_ir_fs.h index 7e977e9e727..0410053ce27 100644 --- a/src/mesa/drivers/dri/i965/brw_ir_fs.h +++ b/src/mesa/drivers/dri/i965/brw_ir_fs.h @@ -36,11 +36,6 @@ public: void init(); fs_reg(); - explicit fs_reg(float f); - explicit fs_reg(int32_t i); - explicit fs_reg(uint32_t u); - explicit fs_reg(uint8_t vf[4]); - explicit fs_reg(uint8_t vf0, uint8_t vf1, uint8_t vf2, uint8_t vf3); fs_reg(struct brw_reg reg); fs_reg(enum brw_reg_file file, int nr); fs_reg(enum brw_reg_file file, int nr, enum brw_reg_type type); diff --git a/src/mesa/drivers/dri/i965/test_fs_cmod_propagation.cpp b/src/mesa/drivers/dri/i965/test_fs_cmod_propagation.cpp index 62d39f70ec4..034d8a507fe 100644 --- a/src/mesa/drivers/dri/i965/test_fs_cmod_propagation.cpp +++ b/src/mesa/drivers/dri/i965/test_fs_cmod_propagation.cpp @@ -107,7 +107,7 @@ TEST_F(cmod_propagation_test, basic) fs_reg dest = v->vgrf(glsl_type::float_type); fs_reg src0 = v->vgrf(glsl_type::float_type); fs_reg src1 = v->vgrf(glsl_type::float_type); - fs_reg zero(0.0f); + fs_reg zero(brw_imm_f(0.0f)); bld.ADD(dest, src0, src1); bld.CMP(bld.null_reg_f(), dest, zero, BRW_CONDITIONAL_GE); @@ -139,7 +139,7 @@ TEST_F(cmod_propagation_test, cmp_nonzero) fs_reg dest = v->vgrf(glsl_type::float_type); fs_reg src0 = v->vgrf(glsl_type::float_type); fs_reg src1 = v->vgrf(glsl_type::float_type); - fs_reg nonzero(1.0f); + fs_reg nonzero(brw_imm_f(1.0f)); bld.ADD(dest, src0, src1); bld.CMP(bld.null_reg_f(), dest, nonzero, BRW_CONDITIONAL_GE); @@ -171,7 +171,7 @@ TEST_F(cmod_propagation_test, non_cmod_instruction) const fs_builder &bld = v->bld; fs_reg dest = v->vgrf(glsl_type::uint_type); fs_reg src0 = v->vgrf(glsl_type::uint_type); - fs_reg zero(0u); + fs_reg zero(brw_imm_ud(0u)); bld.FBL(dest, src0); bld.CMP(bld.null_reg_ud(), dest, zero, BRW_CONDITIONAL_GE); @@ -205,7 +205,7 @@ TEST_F(cmod_propagation_test, intervening_flag_write) fs_reg src0 = v->vgrf(glsl_type::float_type); fs_reg src1 = v->vgrf(glsl_type::float_type); fs_reg src2 = v->vgrf(glsl_type::float_type); - fs_reg zero(0.0f); + fs_reg zero(brw_imm_f(0.0f)); bld.ADD(dest, src0, src1); bld.CMP(bld.null_reg_f(), src2, zero, BRW_CONDITIONAL_GE); bld.CMP(bld.null_reg_f(), dest, zero, BRW_CONDITIONAL_GE); @@ -244,7 +244,7 @@ TEST_F(cmod_propagation_test, intervening_flag_read) fs_reg src0 = v->vgrf(glsl_type::float_type); fs_reg src1 = v->vgrf(glsl_type::float_type); fs_reg src2 = v->vgrf(glsl_type::float_type); - 
fs_reg zero(0.0f); + fs_reg zero(brw_imm_f(0.0f)); bld.ADD(dest0, src0, src1); set_predicate(BRW_PREDICATE_NORMAL, bld.SEL(dest1, src2, zero)); bld.CMP(bld.null_reg_f(), dest0, zero, BRW_CONDITIONAL_GE); @@ -282,7 +282,7 @@ TEST_F(cmod_propagation_test, intervening_dest_write) fs_reg src0 = v->vgrf(glsl_type::float_type); fs_reg src1 = v->vgrf(glsl_type::float_type); fs_reg src2 = v->vgrf(glsl_type::vec2_type); - fs_reg zero(0.0f); + fs_reg zero(brw_imm_f(0.0f)); bld.ADD(offset(dest, bld, 2), src0, src1); bld.emit(SHADER_OPCODE_TEX, dest, src2) ->regs_written = 4; @@ -323,7 +323,7 @@ TEST_F(cmod_propagation_test, intervening_flag_read_same_value) fs_reg src0 = v->vgrf(glsl_type::float_type); fs_reg src1 = v->vgrf(glsl_type::float_type); fs_reg src2 = v->vgrf(glsl_type::float_type); - fs_reg zero(0.0f); + fs_reg zero(brw_imm_f(0.0f)); set_condmod(BRW_CONDITIONAL_GE, bld.ADD(dest0, src0, src1)); set_predicate(BRW_PREDICATE_NORMAL, bld.SEL(dest1, src2, zero)); bld.CMP(bld.null_reg_f(), dest0, zero, BRW_CONDITIONAL_GE); @@ -360,7 +360,7 @@ TEST_F(cmod_propagation_test, negate) fs_reg dest = v->vgrf(glsl_type::float_type); fs_reg src0 = v->vgrf(glsl_type::float_type); fs_reg src1 = v->vgrf(glsl_type::float_type); - fs_reg zero(0.0f); + fs_reg zero(brw_imm_f(0.0f)); bld.ADD(dest, src0, src1); dest.negate = true; bld.CMP(bld.null_reg_f(), dest, zero, BRW_CONDITIONAL_GE); @@ -425,7 +425,7 @@ TEST_F(cmod_propagation_test, different_types_cmod_with_zero) fs_reg dest = v->vgrf(glsl_type::int_type); fs_reg src0 = v->vgrf(glsl_type::int_type); fs_reg src1 = v->vgrf(glsl_type::int_type); - fs_reg zero(0.0f); + fs_reg zero(brw_imm_f(0.0f)); bld.ADD(dest, src0, src1); bld.CMP(bld.null_reg_f(), retype(dest, BRW_REGISTER_TYPE_F), zero, BRW_CONDITIONAL_GE); @@ -458,8 +458,8 @@ TEST_F(cmod_propagation_test, andnz_one) const fs_builder &bld = v->bld; fs_reg dest = v->vgrf(glsl_type::int_type); fs_reg src0 = v->vgrf(glsl_type::float_type); - fs_reg zero(0.0f); - fs_reg one(1); + fs_reg zero(brw_imm_f(0.0f)); + fs_reg one(brw_imm_d(1)); bld.CMP(retype(dest, BRW_REGISTER_TYPE_F), src0, zero, BRW_CONDITIONAL_L); set_condmod(BRW_CONDITIONAL_NZ, @@ -493,8 +493,8 @@ TEST_F(cmod_propagation_test, andnz_non_one) const fs_builder &bld = v->bld; fs_reg dest = v->vgrf(glsl_type::int_type); fs_reg src0 = v->vgrf(glsl_type::float_type); - fs_reg zero(0.0f); - fs_reg nonone(38); + fs_reg zero(brw_imm_f(0.0f)); + fs_reg nonone(brw_imm_d(38)); bld.CMP(retype(dest, BRW_REGISTER_TYPE_F), src0, zero, BRW_CONDITIONAL_L); set_condmod(BRW_CONDITIONAL_NZ, @@ -528,8 +528,8 @@ TEST_F(cmod_propagation_test, andz_one) const fs_builder &bld = v->bld; fs_reg dest = v->vgrf(glsl_type::int_type); fs_reg src0 = v->vgrf(glsl_type::float_type); - fs_reg zero(0.0f); - fs_reg one(1); + fs_reg zero(brw_imm_f(0.0f)); + fs_reg one(brw_imm_d(1)); bld.CMP(retype(dest, BRW_REGISTER_TYPE_F), src0, zero, BRW_CONDITIONAL_L); set_condmod(BRW_CONDITIONAL_Z, |