| author | Tom Stellard <[email protected]> | 2012-07-08 12:41:05 -0400 |
|---|---|---|
| committer | Tom Stellard <[email protected]> | 2012-07-09 13:43:11 +0000 |
| commit | 76b44034b9b234d3db4012342f0fae677d4f10f6 (patch) | |
| tree | bac085be50fa71417aaf8533e614b3deacc1db4f /src/gallium | |
| parent | 39323e8f792a33f4fe3028c286a1638dc16a38a4 (diff) | |
radeon/llvm: Rename namespace from AMDIL to AMDGPU
Diffstat (limited to 'src/gallium')
25 files changed, 361 insertions, 360 deletions
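The rename is mechanical: in the .td files `let Namespace = "AMDIL"` becomes `let Namespace = "AMDGPU"`, and every C++ use of the TableGen-generated constants (`AMDIL::RETURN`, `AMDIL::R600_Reg32RegClass`, and so on) follows suit. A minimal, self-contained sketch of the pattern — not code from the commit; the stand-in enum below only mimics what TableGen would emit — looks like this:

```cpp
// Sketch only: in the real backend these opcode constants are generated by
// TableGen into whatever namespace the .td files name, so renaming that
// namespace from AMDIL to AMDGPU renames every constant's qualifier.
namespace AMDGPU {                                   // previously: namespace AMDIL
  enum Opcodes { RETURN, LOAD_INPUT, MASK_WRITE };   // stand-ins for generated opcodes
}

// Mirrors the shape of the isPlaceHolderOpcode() hunk in AMDGPUUtil.cpp
// after the rename: only the namespace qualifier changes.
static bool isPlaceHolderOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
  case AMDGPU::RETURN:                               // was AMDIL::RETURN
  case AMDGPU::LOAD_INPUT:                           // was AMDIL::LOAD_INPUT
  case AMDGPU::MASK_WRITE:                           // was AMDIL::MASK_WRITE
    return true;
  }
}

int main() {
  return isPlaceHolderOpcode(AMDGPU::RETURN) ? 0 : 1;
}
```

The only non-mechanical piece is AMDGPUUtil.h, where the `AMDGPU` namespace is moved inside `namespace llvm` so the `llvm::` qualifiers on the `utilAddLiveIn()` parameters can be dropped; everything else in the diff below is a one-for-one substitution of the prefix.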
diff --git a/src/gallium/drivers/radeon/AMDGPUInstructions.td b/src/gallium/drivers/radeon/AMDGPUInstructions.td index d6897d57060..81b58c16e0f 100644 --- a/src/gallium/drivers/radeon/AMDGPUInstructions.td +++ b/src/gallium/drivers/radeon/AMDGPUInstructions.td @@ -16,7 +16,7 @@ class AMDGPUInst <dag outs, dag ins, string asm, list<dag> pattern> : Instructio field bits<16> AMDILOp = 0; field bits<3> Gen = 0; - let Namespace = "AMDIL"; + let Namespace = "AMDGPU"; let OutOperandList = outs; let InOperandList = ins; let AsmString = asm; diff --git a/src/gallium/drivers/radeon/AMDGPURegisterInfo.td b/src/gallium/drivers/radeon/AMDGPURegisterInfo.td index 1707903ae7e..8181e023aa3 100644 --- a/src/gallium/drivers/radeon/AMDGPURegisterInfo.td +++ b/src/gallium/drivers/radeon/AMDGPURegisterInfo.td @@ -11,7 +11,7 @@ // //===----------------------------------------------------------------------===// -let Namespace = "AMDIL" in { +let Namespace = "AMDGPU" in { def sel_x : SubRegIndex; def sel_y : SubRegIndex; def sel_z : SubRegIndex; diff --git a/src/gallium/drivers/radeon/AMDGPUUtil.cpp b/src/gallium/drivers/radeon/AMDGPUUtil.cpp index 0d30e0075f8..63b359ffc14 100644 --- a/src/gallium/drivers/radeon/AMDGPUUtil.cpp +++ b/src/gallium/drivers/radeon/AMDGPUUtil.cpp @@ -30,11 +30,11 @@ bool AMDGPU::isPlaceHolderOpcode(unsigned opcode) { switch (opcode) { default: return false; - case AMDIL::RETURN: - case AMDIL::LOAD_INPUT: - case AMDIL::LAST: - case AMDIL::MASK_WRITE: - case AMDIL::RESERVE_REG: + case AMDGPU::RETURN: + case AMDGPU::LOAD_INPUT: + case AMDGPU::LAST: + case AMDGPU::MASK_WRITE: + case AMDGPU::RESERVE_REG: return true; } } @@ -44,17 +44,17 @@ bool AMDGPU::isTransOp(unsigned opcode) switch(opcode) { default: return false; - case AMDIL::COS_r600: - case AMDIL::COS_eg: - case AMDIL::MULLIT: - case AMDIL::MUL_LIT_r600: - case AMDIL::MUL_LIT_eg: - case AMDIL::EXP_IEEE_r600: - case AMDIL::EXP_IEEE_eg: - case AMDIL::LOG_CLAMPED_r600: - case AMDIL::LOG_IEEE_r600: - case AMDIL::LOG_CLAMPED_eg: - case AMDIL::LOG_IEEE_eg: + case AMDGPU::COS_r600: + case AMDGPU::COS_eg: + case AMDGPU::MULLIT: + case AMDGPU::MUL_LIT_r600: + case AMDGPU::MUL_LIT_eg: + case AMDGPU::EXP_IEEE_r600: + case AMDGPU::EXP_IEEE_eg: + case AMDGPU::LOG_CLAMPED_r600: + case AMDGPU::LOG_IEEE_r600: + case AMDGPU::LOG_CLAMPED_eg: + case AMDGPU::LOG_IEEE_eg: return true; } } @@ -63,20 +63,20 @@ bool AMDGPU::isTexOp(unsigned opcode) { switch(opcode) { default: return false; - case AMDIL::TEX_LD: - case AMDIL::TEX_GET_TEXTURE_RESINFO: - case AMDIL::TEX_SAMPLE: - case AMDIL::TEX_SAMPLE_C: - case AMDIL::TEX_SAMPLE_L: - case AMDIL::TEX_SAMPLE_C_L: - case AMDIL::TEX_SAMPLE_LB: - case AMDIL::TEX_SAMPLE_C_LB: - case AMDIL::TEX_SAMPLE_G: - case AMDIL::TEX_SAMPLE_C_G: - case AMDIL::TEX_GET_GRADIENTS_H: - case AMDIL::TEX_GET_GRADIENTS_V: - case AMDIL::TEX_SET_GRADIENTS_H: - case AMDIL::TEX_SET_GRADIENTS_V: + case AMDGPU::TEX_LD: + case AMDGPU::TEX_GET_TEXTURE_RESINFO: + case AMDGPU::TEX_SAMPLE: + case AMDGPU::TEX_SAMPLE_C: + case AMDGPU::TEX_SAMPLE_L: + case AMDGPU::TEX_SAMPLE_C_L: + case AMDGPU::TEX_SAMPLE_LB: + case AMDGPU::TEX_SAMPLE_C_LB: + case AMDGPU::TEX_SAMPLE_G: + case AMDGPU::TEX_SAMPLE_C_G: + case AMDGPU::TEX_GET_GRADIENTS_H: + case AMDGPU::TEX_GET_GRADIENTS_V: + case AMDGPU::TEX_SET_GRADIENTS_H: + case AMDGPU::TEX_SET_GRADIENTS_V: return true; } } @@ -85,8 +85,8 @@ bool AMDGPU::isReductionOp(unsigned opcode) { switch(opcode) { default: return false; - case AMDIL::DOT4_r600: - case AMDIL::DOT4_eg: + case AMDGPU::DOT4_r600: + case 
AMDGPU::DOT4_eg: return true; } } @@ -95,8 +95,8 @@ bool AMDGPU::isCubeOp(unsigned opcode) { switch(opcode) { default: return false; - case AMDIL::CUBE_r600: - case AMDIL::CUBE_eg: + case AMDGPU::CUBE_r600: + case AMDGPU::CUBE_eg: return true; } } @@ -106,25 +106,25 @@ bool AMDGPU::isFCOp(unsigned opcode) { switch(opcode) { default: return false; - case AMDIL::BREAK_LOGICALZ_f32: - case AMDIL::BREAK_LOGICALNZ_i32: - case AMDIL::BREAK_LOGICALZ_i32: - case AMDIL::BREAK_LOGICALNZ_f32: - case AMDIL::CONTINUE_LOGICALNZ_f32: - case AMDIL::IF_LOGICALNZ_i32: - case AMDIL::IF_LOGICALZ_f32: - case AMDIL::ELSE: - case AMDIL::ENDIF: - case AMDIL::ENDLOOP: - case AMDIL::IF_LOGICALNZ_f32: - case AMDIL::WHILELOOP: + case AMDGPU::BREAK_LOGICALZ_f32: + case AMDGPU::BREAK_LOGICALNZ_i32: + case AMDGPU::BREAK_LOGICALZ_i32: + case AMDGPU::BREAK_LOGICALNZ_f32: + case AMDGPU::CONTINUE_LOGICALNZ_f32: + case AMDGPU::IF_LOGICALNZ_i32: + case AMDGPU::IF_LOGICALZ_f32: + case AMDGPU::ELSE: + case AMDGPU::ENDIF: + case AMDGPU::ENDLOOP: + case AMDGPU::IF_LOGICALNZ_f32: + case AMDGPU::WHILELOOP: return true; } } -void AMDGPU::utilAddLiveIn(llvm::MachineFunction * MF, - llvm::MachineRegisterInfo & MRI, - const llvm::TargetInstrInfo * TII, +void AMDGPU::utilAddLiveIn(MachineFunction * MF, + MachineRegisterInfo & MRI, + const TargetInstrInfo * TII, unsigned physReg, unsigned virtReg) { if (!MRI.isLiveIn(physReg)) { diff --git a/src/gallium/drivers/radeon/AMDGPUUtil.h b/src/gallium/drivers/radeon/AMDGPUUtil.h index 633ea3bf6cf..e8b02b1d24b 100644 --- a/src/gallium/drivers/radeon/AMDGPUUtil.h +++ b/src/gallium/drivers/radeon/AMDGPUUtil.h @@ -20,8 +20,6 @@ class MachineFunction; class MachineRegisterInfo; class TargetInstrInfo; -} - namespace AMDGPU { bool isPlaceHolderOpcode(unsigned opcode); @@ -38,9 +36,11 @@ bool isFCOp(unsigned opcode); #define MO_FLAG_ABS (1 << 2) #define MO_FLAG_MASK (1 << 3) -void utilAddLiveIn(llvm::MachineFunction * MF, llvm::MachineRegisterInfo & MRI, - const llvm::TargetInstrInfo * TII, unsigned physReg, unsigned virtReg); +void utilAddLiveIn(MachineFunction * MF, MachineRegisterInfo & MRI, + const TargetInstrInfo * TII, unsigned physReg, unsigned virtReg); } // End namespace AMDGPU +} // End namespace llvm + #endif // AMDGPU_UTIL_H diff --git a/src/gallium/drivers/radeon/AMDILCFGStructurizer.cpp b/src/gallium/drivers/radeon/AMDILCFGStructurizer.cpp index ba7d246137e..1f1a6da086e 100644 --- a/src/gallium/drivers/radeon/AMDILCFGStructurizer.cpp +++ b/src/gallium/drivers/radeon/AMDILCFGStructurizer.cpp @@ -1371,10 +1371,10 @@ int CFGStructurizer<PassT>::improveSimpleJumpintoIf(BlockT *headBlk, bool landBlkHasOtherPred = (landBlk->pred_size() > 2); - //insert AMDIL::ENDIF to avoid special case "input landBlk == NULL" + //insert AMDGPU::ENDIF to avoid special case "input landBlk == NULL" typename BlockT::iterator insertPos = CFGTraits::getInstrPos - (landBlk, CFGTraits::insertInstrBefore(landBlk, AMDIL::ENDIF, passRep)); + (landBlk, CFGTraits::insertInstrBefore(landBlk, AMDGPU::ENDIF, passRep)); if (landBlkHasOtherPred) { unsigned immReg = @@ -1386,11 +1386,11 @@ int CFGStructurizer<PassT>::improveSimpleJumpintoIf(BlockT *headBlk, CFGTraits::insertCompareInstrBefore(landBlk, insertPos, passRep, cmpResReg, initReg, immReg); CFGTraits::insertCondBranchBefore(landBlk, insertPos, - AMDIL::IF_LOGICALZ_i32, passRep, + AMDGPU::IF_LOGICALZ_i32, passRep, cmpResReg, DebugLoc()); } - CFGTraits::insertCondBranchBefore(landBlk, insertPos, AMDIL::IF_LOGICALNZ_i32, + CFGTraits::insertCondBranchBefore(landBlk, 
insertPos, AMDGPU::IF_LOGICALNZ_i32, passRep, initReg, DebugLoc()); if (migrateTrue) { @@ -1400,7 +1400,7 @@ int CFGStructurizer<PassT>::improveSimpleJumpintoIf(BlockT *headBlk, // (initVal != 1). CFGTraits::insertAssignInstrBefore(trueBlk, passRep, initReg, 1); } - CFGTraits::insertInstrBefore(insertPos, AMDIL::ELSE, passRep); + CFGTraits::insertInstrBefore(insertPos, AMDGPU::ELSE, passRep); if (migrateFalse) { migrateInstruction(falseBlk, landBlk, insertPos); @@ -1409,11 +1409,11 @@ int CFGStructurizer<PassT>::improveSimpleJumpintoIf(BlockT *headBlk, // (initVal != 0) CFGTraits::insertAssignInstrBefore(falseBlk, passRep, initReg, 0); } - //CFGTraits::insertInstrBefore(insertPos, AMDIL::ENDIF, passRep); + //CFGTraits::insertInstrBefore(insertPos, AMDGPU::ENDIF, passRep); if (landBlkHasOtherPred) { // add endif - CFGTraits::insertInstrBefore(insertPos, AMDIL::ENDIF, passRep); + CFGTraits::insertInstrBefore(insertPos, AMDGPU::ENDIF, passRep); // put initReg = 2 to other predecessors of landBlk for (typename BlockT::pred_iterator predIter = landBlk->pred_begin(), @@ -1568,7 +1568,7 @@ void CFGStructurizer<PassT>::mergeIfthenelseBlock(InstrT *branchInstr, } retireBlock(curBlk, trueBlk); } - CFGTraits::insertInstrBefore(branchInstrPos, AMDIL::ELSE, passRep); + CFGTraits::insertInstrBefore(branchInstrPos, AMDGPU::ELSE, passRep); if (falseBlk) { curBlk->splice(branchInstrPos, falseBlk, FirstNonDebugInstr(falseBlk), @@ -1579,7 +1579,7 @@ void CFGStructurizer<PassT>::mergeIfthenelseBlock(InstrT *branchInstr, } retireBlock(curBlk, falseBlk); } - CFGTraits::insertInstrBefore(branchInstrPos, AMDIL::ENDIF, passRep); + CFGTraits::insertInstrBefore(branchInstrPos, AMDGPU::ENDIF, passRep); //curBlk->remove(branchInstrPos); branchInstr->eraseFromParent(); @@ -1608,13 +1608,13 @@ void CFGStructurizer<PassT>::mergeLooplandBlock(BlockT *dstBlk, } /* we last inserterd the DebugLoc in the - * BREAK_LOGICALZ_i32 or AMDIL::BREAK_LOGICALNZ statement in the current dstBlk. + * BREAK_LOGICALZ_i32 or AMDGPU::BREAK_LOGICALNZ statement in the current dstBlk. * search for the DebugLoc in the that statement. * if not found, we have to insert the empty/default DebugLoc */ InstrT *loopBreakInstr = CFGTraits::getLoopBreakInstr(dstBlk); DebugLoc DLBreak = (loopBreakInstr) ? loopBreakInstr->getDebugLoc() : DebugLoc(); - CFGTraits::insertInstrBefore(dstBlk, AMDIL::WHILELOOP, passRep, DLBreak); + CFGTraits::insertInstrBefore(dstBlk, AMDGPU::WHILELOOP, passRep, DLBreak); // Loop breakInitRegs are init before entering the loop. for (typename std::set<RegiT>::const_iterator iter = loopLand->breakInitRegs.begin(), @@ -1635,13 +1635,13 @@ void CFGStructurizer<PassT>::mergeLooplandBlock(BlockT *dstBlk, InstrT *continueInstr = CFGTraits::getContinueInstr(dstBlk); DebugLoc DLContinue = (continueInstr) ? continueInstr->getDebugLoc() : DebugLoc(); - CFGTraits::insertInstrEnd(dstBlk, AMDIL::ENDLOOP, passRep, DLContinue); + CFGTraits::insertInstrEnd(dstBlk, AMDGPU::ENDLOOP, passRep, DLContinue); // Loop breakOnRegs are check after the ENDLOOP: break the loop outside this // loop. for (typename std::set<RegiT>::const_iterator iter = loopLand->breakOnRegs.begin(), iterEnd = loopLand->breakOnRegs.end(); iter != iterEnd; ++iter) { - CFGTraits::insertCondBranchEnd(dstBlk, AMDIL::BREAK_LOGICALNZ_i32, passRep, + CFGTraits::insertCondBranchEnd(dstBlk, AMDGPU::BREAK_LOGICALNZ_i32, passRep, *iter); } @@ -1649,7 +1649,7 @@ void CFGStructurizer<PassT>::mergeLooplandBlock(BlockT *dstBlk, // loop. 
for (std::set<RegiT>::const_iterator iter = loopLand->contOnRegs.begin(), iterEnd = loopLand->contOnRegs.end(); iter != iterEnd; ++iter) { - CFGTraits::insertCondBranchEnd(dstBlk, AMDIL::CONTINUE_LOGICALNZ_i32, + CFGTraits::insertCondBranchEnd(dstBlk, AMDGPU::CONTINUE_LOGICALNZ_i32, passRep, *iter); } @@ -1713,8 +1713,8 @@ void CFGStructurizer<PassT>::mergeLoopbreakBlock(BlockT *exitingBlk, if (setReg != INVALIDREGNUM) { CFGTraits::insertAssignInstrBefore(branchInstrPos, passRep, setReg, 1); } - CFGTraits::insertInstrBefore(branchInstrPos, AMDIL::BREAK, passRep); - CFGTraits::insertInstrBefore(branchInstrPos, AMDIL::ENDIF, passRep); + CFGTraits::insertInstrBefore(branchInstrPos, AMDGPU::BREAK, passRep); + CFGTraits::insertInstrBefore(branchInstrPos, AMDGPU::ENDIF, passRep); } //if_logical //now branchInst can be erase safely @@ -1774,13 +1774,13 @@ void CFGStructurizer<PassT>::settleLoopcontBlock(BlockT *contingBlk, if (setReg != INVALIDREGNUM) { CFGTraits::insertAssignInstrBefore(branchInstrPos, passRep, setReg, 1); // insertEnd to ensure phi-moves, if exist, go before the continue-instr. - CFGTraits::insertInstrEnd(contingBlk, AMDIL::BREAK, passRep, DL); + CFGTraits::insertInstrEnd(contingBlk, AMDGPU::BREAK, passRep, DL); } else { // insertEnd to ensure phi-moves, if exist, go before the continue-instr. - CFGTraits::insertInstrEnd(contingBlk, AMDIL::CONTINUE, passRep, DL); + CFGTraits::insertInstrEnd(contingBlk, AMDGPU::CONTINUE, passRep, DL); } - CFGTraits::insertInstrEnd(contingBlk, AMDIL::ENDIF, passRep, DL); + CFGTraits::insertInstrEnd(contingBlk, AMDGPU::ENDIF, passRep, DL); } else { int branchOpcode = trueBranch == contBlk ? CFGTraits::getContinueNzeroOpcode(oldOpcode) @@ -1798,10 +1798,10 @@ void CFGStructurizer<PassT>::settleLoopcontBlock(BlockT *contingBlk, if (setReg != INVALIDREGNUM) { CFGTraits::insertAssignInstrBefore(contingBlk, passRep, setReg, 1); // insertEnd to ensure phi-moves, if exist, go before the continue-instr. - CFGTraits::insertInstrEnd(contingBlk, AMDIL::BREAK, passRep, CFGTraits::getLastDebugLocInBB(contingBlk)); + CFGTraits::insertInstrEnd(contingBlk, AMDGPU::BREAK, passRep, CFGTraits::getLastDebugLocInBB(contingBlk)); } else { // insertEnd to ensure phi-moves, if exist, go before the continue-instr. 
- CFGTraits::insertInstrEnd(contingBlk, AMDIL::CONTINUE, passRep, CFGTraits::getLastDebugLocInBB(contingBlk)); + CFGTraits::insertInstrEnd(contingBlk, AMDGPU::CONTINUE, passRep, CFGTraits::getLastDebugLocInBB(contingBlk)); } } //else @@ -1841,7 +1841,7 @@ CFGStructurizer<PassT>::relocateLoopcontBlock(LoopT *parentLoopRep, BlockT *newBlk = funcRep->CreateMachineBasicBlock(); funcRep->push_back(newBlk); //insert to function - CFGTraits::insertInstrEnd(newBlk, AMDIL::CONTINUE, passRep); + CFGTraits::insertInstrEnd(newBlk, AMDGPU::CONTINUE, passRep); SHOWNEWBLK(newBlk, "New continue block: "); for (typename std::set<BlockT*>::const_iterator iter = endBlkSet.begin(), @@ -1949,7 +1949,7 @@ CFGStructurizer<PassT>::addLoopEndbranchBlock(LoopT *loopRep, BuildMI(preBranchBlk, DL, tii->get(tii->getIEQOpcode()), condResReg) .addReg(endBranchReg).addReg(preValReg); - BuildMI(preBranchBlk, DL, tii->get(AMDIL::BRANCH_COND_i32)) + BuildMI(preBranchBlk, DL, tii->get(AMDGPU::BRANCH_COND_i32)) .addMBB(preExitBlk).addReg(condResReg); preBranchBlk->addSuccessor(preExitBlk); @@ -2166,7 +2166,7 @@ CFGStructurizer<PassT>::normalizeInfiniteLoopExit(LoopT* LoopRep) { funcRep->getRegInfo().createVirtualRegister(I32RC); CFGTraits::insertAssignInstrBefore(insertPos, passRep, immReg, 1); InstrT *newInstr = - CFGTraits::insertInstrBefore(insertPos, AMDIL::BRANCH_COND_i32, passRep); + CFGTraits::insertInstrBefore(insertPos, AMDGPU::BRANCH_COND_i32, passRep); MachineInstrBuilder(newInstr).addMBB(loopHeader).addReg(immReg, false); SHOWNEWINSTR(newInstr); @@ -2220,7 +2220,7 @@ void CFGStructurizer<PassT>::addDummyExitBlock(SmallVector<BlockT*, DEFAULT_VEC_SLOTS> &retBlks) { BlockT *dummyExitBlk = funcRep->CreateMachineBasicBlock(); funcRep->push_back(dummyExitBlk); //insert to function - CFGTraits::insertInstrEnd(dummyExitBlk, AMDIL::RETURN, passRep); + CFGTraits::insertInstrEnd(dummyExitBlk, AMDGPU::RETURN, passRep); for (typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::iterator iter = retBlks.begin(), @@ -2766,7 +2766,7 @@ struct CFGStructTraits<AMDILCFGStructurizer> static int getBreakNzeroOpcode(int oldOpcode) { switch(oldOpcode) { - ExpandCaseToAllScalarReturn(AMDIL::BRANCH_COND, AMDIL::BREAK_LOGICALNZ); + ExpandCaseToAllScalarReturn(AMDGPU::BRANCH_COND, AMDGPU::BREAK_LOGICALNZ); default: assert(0 && "internal error"); }; @@ -2775,7 +2775,7 @@ struct CFGStructTraits<AMDILCFGStructurizer> static int getBreakZeroOpcode(int oldOpcode) { switch(oldOpcode) { - ExpandCaseToAllScalarReturn(AMDIL::BRANCH_COND, AMDIL::BREAK_LOGICALZ); + ExpandCaseToAllScalarReturn(AMDGPU::BRANCH_COND, AMDGPU::BREAK_LOGICALZ); default: assert(0 && "internal error"); }; @@ -2784,7 +2784,7 @@ struct CFGStructTraits<AMDILCFGStructurizer> static int getBranchNzeroOpcode(int oldOpcode) { switch(oldOpcode) { - ExpandCaseToAllScalarReturn(AMDIL::BRANCH_COND, AMDIL::IF_LOGICALNZ); + ExpandCaseToAllScalarReturn(AMDGPU::BRANCH_COND, AMDGPU::IF_LOGICALNZ); default: assert(0 && "internal error"); }; @@ -2793,7 +2793,7 @@ struct CFGStructTraits<AMDILCFGStructurizer> static int getBranchZeroOpcode(int oldOpcode) { switch(oldOpcode) { - ExpandCaseToAllScalarReturn(AMDIL::BRANCH_COND, AMDIL::IF_LOGICALZ); + ExpandCaseToAllScalarReturn(AMDGPU::BRANCH_COND, AMDGPU::IF_LOGICALZ); default: assert(0 && "internal error"); }; @@ -2803,7 +2803,7 @@ struct CFGStructTraits<AMDILCFGStructurizer> static int getContinueNzeroOpcode(int oldOpcode) { switch(oldOpcode) { - ExpandCaseToAllScalarReturn(AMDIL::BRANCH_COND, AMDIL::CONTINUE_LOGICALNZ); + 
ExpandCaseToAllScalarReturn(AMDGPU::BRANCH_COND, AMDGPU::CONTINUE_LOGICALNZ); default: assert(0 && "internal error"); }; @@ -2812,7 +2812,7 @@ struct CFGStructTraits<AMDILCFGStructurizer> static int getContinueZeroOpcode(int oldOpcode) { switch(oldOpcode) { - ExpandCaseToAllScalarReturn(AMDIL::BRANCH_COND, AMDIL::CONTINUE_LOGICALZ); + ExpandCaseToAllScalarReturn(AMDGPU::BRANCH_COND, AMDGPU::CONTINUE_LOGICALZ); default: assert(0 && "internal error"); }; @@ -2844,7 +2844,7 @@ struct CFGStructTraits<AMDILCFGStructurizer> static bool isCondBranch(MachineInstr *instr) { switch (instr->getOpcode()) { - ExpandCaseToAllScalarTypes(AMDIL::BRANCH_COND); + ExpandCaseToAllScalarTypes(AMDGPU::BRANCH_COND); break; default: return false; @@ -2854,7 +2854,7 @@ struct CFGStructTraits<AMDILCFGStructurizer> static bool isUncondBranch(MachineInstr *instr) { switch (instr->getOpcode()) { - case AMDIL::BRANCH: + case AMDGPU::BRANCH: break; default: return false; @@ -2911,7 +2911,7 @@ struct CFGStructTraits<AMDILCFGStructurizer> MachineBasicBlock::reverse_iterator iter = blk->rbegin(); if (iter != blk->rend()) { MachineInstr *instr = &(*iter); - if (instr->getOpcode() == AMDIL::RETURN) { + if (instr->getOpcode() == AMDGPU::RETURN) { return instr; } } @@ -2922,7 +2922,7 @@ struct CFGStructTraits<AMDILCFGStructurizer> MachineBasicBlock::reverse_iterator iter = blk->rbegin(); if (iter != blk->rend()) { MachineInstr *instr = &(*iter); - if (instr->getOpcode() == AMDIL::CONTINUE) { + if (instr->getOpcode() == AMDGPU::CONTINUE) { return instr; } } @@ -2932,7 +2932,7 @@ struct CFGStructTraits<AMDILCFGStructurizer> static MachineInstr *getLoopBreakInstr(MachineBasicBlock *blk) { for (MachineBasicBlock::iterator iter = blk->begin(); (iter != blk->end()); ++iter) { MachineInstr *instr = &(*iter); - if ((instr->getOpcode() == AMDIL::BREAK_LOGICALNZ_i32) || (instr->getOpcode() == AMDIL::BREAK_LOGICALZ_i32)) { + if ((instr->getOpcode() == AMDGPU::BREAK_LOGICALNZ_i32) || (instr->getOpcode() == AMDGPU::BREAK_LOGICALZ_i32)) { return instr; } } @@ -3173,8 +3173,8 @@ struct CFGStructTraits<AMDILCFGStructurizer> MachineBasicBlock::iterator iterEnd = entryBlk->end(); MachineBasicBlock::iterator iter = pre; while (iter != iterEnd) { - if (pre->getOpcode() == AMDIL::CONTINUE - && iter->getOpcode() == AMDIL::ENDLOOP) { + if (pre->getOpcode() == AMDGPU::CONTINUE + && iter->getOpcode() == AMDGPU::ENDLOOP) { contInstr.push_back(pre); } pre = iter; diff --git a/src/gallium/drivers/radeon/AMDILFormats.td b/src/gallium/drivers/radeon/AMDILFormats.td index 5418c645a53..5a71ded9b68 100644 --- a/src/gallium/drivers/radeon/AMDILFormats.td +++ b/src/gallium/drivers/radeon/AMDILFormats.td @@ -18,7 +18,7 @@ include "AMDILTokenDesc.td" class ILFormat<ILOpCode op, dag outs, dag ins, string asmstr, list<dag> pattern> : Instruction { - let Namespace = "AMDIL"; + let Namespace = "AMDGPU"; dag OutOperandList = outs; dag InOperandList = ins; ILOpCode operation = op; diff --git a/src/gallium/drivers/radeon/AMDILISelDAGToDAG.cpp b/src/gallium/drivers/radeon/AMDILISelDAGToDAG.cpp index c3212fb20cb..df0ac75c828 100644 --- a/src/gallium/drivers/radeon/AMDILISelDAGToDAG.cpp +++ b/src/gallium/drivers/radeon/AMDILISelDAGToDAG.cpp @@ -161,7 +161,7 @@ SDNode *AMDILDAGToDAGISel::Select(SDNode *N) { if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(N)) { unsigned int FI = FIN->getIndex(); EVT OpVT = N->getValueType(0); - unsigned int NewOpc = AMDIL::COPY; + unsigned int NewOpc = AMDGPU::COPY; SDValue TFI = CurDAG->getTargetFrameIndex(FI, MVT::i32); return 
CurDAG->SelectNodeTo(N, NewOpc, OpVT, TFI); } @@ -367,7 +367,7 @@ bool AMDILDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base, && isInt<16>(IMMOffset->getZExtValue())) { Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), CurDAG->getEntryNode().getDebugLoc(), - AMDIL::ZERO, MVT::i32); + AMDGPU::ZERO, MVT::i32); Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32); return true; } diff --git a/src/gallium/drivers/radeon/AMDILISelLowering.cpp b/src/gallium/drivers/radeon/AMDILISelLowering.cpp index 28380010a94..81951c15755 100644 --- a/src/gallium/drivers/radeon/AMDILISelLowering.cpp +++ b/src/gallium/drivers/radeon/AMDILISelLowering.cpp @@ -647,7 +647,7 @@ AMDILTargetLowering::LowerMemArgument( setOperationAction(ISD::Constant , MVT::i32 , Legal); setOperationAction(ISD::TRAP , MVT::Other , Legal); - setStackPointerRegisterToSaveRestore(AMDIL::SP); + setStackPointerRegisterToSaveRestore(AMDGPU::SP); setSchedulingPreference(Sched::RegPressure); setPow2DivIsCheap(false); setPrefLoopAlignment(16); @@ -1453,7 +1453,7 @@ AMDILTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, { SDValue Chain = Op.getOperand(0); SDValue Size = Op.getOperand(1); - unsigned int SPReg = AMDIL::SP; + unsigned int SPReg = AMDGPU::SP; DebugLoc DL = Op.getDebugLoc(); SDValue SP = DAG.getCopyFromReg(Chain, DL, diff --git a/src/gallium/drivers/radeon/AMDILInstrInfo.cpp b/src/gallium/drivers/radeon/AMDILInstrInfo.cpp index c7259d8b9f9..723d5a133a6 100644 --- a/src/gallium/drivers/radeon/AMDILInstrInfo.cpp +++ b/src/gallium/drivers/radeon/AMDILInstrInfo.cpp @@ -91,8 +91,8 @@ bool AMDILInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter, switch (iter->getOpcode()) { default: break; - ExpandCaseToAllScalarTypes(AMDIL::BRANCH_COND); - case AMDIL::BRANCH: + ExpandCaseToAllScalarTypes(AMDGPU::BRANCH_COND); + case AMDGPU::BRANCH: return true; }; ++iter; @@ -113,7 +113,7 @@ bool AMDILInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, } else { MachineInstr *firstBranch = iter; if (!getNextBranchInstr(++iter, MBB)) { - if (firstBranch->getOpcode() == AMDIL::BRANCH) { + if (firstBranch->getOpcode() == AMDGPU::BRANCH) { TBB = firstBranch->getOperand(0).getMBB(); firstBranch->eraseFromParent(); retVal = false; @@ -129,7 +129,7 @@ bool AMDILInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, } else { MachineInstr *secondBranch = iter; if (!getNextBranchInstr(++iter, MBB)) { - if (secondBranch->getOpcode() == AMDIL::BRANCH) { + if (secondBranch->getOpcode() == AMDGPU::BRANCH) { TBB = firstBranch->getOperand(0).getMBB(); Cond.push_back(firstBranch->getOperand(1)); FBB = secondBranch->getOperand(0).getMBB(); @@ -154,8 +154,8 @@ unsigned int AMDILInstrInfo::getBranchInstr(const MachineOperand &op) const { switch (MI->getDesc().OpInfo->RegClass) { default: // FIXME: fallthrough?? 
- case AMDIL::GPRI32RegClassID: return AMDIL::BRANCH_COND_i32; - case AMDIL::GPRF32RegClassID: return AMDIL::BRANCH_COND_f32; + case AMDGPU::GPRI32RegClassID: return AMDGPU::BRANCH_COND_i32; + case AMDGPU::GPRF32RegClassID: return AMDGPU::BRANCH_COND_f32; }; } @@ -172,7 +172,7 @@ AMDILInstrInfo::InsertBranch(MachineBasicBlock &MBB, } if (FBB == 0) { if (Cond.empty()) { - BuildMI(&MBB, DL, get(AMDIL::BRANCH)).addMBB(TBB); + BuildMI(&MBB, DL, get(AMDGPU::BRANCH)).addMBB(TBB); } else { BuildMI(&MBB, DL, get(getBranchInstr(Cond[0]))) .addMBB(TBB).addReg(Cond[0].getReg()); @@ -181,7 +181,7 @@ AMDILInstrInfo::InsertBranch(MachineBasicBlock &MBB, } else { BuildMI(&MBB, DL, get(getBranchInstr(Cond[0]))) .addMBB(TBB).addReg(Cond[0].getReg()); - BuildMI(&MBB, DL, get(AMDIL::BRANCH)).addMBB(FBB); + BuildMI(&MBB, DL, get(AMDGPU::BRANCH)).addMBB(FBB); } assert(0 && "Inserting two branches not supported"); return 0; @@ -196,8 +196,8 @@ unsigned int AMDILInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const { switch (I->getOpcode()) { default: return 0; - ExpandCaseToAllScalarTypes(AMDIL::BRANCH_COND); - case AMDIL::BRANCH: + ExpandCaseToAllScalarTypes(AMDGPU::BRANCH_COND); + case AMDGPU::BRANCH: I->eraseFromParent(); break; } @@ -211,7 +211,7 @@ unsigned int AMDILInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const { // FIXME: only one case?? default: return 1; - ExpandCaseToAllScalarTypes(AMDIL::BRANCH_COND); + ExpandCaseToAllScalarTypes(AMDGPU::BRANCH_COND); I->eraseFromParent(); break; } @@ -224,9 +224,9 @@ MachineBasicBlock::iterator skipFlowControl(MachineBasicBlock *MBB) { return MBB->end(); } while (--tmp) { - if (tmp->getOpcode() == AMDIL::ENDLOOP - || tmp->getOpcode() == AMDIL::ENDIF - || tmp->getOpcode() == AMDIL::ELSE) { + if (tmp->getOpcode() == AMDGPU::ENDLOOP + || tmp->getOpcode() == AMDGPU::ENDIF + || tmp->getOpcode() == AMDGPU::ELSE) { if (tmp == MBB->begin()) { return tmp; } else { @@ -253,11 +253,11 @@ AMDILInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, DebugLoc DL; switch (RC->getID()) { - case AMDIL::GPRF32RegClassID: - Opc = AMDIL::PRIVATESTORE_f32; + case AMDGPU::GPRF32RegClassID: + Opc = AMDGPU::PRIVATESTORE_f32; break; - case AMDIL::GPRI32RegClassID: - Opc = AMDIL::PRIVATESTORE_i32; + case AMDGPU::GPRI32RegClassID: + Opc = AMDGPU::PRIVATESTORE_i32; break; } if (MI != MBB.end()) DL = MI->getDebugLoc(); @@ -288,11 +288,11 @@ AMDILInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, MachineFrameInfo &MFI = *MF.getFrameInfo(); DebugLoc DL; switch (RC->getID()) { - case AMDIL::GPRF32RegClassID: - Opc = AMDIL::PRIVATELOAD_f32; + case AMDGPU::GPRF32RegClassID: + Opc = AMDGPU::PRIVATELOAD_f32; break; - case AMDIL::GPRI32RegClassID: - Opc = AMDIL::PRIVATELOAD_i32; + case AMDGPU::GPRI32RegClassID: + Opc = AMDGPU::PRIVATELOAD_i32; break; } diff --git a/src/gallium/drivers/radeon/AMDILRegisterInfo.cpp b/src/gallium/drivers/radeon/AMDILRegisterInfo.cpp index 51f6135b2fc..989ccd9faf7 100644 --- a/src/gallium/drivers/radeon/AMDILRegisterInfo.cpp +++ b/src/gallium/drivers/radeon/AMDILRegisterInfo.cpp @@ -128,13 +128,13 @@ AMDILRegisterInfo::processFunctionBeforeFrameFinalized( unsigned int AMDILRegisterInfo::getRARegister() const { - return AMDIL::RA; + return AMDGPU::RA; } unsigned int AMDILRegisterInfo::getFrameRegister(const MachineFunction &MF) const { - return AMDIL::FP; + return AMDGPU::FP; } unsigned int diff --git a/src/gallium/drivers/radeon/AMDILRegisterInfo.h b/src/gallium/drivers/radeon/AMDILRegisterInfo.h index 4bfb9a742bd..892350b9e9e 100644 --- 
a/src/gallium/drivers/radeon/AMDILRegisterInfo.h +++ b/src/gallium/drivers/radeon/AMDILRegisterInfo.h @@ -83,7 +83,7 @@ namespace llvm virtual const TargetRegisterClass * getCFGStructurizerRegClass(MVT VT) const { - return AMDIL::GPRI32RegisterClass; + return AMDGPU::GPRI32RegisterClass; } private: mutable int64_t baseOffset; diff --git a/src/gallium/drivers/radeon/AMDILRegisterInfo.td b/src/gallium/drivers/radeon/AMDILRegisterInfo.td index 94f2fc56f65..42235ff37a1 100644 --- a/src/gallium/drivers/radeon/AMDILRegisterInfo.td +++ b/src/gallium/drivers/radeon/AMDILRegisterInfo.td @@ -14,7 +14,7 @@ class AMDILReg<bits<16> num, string n> : Register<n> { field bits<16> Value; let Value = num; - let Namespace = "AMDIL"; + let Namespace = "AMDGPU"; } // We will start with 8 registers for each class before expanding to more @@ -84,7 +84,7 @@ def R1001: AMDILReg<1001, "r1001">, DwarfRegNum<[1001]>; def MEM : AMDILReg<999, "mem">, DwarfRegNum<[999]>; def RA : AMDILReg<998, "r998">, DwarfRegNum<[998]>; def FP : AMDILReg<997, "r997">, DwarfRegNum<[997]>; -def GPRI16 : RegisterClass<"AMDIL", [i16], 16, +def GPRI16 : RegisterClass<"AMDGPU", [i16], 16, (add (sequence "R%u", 1, 20), RA, SP, T1, T2, T3, T4, T5, SDP, R1010, R1011, R1001, R1002, R1003, R1004, R1005, R1006, R1007, R1008, MEM, R1012)> { let AltOrders = [(add (sequence "R%u", 1, 20))]; @@ -92,7 +92,7 @@ def GPRI16 : RegisterClass<"AMDIL", [i16], 16, return 1; }]; } -def GPRI32 : RegisterClass<"AMDIL", [i32], 32, +def GPRI32 : RegisterClass<"AMDGPU", [i32], 32, (add (sequence "R%u", 1, 20), RA, SP, T1, T2, T3, T4, T5, SDP, R1010, R1011, R1001, R1002, R1003, R1004, R1005, R1006, R1007, R1008, MEM, R1012)> { let AltOrders = [(add (sequence "R%u", 1, 20))]; @@ -100,7 +100,7 @@ def GPRI32 : RegisterClass<"AMDIL", [i32], 32, return 1; }]; } -def GPRF32 : RegisterClass<"AMDIL", [f32], 32, +def GPRF32 : RegisterClass<"AMDGPU", [f32], 32, (add (sequence "R%u", 1, 20), RA, SP, T1, T2, T3, T4, T5, SDP, R1010, R1011, R1001, R1002, R1003, R1004, R1005, R1006, R1007, R1008, MEM, R1012)> { let AltOrders = [(add (sequence "R%u", 1, 20))]; diff --git a/src/gallium/drivers/radeon/AMDILUtilityFunctions.h b/src/gallium/drivers/radeon/AMDILUtilityFunctions.h index 419da393b20..e6666f97705 100644 --- a/src/gallium/drivers/radeon/AMDILUtilityFunctions.h +++ b/src/gallium/drivers/radeon/AMDILUtilityFunctions.h @@ -65,8 +65,8 @@ ExpandCaseToIntReturn(Instr, Return) // These macros expand to common groupings of RegClass ID's #define ExpandCaseTo1CompRegID \ -case AMDIL::GPRI32RegClassID: \ -case AMDIL::GPRF32RegClassID: +case AMDGPU::GPRI32RegClassID: \ +case AMDGPU::GPRF32RegClassID: #define ExpandCaseTo32BitType(Instr) \ case Instr##_i32: \ diff --git a/src/gallium/drivers/radeon/R600CodeEmitter.cpp b/src/gallium/drivers/radeon/R600CodeEmitter.cpp index 9db6ba86232..4c7962bcee4 100644 --- a/src/gallium/drivers/radeon/R600CodeEmitter.cpp +++ b/src/gallium/drivers/radeon/R600CodeEmitter.cpp @@ -181,13 +181,13 @@ bool R600CodeEmitter::runOnMachineFunction(MachineFunction &MF) { isReduction = false; isVector = false; isCube = false; - } else if (MI.getOpcode() == AMDIL::RETURN || - MI.getOpcode() == AMDIL::BUNDLE || - MI.getOpcode() == AMDIL::KILL) { + } else if (MI.getOpcode() == AMDGPU::RETURN || + MI.getOpcode() == AMDGPU::BUNDLE || + MI.getOpcode() == AMDGPU::KILL) { continue; } else { switch(MI.getOpcode()) { - case AMDIL::RAT_WRITE_CACHELESS_eg: + case AMDGPU::RAT_WRITE_CACHELESS_eg: { uint64_t inst = getBinaryCodeForInstr(MI); // Set End Of Program bit @@ -196,16 
+196,16 @@ bool R600CodeEmitter::runOnMachineFunction(MachineFunction &MF) { // set in a prior pass. MachineBasicBlock::iterator NextI = llvm::next(I); MachineInstr &NextMI = *NextI; - if (NextMI.getOpcode() == AMDIL::RETURN) { + if (NextMI.getOpcode() == AMDGPU::RETURN) { inst |= (((uint64_t)1) << 53); } emitByte(INSTR_NATIVE); emit(inst); break; } - case AMDIL::VTX_READ_PARAM_eg: - case AMDIL::VTX_READ_GLOBAL_eg: - case AMDIL::VTX_READ_GLOBAL_128_eg: + case AMDGPU::VTX_READ_PARAM_eg: + case AMDGPU::VTX_READ_GLOBAL_eg: + case AMDGPU::VTX_READ_GLOBAL_128_eg: { uint64_t InstWord01 = getBinaryCodeForInstr(MI); uint32_t InstWord2 = MI.getOperand(2).getImm(); // Offset @@ -281,7 +281,7 @@ void R600CodeEmitter::emitSrc(const MachineOperand & MO, int chan_override) if (MO.isReg()) { unsigned reg = MO.getReg(); emitTwoBytes(getHWReg(reg)); - if (reg == AMDIL::ALU_LITERAL_X) { + if (reg == AMDGPU::ALU_LITERAL_X) { const MachineInstr * parent = MO.getParent(); unsigned immOpIndex = parent->getNumExplicitOperands() - 1; MachineOperand immOp = parent->getOperand(immOpIndex); @@ -312,7 +312,7 @@ void R600CodeEmitter::emitSrc(const MachineOperand & MO, int chan_override) if ((!(MO.getTargetFlags() & MO_FLAG_ABS)) && (MO.getTargetFlags() & MO_FLAG_NEG || (MO.isReg() && - (MO.getReg() == AMDIL::NEG_ONE || MO.getReg() == AMDIL::NEG_HALF)))){ + (MO.getReg() == AMDGPU::NEG_ONE || MO.getReg() == AMDGPU::NEG_HALF)))){ emitByte(1); } else { emitByte(0); @@ -413,7 +413,7 @@ void R600CodeEmitter::emitTexInstr(MachineInstr &MI) { unsigned opcode = MI.getOpcode(); - bool hasOffsets = (opcode == AMDIL::TEX_LD); + bool hasOffsets = (opcode == AMDGPU::TEX_LD); unsigned op_offset = hasOffsets ? 3 : 0; int64_t sampler = MI.getOperand(op_offset+2).getImm(); int64_t textureType = MI.getOperand(op_offset+3).getImm(); @@ -460,7 +460,7 @@ void R600CodeEmitter::emitTexInstr(MachineInstr &MI) if (textureType == TEXTURE_1D_ARRAY || textureType == TEXTURE_SHADOW1D_ARRAY) { - if (opcode == AMDIL::TEX_SAMPLE_C_L || opcode == AMDIL::TEX_SAMPLE_C_LB) { + if (opcode == AMDGPU::TEX_SAMPLE_C_L || opcode == AMDGPU::TEX_SAMPLE_C_LB) { coordType[ELEMENT_Y] = 0; } else { coordType[ELEMENT_Z] = 0; @@ -490,8 +490,8 @@ void R600CodeEmitter::emitTexInstr(MachineInstr &MI) || textureType == TEXTURE_SHADOW2D || textureType == TEXTURE_SHADOWRECT || textureType == TEXTURE_SHADOW1D_ARRAY) - && opcode != AMDIL::TEX_SAMPLE_C_L - && opcode != AMDIL::TEX_SAMPLE_C_LB) { + && opcode != AMDGPU::TEX_SAMPLE_C_L + && opcode != AMDGPU::TEX_SAMPLE_C_LB) { srcSelect[ELEMENT_W] = ELEMENT_Z; } @@ -517,37 +517,37 @@ void R600CodeEmitter::emitFCInstr(MachineInstr &MI) // Emit FC Instruction enum FCInstr instr; switch (MI.getOpcode()) { - case AMDIL::BREAK_LOGICALZ_f32: + case AMDGPU::BREAK_LOGICALZ_f32: instr = FC_BREAK; break; - case AMDIL::BREAK_LOGICALNZ_f32: - case AMDIL::BREAK_LOGICALNZ_i32: + case AMDGPU::BREAK_LOGICALNZ_f32: + case AMDGPU::BREAK_LOGICALNZ_i32: instr = FC_BREAK_NZ_INT; break; - case AMDIL::BREAK_LOGICALZ_i32: + case AMDGPU::BREAK_LOGICALZ_i32: instr = FC_BREAK_Z_INT; break; - case AMDIL::CONTINUE_LOGICALNZ_f32: - case AMDIL::CONTINUE_LOGICALNZ_i32: + case AMDGPU::CONTINUE_LOGICALNZ_f32: + case AMDGPU::CONTINUE_LOGICALNZ_i32: instr = FC_CONTINUE; break; - case AMDIL::IF_LOGICALNZ_f32: - case AMDIL::IF_LOGICALNZ_i32: + case AMDGPU::IF_LOGICALNZ_f32: + case AMDGPU::IF_LOGICALNZ_i32: instr = FC_IF; break; - case AMDIL::IF_LOGICALZ_f32: + case AMDGPU::IF_LOGICALZ_f32: abort(); break; - case AMDIL::ELSE: + case AMDGPU::ELSE: instr = FC_ELSE; 
break; - case AMDIL::ENDIF: + case AMDGPU::ENDIF: instr = FC_ENDIF; break; - case AMDIL::ENDLOOP: + case AMDGPU::ENDLOOP: instr = FC_ENDLOOP; break; - case AMDIL::WHILELOOP: + case AMDGPU::WHILELOOP: instr = FC_BGNLOOP; break; default: @@ -593,7 +593,7 @@ unsigned R600CodeEmitter::getHWReg(unsigned regNo) const unsigned hwReg; hwReg = TRI->getHWRegIndex(regNo); - if (AMDIL::R600_CReg32RegClass.contains(regNo)) { + if (AMDGPU::R600_CReg32RegClass.contains(regNo)) { hwReg += 512; } return hwReg; diff --git a/src/gallium/drivers/radeon/R600GenRegisterInfo.pl b/src/gallium/drivers/radeon/R600GenRegisterInfo.pl index 9e7cf61428b..6bbe21c5f0a 100644 --- a/src/gallium/drivers/radeon/R600GenRegisterInfo.pl +++ b/src/gallium/drivers/radeon/R600GenRegisterInfo.pl @@ -25,11 +25,11 @@ my $TREG_MAX = TEMP_REG_COUNT - 1; print <<STRING; class R600Reg <string name> : Register<name> { - let Namespace = "AMDIL"; + let Namespace = "AMDGPU"; } class R600Reg_128<string n, list<Register> subregs> : RegisterWithSubRegs<n, subregs> { - let Namespace = "AMDIL"; + let Namespace = "AMDGPU"; let SubRegIndices = [sel_x, sel_y, sel_z, sel_w]; } @@ -70,21 +70,21 @@ def NEG_ONE : R600Reg<"-1.0">; def PV_X : R600Reg<"pv.x">; def ALU_LITERAL_X : R600Reg<"literal.x">; -def R600_CReg32 : RegisterClass <"AMDIL", [f32, i32], 32, (add +def R600_CReg32 : RegisterClass <"AMDGPU", [f32, i32], 32, (add $creg_list)>; -def R600_TReg32 : RegisterClass <"AMDIL", [f32, i32], 32, (add +def R600_TReg32 : RegisterClass <"AMDGPU", [f32, i32], 32, (add $treg_string)>; -def R600_TReg32_X : RegisterClass <"AMDIL", [f32, i32], 32, (add +def R600_TReg32_X : RegisterClass <"AMDGPU", [f32, i32], 32, (add $treg_x_string)>; -def R600_Reg32 : RegisterClass <"AMDIL", [f32, i32], 32, (add +def R600_Reg32 : RegisterClass <"AMDGPU", [f32, i32], 32, (add R600_TReg32, R600_CReg32, ZERO, HALF, ONE, ONE_INT, PV_X, ALU_LITERAL_X, NEG_ONE, NEG_HALF)>; -def R600_Reg128 : RegisterClass<"AMDIL", [v4f32, v4i32], 128, (add +def R600_Reg128 : RegisterClass<"AMDGPU", [v4f32, v4i32], 128, (add $t128_string)> { let SubRegClasses = [(R600_TReg32 sel_x, sel_y, sel_z, sel_w)]; @@ -122,7 +122,7 @@ unsigned R600RegisterInfo::getHWRegIndexGen(unsigned reg) const STRING foreach my $key (keys(%index_map)) { foreach my $reg (@{$index_map{$key}}) { - print OUTFILE " case AMDIL::$reg:\n"; + print OUTFILE " case AMDGPU::$reg:\n"; } print OUTFILE " return $key;\n\n"; } @@ -139,7 +139,7 @@ STRING foreach my $key (keys(%chan_map)) { foreach my $reg (@{$chan_map{$key}}) { - print OUTFILE " case AMDIL::$reg:\n"; + print OUTFILE " case AMDGPU::$reg:\n"; } my $val; if ($key eq 'X') { diff --git a/src/gallium/drivers/radeon/R600ISelLowering.cpp b/src/gallium/drivers/radeon/R600ISelLowering.cpp index 5694c0bc9a2..00ab7519dd6 100644 --- a/src/gallium/drivers/radeon/R600ISelLowering.cpp +++ b/src/gallium/drivers/radeon/R600ISelLowering.cpp @@ -25,10 +25,10 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) : TII(static_cast<const R600InstrInfo*>(TM.getInstrInfo())) { setOperationAction(ISD::MUL, MVT::i64, Expand); - addRegisterClass(MVT::v4f32, &AMDIL::R600_Reg128RegClass); - addRegisterClass(MVT::f32, &AMDIL::R600_Reg32RegClass); - addRegisterClass(MVT::v4i32, &AMDIL::R600_Reg128RegClass); - addRegisterClass(MVT::i32, &AMDIL::R600_Reg32RegClass); + addRegisterClass(MVT::v4f32, &AMDGPU::R600_Reg128RegClass); + addRegisterClass(MVT::f32, &AMDGPU::R600_Reg32RegClass); + addRegisterClass(MVT::v4i32, &AMDGPU::R600_Reg128RegClass); + addRegisterClass(MVT::i32, 
&AMDGPU::R600_Reg32RegClass); computeRegisterProperties(); setOperationAction(ISD::FSUB, MVT::f32, Expand); @@ -47,92 +47,92 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter( switch (MI->getOpcode()) { default: return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); - case AMDIL::TGID_X: - addLiveIn(MI, MF, MRI, TII, AMDIL::T1_X); + case AMDGPU::TGID_X: + addLiveIn(MI, MF, MRI, TII, AMDGPU::T1_X); break; - case AMDIL::TGID_Y: - addLiveIn(MI, MF, MRI, TII, AMDIL::T1_Y); + case AMDGPU::TGID_Y: + addLiveIn(MI, MF, MRI, TII, AMDGPU::T1_Y); break; - case AMDIL::TGID_Z: - addLiveIn(MI, MF, MRI, TII, AMDIL::T1_Z); + case AMDGPU::TGID_Z: + addLiveIn(MI, MF, MRI, TII, AMDGPU::T1_Z); break; - case AMDIL::TIDIG_X: - addLiveIn(MI, MF, MRI, TII, AMDIL::T0_X); + case AMDGPU::TIDIG_X: + addLiveIn(MI, MF, MRI, TII, AMDGPU::T0_X); break; - case AMDIL::TIDIG_Y: - addLiveIn(MI, MF, MRI, TII, AMDIL::T0_Y); + case AMDGPU::TIDIG_Y: + addLiveIn(MI, MF, MRI, TII, AMDGPU::T0_Y); break; - case AMDIL::TIDIG_Z: - addLiveIn(MI, MF, MRI, TII, AMDIL::T0_Z); + case AMDGPU::TIDIG_Z: + addLiveIn(MI, MF, MRI, TII, AMDGPU::T0_Z); break; - case AMDIL::NGROUPS_X: + case AMDGPU::NGROUPS_X: lowerImplicitParameter(MI, *BB, MRI, 0); break; - case AMDIL::NGROUPS_Y: + case AMDGPU::NGROUPS_Y: lowerImplicitParameter(MI, *BB, MRI, 1); break; - case AMDIL::NGROUPS_Z: + case AMDGPU::NGROUPS_Z: lowerImplicitParameter(MI, *BB, MRI, 2); break; - case AMDIL::GLOBAL_SIZE_X: + case AMDGPU::GLOBAL_SIZE_X: lowerImplicitParameter(MI, *BB, MRI, 3); break; - case AMDIL::GLOBAL_SIZE_Y: + case AMDGPU::GLOBAL_SIZE_Y: lowerImplicitParameter(MI, *BB, MRI, 4); break; - case AMDIL::GLOBAL_SIZE_Z: + case AMDGPU::GLOBAL_SIZE_Z: lowerImplicitParameter(MI, *BB, MRI, 5); break; - case AMDIL::LOCAL_SIZE_X: + case AMDGPU::LOCAL_SIZE_X: lowerImplicitParameter(MI, *BB, MRI, 6); break; - case AMDIL::LOCAL_SIZE_Y: + case AMDGPU::LOCAL_SIZE_Y: lowerImplicitParameter(MI, *BB, MRI, 7); break; - case AMDIL::LOCAL_SIZE_Z: + case AMDGPU::LOCAL_SIZE_Z: lowerImplicitParameter(MI, *BB, MRI, 8); break; - case AMDIL::CLAMP_R600: + case AMDGPU::CLAMP_R600: MI->getOperand(0).addTargetFlag(MO_FLAG_CLAMP); - BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::MOV)) + BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::MOV)) .addOperand(MI->getOperand(0)) .addOperand(MI->getOperand(1)); break; - case AMDIL::FABS_R600: + case AMDGPU::FABS_R600: MI->getOperand(1).addTargetFlag(MO_FLAG_ABS); - BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::MOV)) + BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::MOV)) .addOperand(MI->getOperand(0)) .addOperand(MI->getOperand(1)); break; - case AMDIL::FNEG_R600: + case AMDGPU::FNEG_R600: MI->getOperand(1).addTargetFlag(MO_FLAG_NEG); - BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::MOV)) + BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::MOV)) .addOperand(MI->getOperand(0)) .addOperand(MI->getOperand(1)); break; - case AMDIL::R600_LOAD_CONST: + case AMDGPU::R600_LOAD_CONST: { int64_t RegIndex = MI->getOperand(1).getImm(); - unsigned ConstantReg = AMDIL::R600_CReg32RegClass.getRegister(RegIndex); - BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::COPY)) + unsigned ConstantReg = AMDGPU::R600_CReg32RegClass.getRegister(RegIndex); + BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::COPY)) .addOperand(MI->getOperand(0)) .addReg(ConstantReg); break; } - case AMDIL::LOAD_INPUT: + case AMDGPU::LOAD_INPUT: { int64_t RegIndex = MI->getOperand(1).getImm(); addLiveIn(MI, MF, MRI, TII, - 
AMDIL::R600_TReg32RegClass.getRegister(RegIndex)); + AMDGPU::R600_TReg32RegClass.getRegister(RegIndex)); break; } - case AMDIL::MASK_WRITE: + case AMDGPU::MASK_WRITE: { unsigned maskedRegister = MI->getOperand(0).getReg(); assert(TargetRegisterInfo::isVirtualRegister(maskedRegister)); @@ -143,21 +143,21 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter( return BB; } - case AMDIL::RAT_WRITE_CACHELESS_eg: + case AMDGPU::RAT_WRITE_CACHELESS_eg: { // Convert to DWORD address unsigned NewAddr = MRI.createVirtualRegister( - AMDIL::R600_TReg32_XRegisterClass); + AMDGPU::R600_TReg32_XRegisterClass); unsigned ShiftValue = MRI.createVirtualRegister( - AMDIL::R600_TReg32RegisterClass); + AMDGPU::R600_TReg32RegisterClass); // XXX In theory, we should be able to pass ShiftValue directly to // the LSHR_eg instruction as an inline literal, but I tried doing it // this way and it didn't produce the correct results. - BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::MOV), ShiftValue) - .addReg(AMDIL::ALU_LITERAL_X) + BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::MOV), ShiftValue) + .addReg(AMDGPU::ALU_LITERAL_X) .addImm(2); - BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::LSHR_eg), NewAddr) + BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::LSHR_eg), NewAddr) .addOperand(MI->getOperand(1)) .addReg(ShiftValue); BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode())) @@ -166,12 +166,12 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter( break; } - case AMDIL::STORE_OUTPUT: + case AMDGPU::STORE_OUTPUT: { int64_t OutputIndex = MI->getOperand(1).getImm(); - unsigned OutputReg = AMDIL::R600_TReg32RegClass.getRegister(OutputIndex); + unsigned OutputReg = AMDGPU::R600_TReg32RegClass.getRegister(OutputIndex); - BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::COPY), OutputReg) + BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::COPY), OutputReg) .addOperand(MI->getOperand(0)); if (!MRI.isLiveOut(OutputReg)) { @@ -180,30 +180,30 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter( break; } - case AMDIL::RESERVE_REG: + case AMDGPU::RESERVE_REG: { R600MachineFunctionInfo * MFI = MF->getInfo<R600MachineFunctionInfo>(); int64_t ReservedIndex = MI->getOperand(0).getImm(); unsigned ReservedReg = - AMDIL::R600_TReg32RegClass.getRegister(ReservedIndex); + AMDGPU::R600_TReg32RegClass.getRegister(ReservedIndex); MFI->ReservedRegs.push_back(ReservedReg); break; } - case AMDIL::TXD: + case AMDGPU::TXD: { - unsigned t0 = MRI.createVirtualRegister(AMDIL::R600_Reg128RegisterClass); - unsigned t1 = MRI.createVirtualRegister(AMDIL::R600_Reg128RegisterClass); + unsigned t0 = MRI.createVirtualRegister(AMDGPU::R600_Reg128RegisterClass); + unsigned t1 = MRI.createVirtualRegister(AMDGPU::R600_Reg128RegisterClass); - BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::TEX_SET_GRADIENTS_H), t0) + BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), t0) .addOperand(MI->getOperand(3)) .addOperand(MI->getOperand(4)) .addOperand(MI->getOperand(5)); - BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::TEX_SET_GRADIENTS_V), t1) + BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), t1) .addOperand(MI->getOperand(2)) .addOperand(MI->getOperand(4)) .addOperand(MI->getOperand(5)); - BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::TEX_SAMPLE_G)) + BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_G)) .addOperand(MI->getOperand(0)) .addOperand(MI->getOperand(1)) 
.addOperand(MI->getOperand(4)) @@ -212,20 +212,20 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter( .addReg(t1, RegState::Implicit); break; } - case AMDIL::TXD_SHADOW: + case AMDGPU::TXD_SHADOW: { - unsigned t0 = MRI.createVirtualRegister(AMDIL::R600_Reg128RegisterClass); - unsigned t1 = MRI.createVirtualRegister(AMDIL::R600_Reg128RegisterClass); + unsigned t0 = MRI.createVirtualRegister(AMDGPU::R600_Reg128RegisterClass); + unsigned t1 = MRI.createVirtualRegister(AMDGPU::R600_Reg128RegisterClass); - BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::TEX_SET_GRADIENTS_H), t0) + BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), t0) .addOperand(MI->getOperand(3)) .addOperand(MI->getOperand(4)) .addOperand(MI->getOperand(5)); - BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::TEX_SET_GRADIENTS_V), t1) + BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), t1) .addOperand(MI->getOperand(2)) .addOperand(MI->getOperand(4)) .addOperand(MI->getOperand(5)); - BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::TEX_SAMPLE_C_G)) + BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_C_G)) .addOperand(MI->getOperand(0)) .addOperand(MI->getOperand(1)) .addOperand(MI->getOperand(4)) @@ -246,14 +246,14 @@ void R600TargetLowering::lowerImplicitParameter(MachineInstr *MI, MachineBasicBl MachineRegisterInfo & MRI, unsigned dword_offset) const { MachineBasicBlock::iterator I = *MI; - unsigned PtrReg = MRI.createVirtualRegister(&AMDIL::R600_TReg32_XRegClass); - MRI.setRegClass(MI->getOperand(0).getReg(), &AMDIL::R600_TReg32_XRegClass); + unsigned PtrReg = MRI.createVirtualRegister(&AMDGPU::R600_TReg32_XRegClass); + MRI.setRegClass(MI->getOperand(0).getReg(), &AMDGPU::R600_TReg32_XRegClass); - BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::MOV), PtrReg) - .addReg(AMDIL::ALU_LITERAL_X) + BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::MOV), PtrReg) + .addReg(AMDGPU::ALU_LITERAL_X) .addImm(dword_offset * 4); - BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::VTX_READ_PARAM_eg)) + BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::VTX_READ_PARAM_eg)) .addOperand(MI->getOperand(0)) .addReg(PtrReg) .addImm(0); diff --git a/src/gallium/drivers/radeon/R600InstrInfo.cpp b/src/gallium/drivers/radeon/R600InstrInfo.cpp index e11c0bd60f7..77679abbfc1 100644 --- a/src/gallium/drivers/radeon/R600InstrInfo.cpp +++ b/src/gallium/drivers/radeon/R600InstrInfo.cpp @@ -49,12 +49,13 @@ R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB, bool KillSrc) const { - unsigned subRegMap[4] = {AMDIL::sel_x, AMDIL::sel_y, AMDIL::sel_z, AMDIL::sel_w}; + unsigned subRegMap[4] = {AMDGPU::sel_x, AMDGPU::sel_y, + AMDGPU::sel_z, AMDGPU::sel_w}; - if (AMDIL::R600_Reg128RegClass.contains(DestReg) - && AMDIL::R600_Reg128RegClass.contains(SrcReg)) { + if (AMDGPU::R600_Reg128RegClass.contains(DestReg) + && AMDGPU::R600_Reg128RegClass.contains(SrcReg)) { for (unsigned i = 0; i < 4; i++) { - BuildMI(MBB, MI, DL, get(AMDIL::MOV)) + BuildMI(MBB, MI, DL, get(AMDGPU::MOV)) .addReg(RI.getSubReg(DestReg, subRegMap[i]), RegState::Define) .addReg(RI.getSubReg(SrcReg, subRegMap[i])) .addReg(DestReg, RegState::Define | RegState::Implicit); @@ -62,10 +63,10 @@ R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB, } else { /* We can't copy vec4 registers */ - assert(!AMDIL::R600_Reg128RegClass.contains(DestReg) - && !AMDIL::R600_Reg128RegClass.contains(SrcReg)); + assert(!AMDGPU::R600_Reg128RegClass.contains(DestReg) + && !AMDGPU::R600_Reg128RegClass.contains(SrcReg)); - 
BuildMI(MBB, MI, DL, get(AMDIL::MOV), DestReg) + BuildMI(MBB, MI, DL, get(AMDGPU::MOV), DestReg) .addReg(SrcReg, getKillRegState(KillSrc)); } } @@ -73,9 +74,9 @@ R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB, MachineInstr * R600InstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg, int64_t Imm) const { - MachineInstr * MI = MF->CreateMachineInstr(get(AMDIL::MOV), DebugLoc()); + MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::MOV), DebugLoc()); MachineInstrBuilder(MI).addReg(DstReg, RegState::Define); - MachineInstrBuilder(MI).addReg(AMDIL::ALU_LITERAL_X); + MachineInstrBuilder(MI).addReg(AMDGPU::ALU_LITERAL_X); MachineInstrBuilder(MI).addImm(Imm); return MI; @@ -83,16 +84,16 @@ MachineInstr * R600InstrInfo::getMovImmInstr(MachineFunction *MF, unsigned R600InstrInfo::getIEQOpcode() const { - return AMDIL::SETE_INT; + return AMDGPU::SETE_INT; } bool R600InstrInfo::isMov(unsigned Opcode) const { switch(Opcode) { default: return false; - case AMDIL::MOV: - case AMDIL::MOV_IMM_F32: - case AMDIL::MOV_IMM_I32: + case AMDGPU::MOV: + case AMDGPU::MOV_IMM_F32: + case AMDGPU::MOV_IMM_I32: return true; } } diff --git a/src/gallium/drivers/radeon/R600Instructions.td b/src/gallium/drivers/radeon/R600Instructions.td index da5f364839f..a7d29fe5488 100644 --- a/src/gallium/drivers/radeon/R600Instructions.td +++ b/src/gallium/drivers/radeon/R600Instructions.td @@ -23,7 +23,7 @@ class InstR600 <bits<32> inst, dag outs, dag ins, string asm, list<dag> pattern, bit isVector = 0; let Inst = inst; - let Namespace = "AMDIL"; + let Namespace = "AMDGPU"; let OutOperandList = outs; let InOperandList = ins; let AsmString = asm; @@ -43,7 +43,7 @@ class InstR600ISA <dag outs, dag ins, string asm, list<dag> pattern> : { field bits<64> Inst; - let Namespace = "AMDIL"; + let Namespace = "AMDGPU"; } def MEMxi : Operand<iPTR> { diff --git a/src/gallium/drivers/radeon/R600RegisterInfo.cpp b/src/gallium/drivers/radeon/R600RegisterInfo.cpp index 19df453b836..86bc169a10c 100644 --- a/src/gallium/drivers/radeon/R600RegisterInfo.cpp +++ b/src/gallium/drivers/radeon/R600RegisterInfo.cpp @@ -29,17 +29,17 @@ BitVector R600RegisterInfo::getReservedRegs(const MachineFunction &MF) const BitVector Reserved(getNumRegs()); const R600MachineFunctionInfo * MFI = MF.getInfo<R600MachineFunctionInfo>(); - Reserved.set(AMDIL::ZERO); - Reserved.set(AMDIL::HALF); - Reserved.set(AMDIL::ONE); - Reserved.set(AMDIL::ONE_INT); - Reserved.set(AMDIL::NEG_HALF); - Reserved.set(AMDIL::NEG_ONE); - Reserved.set(AMDIL::PV_X); - Reserved.set(AMDIL::ALU_LITERAL_X); + Reserved.set(AMDGPU::ZERO); + Reserved.set(AMDGPU::HALF); + Reserved.set(AMDGPU::ONE); + Reserved.set(AMDGPU::ONE_INT); + Reserved.set(AMDGPU::NEG_HALF); + Reserved.set(AMDGPU::NEG_ONE); + Reserved.set(AMDGPU::PV_X); + Reserved.set(AMDGPU::ALU_LITERAL_X); - for (TargetRegisterClass::iterator I = AMDIL::R600_CReg32RegClass.begin(), - E = AMDIL::R600_CReg32RegClass.end(); I != E; ++I) { + for (TargetRegisterClass::iterator I = AMDGPU::R600_CReg32RegClass.begin(), + E = AMDGPU::R600_CReg32RegClass.end(); I != E; ++I) { Reserved.set(*I); } @@ -55,9 +55,9 @@ const TargetRegisterClass * R600RegisterInfo::getISARegClass(const TargetRegisterClass * rc) const { switch (rc->getID()) { - case AMDIL::GPRF32RegClassID: - case AMDIL::GPRI32RegClassID: - return &AMDIL::R600_Reg32RegClass; + case AMDGPU::GPRF32RegClassID: + case AMDGPU::GPRI32RegClassID: + return &AMDGPU::R600_Reg32RegClass; default: return rc; } } @@ -65,13 +65,13 @@ R600RegisterInfo::getISARegClass(const TargetRegisterClass * 
rc) const
 unsigned R600RegisterInfo::getHWRegIndex(unsigned reg) const
 {
   switch(reg) {
-  case AMDIL::ZERO: return 248;
-  case AMDIL::ONE:
-  case AMDIL::NEG_ONE: return 249;
-  case AMDIL::ONE_INT: return 250;
-  case AMDIL::HALF:
-  case AMDIL::NEG_HALF: return 252;
-  case AMDIL::ALU_LITERAL_X: return 253;
+  case AMDGPU::ZERO: return 248;
+  case AMDGPU::ONE:
+  case AMDGPU::NEG_ONE: return 249;
+  case AMDGPU::ONE_INT: return 250;
+  case AMDGPU::HALF:
+  case AMDGPU::NEG_HALF: return 252;
+  case AMDGPU::ALU_LITERAL_X: return 253;
   default: return getHWRegIndexGen(reg);
   }
 }
@@ -79,13 +79,13 @@ unsigned R600RegisterInfo::getHWRegIndex(unsigned reg) const
 unsigned R600RegisterInfo::getHWRegChan(unsigned reg) const
 {
   switch(reg) {
-  case AMDIL::ZERO:
-  case AMDIL::ONE:
-  case AMDIL::ONE_INT:
-  case AMDIL::NEG_ONE:
-  case AMDIL::HALF:
-  case AMDIL::NEG_HALF:
-  case AMDIL::ALU_LITERAL_X:
+  case AMDGPU::ZERO:
+  case AMDGPU::ONE:
+  case AMDGPU::ONE_INT:
+  case AMDGPU::NEG_ONE:
+  case AMDGPU::HALF:
+  case AMDGPU::NEG_HALF:
+  case AMDGPU::ALU_LITERAL_X:
     return 0;
   default: return getHWRegChanGen(reg);
   }
@@ -96,7 +96,7 @@ const TargetRegisterClass * R600RegisterInfo::getCFGStructurizerRegClass(
 {
   switch(VT.SimpleTy) {
   default:
-  case MVT::i32: return AMDIL::R600_TReg32RegisterClass;
+  case MVT::i32: return AMDGPU::R600_TReg32RegisterClass;
   }
 }
 #include "R600HwRegInfo.include"
diff --git a/src/gallium/drivers/radeon/SIAssignInterpRegs.cpp b/src/gallium/drivers/radeon/SIAssignInterpRegs.cpp
index a2d14b5fb2f..817a10120d2 100644
--- a/src/gallium/drivers/radeon/SIAssignInterpRegs.cpp
+++ b/src/gallium/drivers/radeon/SIAssignInterpRegs.cpp
@@ -65,22 +65,22 @@ bool SIAssignInterpRegsPass::runOnMachineFunction(MachineFunction &MF)
 {
   struct interp_info InterpUse[INTERP_VALUES] = {
-    {false, {AMDIL::PERSP_SAMPLE_I, AMDIL::PERSP_SAMPLE_J}, 2},
-    {false, {AMDIL::PERSP_CENTER_I, AMDIL::PERSP_CENTER_J}, 2},
-    {false, {AMDIL::PERSP_CENTROID_I, AMDIL::PERSP_CENTROID_J}, 2},
-    {false, {AMDIL::PERSP_I_W, AMDIL::PERSP_J_W, AMDIL::PERSP_1_W}, 3},
-    {false, {AMDIL::LINEAR_SAMPLE_I, AMDIL::LINEAR_SAMPLE_J}, 2},
-    {false, {AMDIL::LINEAR_CENTER_I, AMDIL::LINEAR_CENTER_J}, 2},
-    {false, {AMDIL::LINEAR_CENTROID_I, AMDIL::LINEAR_CENTROID_J}, 2},
-    {false, {AMDIL::LINE_STIPPLE_TEX_COORD}, 1},
-    {false, {AMDIL::POS_X_FLOAT}, 1},
-    {false, {AMDIL::POS_Y_FLOAT}, 1},
-    {false, {AMDIL::POS_Z_FLOAT}, 1},
-    {false, {AMDIL::POS_W_FLOAT}, 1},
-    {false, {AMDIL::FRONT_FACE}, 1},
-    {false, {AMDIL::ANCILLARY}, 1},
-    {false, {AMDIL::SAMPLE_COVERAGE}, 1},
-    {false, {AMDIL::POS_FIXED_PT}, 1}
+    {false, {AMDGPU::PERSP_SAMPLE_I, AMDGPU::PERSP_SAMPLE_J}, 2},
+    {false, {AMDGPU::PERSP_CENTER_I, AMDGPU::PERSP_CENTER_J}, 2},
+    {false, {AMDGPU::PERSP_CENTROID_I, AMDGPU::PERSP_CENTROID_J}, 2},
+    {false, {AMDGPU::PERSP_I_W, AMDGPU::PERSP_J_W, AMDGPU::PERSP_1_W}, 3},
+    {false, {AMDGPU::LINEAR_SAMPLE_I, AMDGPU::LINEAR_SAMPLE_J}, 2},
+    {false, {AMDGPU::LINEAR_CENTER_I, AMDGPU::LINEAR_CENTER_J}, 2},
+    {false, {AMDGPU::LINEAR_CENTROID_I, AMDGPU::LINEAR_CENTROID_J}, 2},
+    {false, {AMDGPU::LINE_STIPPLE_TEX_COORD}, 1},
+    {false, {AMDGPU::POS_X_FLOAT}, 1},
+    {false, {AMDGPU::POS_Y_FLOAT}, 1},
+    {false, {AMDGPU::POS_Z_FLOAT}, 1},
+    {false, {AMDGPU::POS_W_FLOAT}, 1},
+    {false, {AMDGPU::FRONT_FACE}, 1},
+    {false, {AMDGPU::ANCILLARY}, 1},
+    {false, {AMDGPU::SAMPLE_COVERAGE}, 1},
+    {false, {AMDGPU::POS_FIXED_PT}, 1}
   };
 
   SIMachineFunctionInfo * MFI = MF.getInfo<SIMachineFunctionInfo>();
@@ -106,8 +106,8 @@ bool SIAssignInterpRegsPass::runOnMachineFunction(MachineFunction &MF)
     for (unsigned reg_idx = 0; reg_idx < InterpUse[interp_idx].reg_count; reg_idx++, used_vgprs++) {
-      unsigned new_reg = AMDIL::VReg_32RegisterClass->getRegister(used_vgprs);
-      unsigned virt_reg = MRI.createVirtualRegister(AMDIL::VReg_32RegisterClass);
+      unsigned new_reg = AMDGPU::VReg_32RegisterClass->getRegister(used_vgprs);
+      unsigned virt_reg = MRI.createVirtualRegister(AMDGPU::VReg_32RegisterClass);
       MRI.replaceRegWith(InterpUse[interp_idx].regs[reg_idx], virt_reg);
       AMDGPU::utilAddLiveIn(&MF, MRI, TM.getInstrInfo(), new_reg, virt_reg);
     }
diff --git a/src/gallium/drivers/radeon/SICodeEmitter.cpp b/src/gallium/drivers/radeon/SICodeEmitter.cpp
index 585d4180e4d..7b02aad49f6 100644
--- a/src/gallium/drivers/radeon/SICodeEmitter.cpp
+++ b/src/gallium/drivers/radeon/SICodeEmitter.cpp
@@ -108,29 +108,29 @@ void SICodeEmitter::emitState(MachineFunction & MF)
         continue;
       }
       reg = MO.getReg();
-      if (reg == AMDIL::VCC) {
+      if (reg == AMDGPU::VCC) {
         VCCUsed = true;
         continue;
       }
-      if (AMDIL::SReg_32RegClass.contains(reg)) {
+      if (AMDGPU::SReg_32RegClass.contains(reg)) {
        isSGPR = true;
        width = 1;
-      } else if (AMDIL::VReg_32RegClass.contains(reg)) {
+      } else if (AMDGPU::VReg_32RegClass.contains(reg)) {
        isSGPR = false;
        width = 1;
-      } else if (AMDIL::SReg_64RegClass.contains(reg)) {
+      } else if (AMDGPU::SReg_64RegClass.contains(reg)) {
        isSGPR = true;
        width = 2;
-      } else if (AMDIL::VReg_64RegClass.contains(reg)) {
+      } else if (AMDGPU::VReg_64RegClass.contains(reg)) {
        isSGPR = false;
        width = 2;
-      } else if (AMDIL::SReg_128RegClass.contains(reg)) {
+      } else if (AMDGPU::SReg_128RegClass.contains(reg)) {
        isSGPR = true;
        width = 4;
-      } else if (AMDIL::VReg_128RegClass.contains(reg)) {
+      } else if (AMDGPU::VReg_128RegClass.contains(reg)) {
        isSGPR = false;
        width = 4;
-      } else if (AMDIL::SReg_256RegClass.contains(reg)) {
+      } else if (AMDGPU::SReg_256RegClass.contains(reg)) {
        isSGPR = true;
        width = 8;
       } else {
@@ -171,14 +171,14 @@ bool SICodeEmitter::runOnMachineFunction(MachineFunction &MF)
     for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; ++I) {
       MachineInstr &MI = *I;
-      if (MI.getOpcode() != AMDIL::KILL && MI.getOpcode() != AMDIL::RETURN) {
+      if (MI.getOpcode() != AMDGPU::KILL && MI.getOpcode() != AMDGPU::RETURN) {
         emitInstr(MI);
       }
     }
   }
   // Emit S_END_PGM
   MachineInstr * End = BuildMI(MF, DebugLoc(),
-      TM->getInstrInfo()->get(AMDIL::S_ENDPGM));
+      TM->getInstrInfo()->get(AMDGPU::S_ENDPGM));
   emitInstr(*End);
   return false;
 }
@@ -304,8 +304,8 @@ uint64_t SICodeEmitter::VOPPostEncode(const MachineInstr &MI,
       continue;
     }
     unsigned reg = MI.getOperand(opIdx).getReg();
-    if (AMDIL::VReg_32RegClass.contains(reg)
-        || AMDIL::VReg_64RegClass.contains(reg)) {
+    if (AMDGPU::VReg_32RegClass.contains(reg)
+        || AMDGPU::VReg_64RegClass.contains(reg)) {
       Value |= (VGPR_BIT(opIdx)) << vgprBitOffset;
     }
   }
diff --git a/src/gallium/drivers/radeon/SIGenRegisterInfo.pl b/src/gallium/drivers/radeon/SIGenRegisterInfo.pl
index bffbd0fc6cb..68b4fe357fa 100644
--- a/src/gallium/drivers/radeon/SIGenRegisterInfo.pl
+++ b/src/gallium/drivers/radeon/SIGenRegisterInfo.pl
@@ -26,7 +26,7 @@ my $INDEX_FILE = defined($ARGV[0]) ? $ARGV[0] : '';
 
 print <<STRING;
 
-let Namespace = "AMDIL" in {
+let Namespace = "AMDGPU" in {
   def low : SubRegIndex;
   def high : SubRegIndex;
 
@@ -41,21 +41,21 @@ let Namespace = "AMDIL" in {
 }
 
 class SIReg <string n> : Register<n> {
-  let Namespace = "AMDIL";
+  let Namespace = "AMDGPU";
 }
 
 class SI_64 <string n, list<Register> subregs> : RegisterWithSubRegs<n, subregs> {
-  let Namespace = "AMDIL";
+  let Namespace = "AMDGPU";
   let SubRegIndices = [low, high];
 }
 
 class SI_128 <string n, list<Register> subregs> : RegisterWithSubRegs<n, subregs> {
-  let Namespace = "AMDIL";
+  let Namespace = "AMDGPU";
   let SubRegIndices = [sel_x, sel_y, sel_z, sel_w];
 }
 
 class SI_256 <string n, list<Register> subregs> : RegisterWithSubRegs<n, subregs> {
-  let Namespace = "AMDIL";
+  let Namespace = "AMDGPU";
   let SubRegIndices = [sub0, sub1, sub2, sub3, sub4, sub5, sub6, sub7];
 }
 
@@ -138,11 +138,11 @@ for (my $i = 0; $i < $VGPR_COUNT; $i++) {
 
 print <<STRING;
 
-def SReg_32 : RegisterClass<"AMDIL", [f32, i32], 32,
+def SReg_32 : RegisterClass<"AMDGPU", [f32, i32], 32,
     (add (sequence "SGPR%u", 0, $SGPR_MAX_IDX), SREG_LIT_0, M0)
 >;
 
-def VReg_32 : RegisterClass<"AMDIL", [f32, i32], 32,
+def VReg_32 : RegisterClass<"AMDGPU", [f32, i32], 32,
     (add (sequence "VGPR%u", 0, $VGPR_MAX_IDX),
     PERSP_SAMPLE_I, PERSP_SAMPLE_J,
     PERSP_CENTER_I, PERSP_CENTER_J,
@@ -163,11 +163,11 @@ def VReg_32 : RegisterClass<"AMDIL", [f32, i32], 32,
     )
 >;
 
-def AllReg_32 : RegisterClass<"AMDIL", [f32, i32], 32,
+def AllReg_32 : RegisterClass<"AMDGPU", [f32, i32], 32,
     (add VReg_32, SReg_32)
 >;
 
-def CCReg : RegisterClass<"AMDIL", [f32], 32, (add VCC, SCC)>;
+def CCReg : RegisterClass<"AMDGPU", [f32], 32, (add VCC, SCC)>;
 
 STRING
 
@@ -187,7 +187,7 @@ my $sgpr64_list = join(',', @SGPR64);
 my $vgpr64_list = join(',', @VGPR64);
 print <<STRING;
 
-def AllReg_64 : RegisterClass<"AMDIL", [f64, i64], 64,
+def AllReg_64 : RegisterClass<"AMDGPU", [f64, i64], 64,
     (add $sgpr64_list, $vgpr64_list)
 >;
 
@@ -229,7 +229,7 @@ if ($INDEX_FILE ne '') {
   for my $key (keys(%hw_values)) {
     my @names = @{$hw_values{$key}};
     for my $regname (@names) {
-      print $fh " case AMDIL::$regname:\n"
+      print $fh " case AMDGPU::$regname:\n"
     }
     print $fh " return $key;\n";
   }
@@ -266,7 +266,7 @@ sub print_reg_class {
   }
   my $reg_list = join(', ', @registers);
 
-  print "def $class_prefix\_$reg_width : RegisterClass<\"AMDIL\", [" . join (', ', @types) . "], $reg_width,\n (add $reg_list)\n>{\n";
+  print "def $class_prefix\_$reg_width : RegisterClass<\"AMDGPU\", [" . join (', ', @types) . "], $reg_width,\n (add $reg_list)\n>{\n";
   print " let SubRegClasses = [($class_prefix\_", ($reg_width / $component_count) , ' ', join(', ', @{$sub_reg_ref}), ")];\n}\n";
   return @registers;
 }
diff --git a/src/gallium/drivers/radeon/SIISelLowering.cpp b/src/gallium/drivers/radeon/SIISelLowering.cpp
index 5b1959d5024..9970251128a 100644
--- a/src/gallium/drivers/radeon/SIISelLowering.cpp
+++ b/src/gallium/drivers/radeon/SIISelLowering.cpp
@@ -23,13 +23,13 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
     AMDGPUTargetLowering(TM),
     TII(static_cast<const SIInstrInfo*>(TM.getInstrInfo()))
 {
-  addRegisterClass(MVT::v4f32, &AMDIL::VReg_128RegClass);
-  addRegisterClass(MVT::f32, &AMDIL::VReg_32RegClass);
-  addRegisterClass(MVT::i32, &AMDIL::VReg_32RegClass);
-  addRegisterClass(MVT::i64, &AMDIL::VReg_64RegClass);
+  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
+  addRegisterClass(MVT::f32, &AMDGPU::VReg_32RegClass);
+  addRegisterClass(MVT::i32, &AMDGPU::VReg_32RegClass);
+  addRegisterClass(MVT::i64, &AMDGPU::VReg_64RegClass);
 
-  addRegisterClass(MVT::v4i32, &AMDIL::SReg_128RegClass);
-  addRegisterClass(MVT::v8i32, &AMDIL::SReg_256RegClass);
+  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
+  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
 
   computeRegisterProperties();
 
@@ -54,8 +54,8 @@ MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
   default:
     return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
-  case AMDIL::CLAMP_SI:
-    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::V_MOV_B32_e64))
+  case AMDGPU::CLAMP_SI:
+    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::V_MOV_B32_e64))
            .addOperand(MI->getOperand(0))
            .addOperand(MI->getOperand(1))
            // VSRC1-2 are unused, but we still need to fill all the
@@ -69,8 +69,8 @@ MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
     MI->eraseFromParent();
     break;
 
-  case AMDIL::FABS_SI:
-    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::V_MOV_B32_e64))
+  case AMDGPU::FABS_SI:
+    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::V_MOV_B32_e64))
            .addOperand(MI->getOperand(0))
           .addOperand(MI->getOperand(1))
           // VSRC1-2 are unused, but we still need to fill all the
@@ -84,22 +84,22 @@ MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
     MI->eraseFromParent();
     break;
 
-  case AMDIL::SI_INTERP:
+  case AMDGPU::SI_INTERP:
     LowerSI_INTERP(MI, *BB, I, MRI);
     break;
-  case AMDIL::SI_INTERP_CONST:
+  case AMDGPU::SI_INTERP_CONST:
     LowerSI_INTERP_CONST(MI, *BB, I);
     break;
-  case AMDIL::SI_V_CNDLT:
+  case AMDGPU::SI_V_CNDLT:
     LowerSI_V_CNDLT(MI, *BB, I, MRI);
     break;
-  case AMDIL::USE_SGPR_32:
-  case AMDIL::USE_SGPR_64:
+  case AMDGPU::USE_SGPR_32:
+  case AMDGPU::USE_SGPR_64:
     lowerUSE_SGPR(MI, BB->getParent(), MRI);
     MI->eraseFromParent();
     break;
-  case AMDIL::VS_LOAD_BUFFER_INDEX:
-    addLiveIn(MI, BB->getParent(), MRI, TII, AMDIL::VGPR0);
+  case AMDGPU::VS_LOAD_BUFFER_INDEX:
+    addLiveIn(MI, BB->getParent(), MRI, TII, AMDGPU::VGPR0);
     MI->eraseFromParent();
     break;
   }
@@ -109,14 +109,14 @@ MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
 void SITargetLowering::AppendS_WAITCNT(MachineInstr *MI, MachineBasicBlock &BB,
     MachineBasicBlock::iterator I) const
 {
-  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::S_WAITCNT))
+  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_WAITCNT))
           .addImm(0);
 }
 
 void SITargetLowering::LowerSI_INTERP(MachineInstr *MI, MachineBasicBlock &BB,
     MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const
 {
-  unsigned tmp = MRI.createVirtualRegister(&AMDIL::VReg_32RegClass);
+  unsigned tmp = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
   MachineOperand dst = MI->getOperand(0);
   MachineOperand iReg = MI->getOperand(1);
   MachineOperand jReg = MI->getOperand(2);
@@ -124,16 +124,16 @@ void SITargetLowering::LowerSI_INTERP(MachineInstr *MI, MachineBasicBlock &BB,
   MachineOperand attr = MI->getOperand(4);
   MachineOperand params = MI->getOperand(5);
 
-  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::S_MOV_B32))
-          .addReg(AMDIL::M0)
+  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_MOV_B32))
+          .addReg(AMDGPU::M0)
           .addOperand(params);
 
-  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_INTERP_P1_F32), tmp)
+  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_INTERP_P1_F32), tmp)
           .addOperand(iReg)
           .addOperand(attr_chan)
           .addOperand(attr);
 
-  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_INTERP_P2_F32))
+  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_INTERP_P2_F32))
           .addOperand(dst)
           .addReg(tmp)
           .addOperand(jReg)
@@ -151,11 +151,11 @@ void SITargetLowering::LowerSI_INTERP_CONST(MachineInstr *MI,
   MachineOperand attr = MI->getOperand(2);
   MachineOperand params = MI->getOperand(3);
 
-  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::S_MOV_B32))
-          .addReg(AMDIL::M0)
+  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_MOV_B32))
+          .addReg(AMDGPU::M0)
           .addOperand(params);
 
-  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_INTERP_MOV_F32))
+  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_INTERP_MOV_F32))
           .addOperand(dst)
           .addOperand(attr_chan)
           .addOperand(attr);
@@ -166,11 +166,11 @@ void SITargetLowering::LowerSI_INTERP_CONST(MachineInstr *MI,
 void SITargetLowering::LowerSI_V_CNDLT(MachineInstr *MI, MachineBasicBlock &BB,
     MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const
 {
-  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_CMP_LT_F32_e32))
+  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_CMP_LT_F32_e32))
           .addOperand(MI->getOperand(1))
-          .addReg(AMDIL::SREG_LIT_0);
+          .addReg(AMDGPU::SREG_LIT_0);
 
-  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_CNDMASK_B32))
+  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_CNDMASK_B32))
           .addOperand(MI->getOperand(0))
           .addOperand(MI->getOperand(2))
           .addOperand(MI->getOperand(3));
diff --git a/src/gallium/drivers/radeon/SIInstrInfo.cpp b/src/gallium/drivers/radeon/SIInstrInfo.cpp
index 1d464fec033..4438d67f877 100644
--- a/src/gallium/drivers/radeon/SIInstrInfo.cpp
+++ b/src/gallium/drivers/radeon/SIInstrInfo.cpp
@@ -38,7 +38,7 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                            unsigned DestReg, unsigned SrcReg,
                            bool KillSrc) const
 {
-  BuildMI(MBB, MI, DL, get(AMDIL::V_MOV_B32_e32), DestReg)
+  BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
           .addReg(SrcReg, getKillRegState(KillSrc));
 }
 
@@ -59,7 +59,7 @@ unsigned SIInstrInfo::getEncodingBytes(const MachineInstr &MI) const
   }
 
   /* This instruction always has a literal */
-  if (MI.getOpcode() == AMDIL::S_MOV_IMM_I32) {
+  if (MI.getOpcode() == AMDGPU::S_MOV_IMM_I32) {
     return 8;
   }
 
@@ -80,7 +80,7 @@ unsigned SIInstrInfo::getEncodingBytes(const MachineInstr &MI) const
 MachineInstr * SIInstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg,
                                            int64_t Imm) const
 {
-  MachineInstr * MI = MF->CreateMachineInstr(get(AMDIL::V_MOV_IMM_I32), DebugLoc());
+  MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::V_MOV_IMM_I32), DebugLoc());
   MachineInstrBuilder(MI).addReg(DstReg, RegState::Define);
   MachineInstrBuilder(MI).addImm(Imm);
 
@@ -92,13 +92,13 @@ bool SIInstrInfo::isMov(unsigned Opcode) const
 {
   switch(Opcode) {
   default: return false;
-  case AMDIL::S_MOV_B32:
-  case AMDIL::S_MOV_B64:
-  case AMDIL::V_MOV_B32_e32:
-  case AMDIL::V_MOV_B32_e64:
-  case AMDIL::V_MOV_IMM_F32:
-  case AMDIL::V_MOV_IMM_I32:
-  case AMDIL::S_MOV_IMM_I32:
+  case AMDGPU::S_MOV_B32:
+  case AMDGPU::S_MOV_B64:
+  case AMDGPU::V_MOV_B32_e32:
+  case AMDGPU::V_MOV_B32_e64:
+  case AMDGPU::V_MOV_IMM_F32:
+  case AMDGPU::V_MOV_IMM_I32:
+  case AMDGPU::S_MOV_IMM_I32:
     return true;
   }
 }
diff --git a/src/gallium/drivers/radeon/SIRegisterInfo.cpp b/src/gallium/drivers/radeon/SIRegisterInfo.cpp
index 79e2432cb52..0d0e612080e 100644
--- a/src/gallium/drivers/radeon/SIRegisterInfo.cpp
+++ b/src/gallium/drivers/radeon/SIRegisterInfo.cpp
@@ -34,8 +34,8 @@ BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const
 unsigned SIRegisterInfo::getBinaryCode(unsigned reg) const
 {
   switch (reg) {
-  case AMDIL::M0: return 124;
-  case AMDIL::SREG_LIT_0: return 128;
+  case AMDGPU::M0: return 124;
+  case AMDGPU::SREG_LIT_0: return 128;
   default: return getHWRegNum(reg);
   }
 }
@@ -44,8 +44,8 @@ const TargetRegisterClass *
 SIRegisterInfo::getISARegClass(const TargetRegisterClass * rc) const
 {
   switch (rc->getID()) {
-  case AMDIL::GPRF32RegClassID:
-    return &AMDIL::VReg_32RegClass;
+  case AMDGPU::GPRF32RegClassID:
+    return &AMDGPU::VReg_32RegClass;
   default: return rc;
   }
 }
@@ -55,7 +55,7 @@ const TargetRegisterClass * SIRegisterInfo::getCFGStructurizerRegClass(
 {
   switch(VT.SimpleTy) {
   default:
-  case MVT::i32: return AMDIL::VReg_32RegisterClass;
+  case MVT::i32: return AMDGPU::VReg_32RegisterClass;
   }
 }
 #include "SIRegisterGetHWRegNum.inc"