author     Tom Stellard <[email protected]>  2012-07-27 19:18:04 +0000
committer  Tom Stellard <[email protected]>  2012-07-30 20:31:57 +0000
commit     ac669c32c6e80841e3ee63d65b58c0031b22e7b8 (patch)
tree       d330cb840e7398030ab97231f3deed34a88f8508  /src/gallium
parent     3a0187b1b53eca3143286a5ae7917cd71117b902 (diff)
radeon/llvm: Merge AMDILInstrInfo.cpp into AMDGPUInstrInfo.cpp
Diffstat (limited to 'src/gallium')
-rw-r--r--  src/gallium/drivers/radeon/AMDGPUInstrInfo.cpp         | 386
-rw-r--r--  src/gallium/drivers/radeon/AMDGPUInstrInfo.h           | 117
-rw-r--r--  src/gallium/drivers/radeon/AMDGPUUtil.cpp              |   1
-rw-r--r--  src/gallium/drivers/radeon/AMDIL.h                     |   3
-rw-r--r--  src/gallium/drivers/radeon/AMDILCFGStructurizer.cpp    |  20
-rw-r--r--  src/gallium/drivers/radeon/AMDILISelDAGToDAG.cpp       |   1
-rw-r--r--  src/gallium/drivers/radeon/AMDILInstrInfo.cpp          | 509
-rw-r--r--  src/gallium/drivers/radeon/AMDILInstrInfo.h            | 161
-rw-r--r--  src/gallium/drivers/radeon/AMDILPeepholeOptimizer.cpp  |   2
-rw-r--r--  src/gallium/drivers/radeon/Makefile.sources            |   1
-rw-r--r--  src/gallium/drivers/radeon/R600CodeEmitter.cpp         |   2
-rw-r--r--  src/gallium/drivers/radeon/R600InstrInfo.h             |   2
12 files changed, 512 insertions, 693 deletions
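For orientation before the per-file hunks: up to this commit, AMDGPUInstrInfo was a thin wrapper around AMDILInstrInfo, which carried the actual TargetInstrInfo implementation on top of the TableGen-generated AMDGPUGenInstrInfo base. The commit folds that implementation into AMDGPUInstrInfo, deletes the AMDIL class, and repoints the remaining users (R600InstrInfo, the CFG structurizer, the ISel and peephole passes, R600CodeEmitter) at AMDGPUInstrInfo. A rough before/after sketch of the class layout, simplified from the headers in this diff with bases and members trimmed to the ones relevant here:

// Before (AMDILInstrInfo.h / AMDGPUInstrInfo.h, simplified):
class AMDILInstrInfo : public AMDGPUGenInstrInfo {   // TableGen-generated base
  const AMDGPURegisterInfo RI;
  TargetMachine &TM;
public:
  explicit AMDILInstrInfo(TargetMachine &tm);
  // AnalyzeBranch/InsertBranch/RemoveBranch, stack-slot spill/reload,
  // opcode-name helpers (isLoadInst, isStoreInst, ...), and pure-virtual
  // hooks (getMovImmInstr, getIEQOpcode, isMov) implemented by subtargets.
};
class AMDGPUInstrInfo : public AMDILInstrInfo {       // thin wrapper
  AMDGPUTargetMachine &TM;
public:
  explicit AMDGPUInstrInfo(AMDGPUTargetMachine &tm);
};

// After (AMDGPUInstrInfo.h, simplified): the AMDIL layer is gone and the
// same implementation now lives directly in AMDGPUInstrInfo.
class AMDGPUInstrInfo : public AMDGPUGenInstrInfo {
  const AMDGPURegisterInfo RI;
  TargetMachine &TM;
public:
  explicit AMDGPUInstrInfo(TargetMachine &tm);
  // Same branch/spill/helper methods as before; R600InstrInfo and callers
  // such as AMDILCFGStructurizer now include and cast to AMDGPUInstrInfo.
};
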
diff --git a/src/gallium/drivers/radeon/AMDGPUInstrInfo.cpp b/src/gallium/drivers/radeon/AMDGPUInstrInfo.cpp
index d2bb4e16b47..81b62ccd24d 100644
--- a/src/gallium/drivers/radeon/AMDGPUInstrInfo.cpp
+++ b/src/gallium/drivers/radeon/AMDGPUInstrInfo.cpp
@@ -16,12 +16,394 @@
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "AMDIL.h"
+#include "AMDILUtilityFunctions.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#define GET_INSTRINFO_CTOR
+#include "AMDGPUGenInstrInfo.inc"
+
using namespace llvm;
-AMDGPUInstrInfo::AMDGPUInstrInfo(AMDGPUTargetMachine &tm)
- : AMDILInstrInfo(tm), TM(tm) { }
+AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm)
+ : AMDGPUGenInstrInfo(), RI(tm, *this), TM(tm) { }
+
+const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
+ return RI;
+}
+
+bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
+ unsigned &SrcReg, unsigned &DstReg,
+ unsigned &SubIdx) const {
+// TODO: Implement this function
+ return false;
+}
+
+unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
+// TODO: Implement this function
+ return 0;
+}
+
+unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
+ int &FrameIndex) const {
+// TODO: Implement this function
+ return 0;
+}
+
+bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
+ const MachineMemOperand *&MMO,
+ int &FrameIndex) const {
+// TODO: Implement this function
+ return false;
+}
+unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
+// TODO: Implement this function
+ return 0;
+}
+unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
+ int &FrameIndex) const {
+// TODO: Implement this function
+ return 0;
+}
+bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
+ const MachineMemOperand *&MMO,
+ int &FrameIndex) const {
+// TODO: Implement this function
+ return false;
+}
+
+MachineInstr *
+AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
+ MachineBasicBlock::iterator &MBBI,
+ LiveVariables *LV) const {
+// TODO: Implement this function
+ return NULL;
+}
+bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
+ MachineBasicBlock &MBB) const {
+ while (iter != MBB.end()) {
+ switch (iter->getOpcode()) {
+ default:
+ break;
+ ExpandCaseToAllScalarTypes(AMDGPU::BRANCH_COND);
+ case AMDGPU::BRANCH:
+ return true;
+ };
+ ++iter;
+ }
+ return false;
+}
+
+bool AMDGPUInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
+ bool retVal = true;
+ return retVal;
+ MachineBasicBlock::iterator iter = MBB.begin();
+ if (!getNextBranchInstr(iter, MBB)) {
+ retVal = false;
+ } else {
+ MachineInstr *firstBranch = iter;
+ if (!getNextBranchInstr(++iter, MBB)) {
+ if (firstBranch->getOpcode() == AMDGPU::BRANCH) {
+ TBB = firstBranch->getOperand(0).getMBB();
+ firstBranch->eraseFromParent();
+ retVal = false;
+ } else {
+ TBB = firstBranch->getOperand(0).getMBB();
+ FBB = *(++MBB.succ_begin());
+ if (FBB == TBB) {
+ FBB = *(MBB.succ_begin());
+ }
+ Cond.push_back(firstBranch->getOperand(1));
+ retVal = false;
+ }
+ } else {
+ MachineInstr *secondBranch = iter;
+ if (!getNextBranchInstr(++iter, MBB)) {
+ if (secondBranch->getOpcode() == AMDGPU::BRANCH) {
+ TBB = firstBranch->getOperand(0).getMBB();
+ Cond.push_back(firstBranch->getOperand(1));
+ FBB = secondBranch->getOperand(0).getMBB();
+ secondBranch->eraseFromParent();
+ retVal = false;
+ } else {
+ assert(0 && "Should not have two consecutive conditional branches");
+ }
+ } else {
+ MBB.getParent()->viewCFG();
+ assert(0 && "Should not have three branch instructions in"
+ " a single basic block");
+ retVal = false;
+ }
+ }
+ }
+ return retVal;
+}
+
+unsigned int AMDGPUInstrInfo::getBranchInstr(const MachineOperand &op) const {
+ const MachineInstr *MI = op.getParent();
+
+ switch (MI->getDesc().OpInfo->RegClass) {
+ default: // FIXME: fallthrough??
+ case AMDGPU::GPRI32RegClassID: return AMDGPU::BRANCH_COND_i32;
+ case AMDGPU::GPRF32RegClassID: return AMDGPU::BRANCH_COND_f32;
+ };
+}
+
+unsigned int
+AMDGPUInstrInfo::InsertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const
+{
+ assert(TBB && "InsertBranch must not be told to insert a fallthrough");
+ for (unsigned int x = 0; x < Cond.size(); ++x) {
+ Cond[x].getParent()->dump();
+ }
+ if (FBB == 0) {
+ if (Cond.empty()) {
+ BuildMI(&MBB, DL, get(AMDGPU::BRANCH)).addMBB(TBB);
+ } else {
+ BuildMI(&MBB, DL, get(getBranchInstr(Cond[0])))
+ .addMBB(TBB).addReg(Cond[0].getReg());
+ }
+ return 1;
+ } else {
+ BuildMI(&MBB, DL, get(getBranchInstr(Cond[0])))
+ .addMBB(TBB).addReg(Cond[0].getReg());
+ BuildMI(&MBB, DL, get(AMDGPU::BRANCH)).addMBB(FBB);
+ }
+ assert(0 && "Inserting two branches not supported");
+ return 0;
+}
+
+unsigned int AMDGPUInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator I = MBB.end();
+ if (I == MBB.begin()) {
+ return 0;
+ }
+ --I;
+ switch (I->getOpcode()) {
+ default:
+ return 0;
+ ExpandCaseToAllScalarTypes(AMDGPU::BRANCH_COND);
+ case AMDGPU::BRANCH:
+ I->eraseFromParent();
+ break;
+ }
+ I = MBB.end();
+
+ if (I == MBB.begin()) {
+ return 1;
+ }
+ --I;
+ switch (I->getOpcode()) {
+ // FIXME: only one case??
+ default:
+ return 1;
+ ExpandCaseToAllScalarTypes(AMDGPU::BRANCH_COND);
+ I->eraseFromParent();
+ break;
+ }
+ return 2;
+}
+
+MachineBasicBlock::iterator skipFlowControl(MachineBasicBlock *MBB) {
+ MachineBasicBlock::iterator tmp = MBB->end();
+ if (!MBB->size()) {
+ return MBB->end();
+ }
+ while (--tmp) {
+ if (tmp->getOpcode() == AMDGPU::ENDLOOP
+ || tmp->getOpcode() == AMDGPU::ENDIF
+ || tmp->getOpcode() == AMDGPU::ELSE) {
+ if (tmp == MBB->begin()) {
+ return tmp;
+ } else {
+ continue;
+ }
+ } else {
+ return ++tmp;
+ }
+ }
+ return MBB->end();
+}
+
+void
+AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned SrcReg, bool isKill,
+ int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ unsigned int Opc = 0;
+ // MachineInstr *curMI = MI;
+ MachineFunction &MF = *(MBB.getParent());
+ MachineFrameInfo &MFI = *MF.getFrameInfo();
+
+ DebugLoc DL;
+ switch (RC->getID()) {
+ case AMDGPU::GPRF32RegClassID:
+ Opc = AMDGPU::PRIVATESTORE_f32;
+ break;
+ case AMDGPU::GPRI32RegClassID:
+ Opc = AMDGPU::PRIVATESTORE_i32;
+ break;
+ }
+ if (MI != MBB.end()) DL = MI->getDebugLoc();
+ MachineMemOperand *MMO =
+ new MachineMemOperand(
+ MachinePointerInfo::getFixedStack(FrameIndex),
+ MachineMemOperand::MOLoad,
+ MFI.getObjectSize(FrameIndex),
+ MFI.getObjectAlignment(FrameIndex));
+ if (MI != MBB.end()) {
+ DL = MI->getDebugLoc();
+ }
+ BuildMI(MBB, MI, DL, get(Opc))
+ .addReg(SrcReg, getKillRegState(isKill))
+ .addFrameIndex(FrameIndex)
+ .addMemOperand(MMO)
+ .addImm(0);
+}
+
+void
+AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ unsigned int Opc = 0;
+ MachineFunction &MF = *(MBB.getParent());
+ MachineFrameInfo &MFI = *MF.getFrameInfo();
+ DebugLoc DL;
+ switch (RC->getID()) {
+ case AMDGPU::GPRF32RegClassID:
+ Opc = AMDGPU::PRIVATELOAD_f32;
+ break;
+ case AMDGPU::GPRI32RegClassID:
+ Opc = AMDGPU::PRIVATELOAD_i32;
+ break;
+ }
+
+ MachineMemOperand *MMO =
+ new MachineMemOperand(
+ MachinePointerInfo::getFixedStack(FrameIndex),
+ MachineMemOperand::MOLoad,
+ MFI.getObjectSize(FrameIndex),
+ MFI.getObjectAlignment(FrameIndex));
+ if (MI != MBB.end()) {
+ DL = MI->getDebugLoc();
+ }
+ BuildMI(MBB, MI, DL, get(Opc))
+ .addReg(DestReg, RegState::Define)
+ .addFrameIndex(FrameIndex)
+ .addMemOperand(MMO)
+ .addImm(0);
+}
+MachineInstr *
+AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const {
+// TODO: Implement this function
+ return 0;
+}
+MachineInstr*
+AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ MachineInstr *LoadMI) const {
+ // TODO: Implement this function
+ return 0;
+}
+bool
+AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops) const
+{
+ // TODO: Implement this function
+ return false;
+}
+bool
+AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
+ unsigned Reg, bool UnfoldLoad,
+ bool UnfoldStore,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const {
+ // TODO: Implement this function
+ return false;
+}
+
+bool
+AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
+ SmallVectorImpl<SDNode*> &NewNodes) const {
+ // TODO: Implement this function
+ return false;
+}
+
+unsigned
+AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
+ bool UnfoldLoad, bool UnfoldStore,
+ unsigned *LoadRegIndex) const {
+ // TODO: Implement this function
+ return 0;
+}
+
+bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
+ int64_t Offset1, int64_t Offset2,
+ unsigned NumLoads) const {
+ assert(Offset2 > Offset1
+ && "Second offset should be larger than first offset!");
+ // If we have less than 16 loads in a row, and the offsets are within 16,
+ // then schedule together.
+ // TODO: Make the loads schedule near if it fits in a cacheline
+ return (NumLoads < 16 && (Offset2 - Offset1) < 16);
+}
+
+bool
+AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
+ const {
+ // TODO: Implement this function
+ return true;
+}
+void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const {
+ // TODO: Implement this function
+}
+
+bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
+ // TODO: Implement this function
+ return false;
+}
+bool
+AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
+ const SmallVectorImpl<MachineOperand> &Pred2)
+ const {
+ // TODO: Implement this function
+ return false;
+}
+
+bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
+ std::vector<MachineOperand> &Pred) const {
+ // TODO: Implement this function
+ return false;
+}
+
+bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
+ // TODO: Implement this function
+ return MI->getDesc().isPredicable();
+}
+
+bool
+AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
+ // TODO: Implement this function
+ return true;
+}
MachineInstr * AMDGPUInstrInfo::convertToISA(MachineInstr & MI, MachineFunction &MF,
DebugLoc DL) const
diff --git a/src/gallium/drivers/radeon/AMDGPUInstrInfo.h b/src/gallium/drivers/radeon/AMDGPUInstrInfo.h
index e6b79c867a8..5bf3e454e11 100644
--- a/src/gallium/drivers/radeon/AMDGPUInstrInfo.h
+++ b/src/gallium/drivers/radeon/AMDGPUInstrInfo.h
@@ -16,10 +16,15 @@
#define AMDGPUINSTRUCTIONINFO_H_
#include "AMDGPURegisterInfo.h"
-#include "AMDILInstrInfo.h"
+#include "AMDGPUInstrInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include <map>
+#define GET_INSTRINFO_HEADER
+#define GET_INSTRINFO_ENUM
+#include "AMDGPUGenInstrInfo.inc"
+
namespace llvm {
class AMDGPUTargetMachine;
@@ -27,15 +32,119 @@ class MachineFunction;
class MachineInstr;
class MachineInstrBuilder;
-class AMDGPUInstrInfo : public AMDILInstrInfo {
+class AMDGPUInstrInfo : public AMDGPUGenInstrInfo {
private:
- AMDGPUTargetMachine & TM;
+ const AMDGPURegisterInfo RI;
+ TargetMachine &TM;
+ bool getNextBranchInstr(MachineBasicBlock::iterator &iter,
+ MachineBasicBlock &MBB) const;
+ unsigned int getBranchInstr(const MachineOperand &op) const;
public:
- explicit AMDGPUInstrInfo(AMDGPUTargetMachine &tm);
+ explicit AMDGPUInstrInfo(TargetMachine &tm);
virtual const AMDGPURegisterInfo &getRegisterInfo() const = 0;
+ bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
+ unsigned &DstReg, unsigned &SubIdx) const;
+
+ unsigned isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
+ unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
+ int &FrameIndex) const;
+ bool hasLoadFromStackSlot(const MachineInstr *MI,
+ const MachineMemOperand *&MMO,
+ int &FrameIndex) const;
+ unsigned isStoreFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
+ unsigned isStoreFromStackSlotPostFE(const MachineInstr *MI,
+ int &FrameIndex) const;
+ bool hasStoreFromStackSlot(const MachineInstr *MI,
+ const MachineMemOperand *&MMO,
+ int &FrameIndex) const;
+
+ MachineInstr *
+ convertToThreeAddress(MachineFunction::iterator &MFI,
+ MachineBasicBlock::iterator &MBBI,
+ LiveVariables *LV) const;
+
+ bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const;
+
+ unsigned RemoveBranch(MachineBasicBlock &MBB) const;
+
+ unsigned
+ InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
+
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const = 0;
+
+ void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const;
+ void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const;
+
+protected:
+ MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const;
+ MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ MachineInstr *LoadMI) const;
+public:
+ bool canFoldMemoryOperand(const MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops) const;
+ bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
+ unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
+ SmallVectorImpl<MachineInstr *> &NewMIs) const;
+ bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
+ SmallVectorImpl<SDNode *> &NewNodes) const;
+ unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
+ bool UnfoldLoad, bool UnfoldStore,
+ unsigned *LoadRegIndex = 0) const;
+ bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
+ int64_t Offset1, int64_t Offset2,
+ unsigned NumLoads) const;
+
+ bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
+ void insertNoop(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const;
+ bool isPredicated(const MachineInstr *MI) const;
+ bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
+ const SmallVectorImpl<MachineOperand> &Pred2) const;
+ bool DefinesPredicate(MachineInstr *MI,
+ std::vector<MachineOperand> &Pred) const;
+ bool isPredicable(MachineInstr *MI) const;
+ bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const;
+
+ // Helper functions that check the opcode for status information
+ bool isLoadInst(llvm::MachineInstr *MI) const;
+ bool isExtLoadInst(llvm::MachineInstr *MI) const;
+ bool isSWSExtLoadInst(llvm::MachineInstr *MI) const;
+ bool isSExtLoadInst(llvm::MachineInstr *MI) const;
+ bool isZExtLoadInst(llvm::MachineInstr *MI) const;
+ bool isAExtLoadInst(llvm::MachineInstr *MI) const;
+ bool isStoreInst(llvm::MachineInstr *MI) const;
+ bool isTruncStoreInst(llvm::MachineInstr *MI) const;
+
+ virtual MachineInstr* getMovImmInstr(MachineFunction *MF, unsigned DstReg,
+ int64_t Imm) const = 0;
+ virtual unsigned getIEQOpcode() const = 0;
+ virtual bool isMov(unsigned opcode) const = 0;
+
/// convertToISA - Convert the AMDIL MachineInstr to a supported ISA
/// MachineInstr
virtual MachineInstr * convertToISA(MachineInstr & MI, MachineFunction &MF,
diff --git a/src/gallium/drivers/radeon/AMDGPUUtil.cpp b/src/gallium/drivers/radeon/AMDGPUUtil.cpp
index 63b359ffc14..b571f4b41c2 100644
--- a/src/gallium/drivers/radeon/AMDGPUUtil.cpp
+++ b/src/gallium/drivers/radeon/AMDGPUUtil.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "AMDGPUUtil.h"
+#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDIL.h"
#include "llvm/CodeGen/MachineFunction.h"
diff --git a/src/gallium/drivers/radeon/AMDIL.h b/src/gallium/drivers/radeon/AMDIL.h
index 635ff008824..34f32ffe37f 100644
--- a/src/gallium/drivers/radeon/AMDIL.h
+++ b/src/gallium/drivers/radeon/AMDIL.h
@@ -104,9 +104,6 @@ extern Target TheAMDILTarget;
extern Target TheAMDGPUTarget;
} // end namespace llvm;
-#define GET_INSTRINFO_ENUM
-#include "AMDGPUGenInstrInfo.inc"
-
/// Include device information enumerations
#include "AMDILDeviceInfo.h"
diff --git a/src/gallium/drivers/radeon/AMDILCFGStructurizer.cpp b/src/gallium/drivers/radeon/AMDILCFGStructurizer.cpp
index 79063635c16..fe94328d452 100644
--- a/src/gallium/drivers/radeon/AMDILCFGStructurizer.cpp
+++ b/src/gallium/drivers/radeon/AMDILCFGStructurizer.cpp
@@ -10,8 +10,8 @@
#define DEBUGME 0
#define DEBUG_TYPE "structcfg"
+#include "AMDGPUInstrInfo.h"
#include "AMDIL.h"
-#include "AMDILInstrInfo.h"
#include "AMDILUtilityFunctions.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SmallVector.h"
@@ -1871,8 +1871,8 @@ typename CFGStructurizer<PassT>::BlockT *
CFGStructurizer<PassT>::addLoopEndbranchBlock(LoopT *loopRep,
BlockTSmallerVector &exitingBlks,
BlockTSmallerVector &exitBlks) {
- const AMDILInstrInfo *tii =
- static_cast<const AMDILInstrInfo *>(passRep->getTargetInstrInfo());
+ const AMDGPUInstrInfo *tii =
+ static_cast<const AMDGPUInstrInfo *>(passRep->getTargetInstrInfo());
const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
RegiT endBranchReg = static_cast<int>
@@ -2892,7 +2892,7 @@ struct CFGStructTraits<AMDILCFGStructurizer>
// instruction. Such move instruction "belong to" the loop backward-edge.
//
static MachineInstr *getLoopendBlockBranchInstr(MachineBasicBlock *blk) {
- const AMDILInstrInfo * TII = static_cast<const AMDILInstrInfo *>(
+ const AMDGPUInstrInfo * TII = static_cast<const AMDGPUInstrInfo *>(
blk->getParent()->getTarget().getInstrInfo());
for (MachineBasicBlock::reverse_iterator iter = blk->rbegin(),
@@ -3083,8 +3083,8 @@ struct CFGStructTraits<AMDILCFGStructurizer>
AMDILCFGStructurizer *passRep,
RegiT regNum, int regVal) {
MachineInstr *oldInstr = &(*instrPos);
- const AMDILInstrInfo *tii =
- static_cast<const AMDILInstrInfo *>(passRep->getTargetInstrInfo());
+ const AMDGPUInstrInfo *tii =
+ static_cast<const AMDGPUInstrInfo *>(passRep->getTargetInstrInfo());
MachineBasicBlock *blk = oldInstr->getParent();
MachineInstr *newInstr = tii->getMovImmInstr(blk->getParent(), regNum,
regVal);
@@ -3096,8 +3096,8 @@ struct CFGStructTraits<AMDILCFGStructurizer>
static void insertAssignInstrBefore(MachineBasicBlock *blk,
AMDILCFGStructurizer *passRep,
RegiT regNum, int regVal) {
- const AMDILInstrInfo *tii =
- static_cast<const AMDILInstrInfo *>(passRep->getTargetInstrInfo());
+ const AMDGPUInstrInfo *tii =
+ static_cast<const AMDGPUInstrInfo *>(passRep->getTargetInstrInfo());
MachineInstr *newInstr = tii->getMovImmInstr(blk->getParent(), regNum,
regVal);
@@ -3116,8 +3116,8 @@ struct CFGStructTraits<AMDILCFGStructurizer>
AMDILCFGStructurizer *passRep,
RegiT dstReg, RegiT src1Reg,
RegiT src2Reg) {
- const AMDILInstrInfo *tii =
- static_cast<const AMDILInstrInfo *>(passRep->getTargetInstrInfo());
+ const AMDGPUInstrInfo *tii =
+ static_cast<const AMDGPUInstrInfo *>(passRep->getTargetInstrInfo());
MachineInstr *newInstr =
blk->getParent()->CreateMachineInstr(tii->get(tii->getIEQOpcode()), DebugLoc());
diff --git a/src/gallium/drivers/radeon/AMDILISelDAGToDAG.cpp b/src/gallium/drivers/radeon/AMDILISelDAGToDAG.cpp
index ebd7b266056..b3969441eb7 100644
--- a/src/gallium/drivers/radeon/AMDILISelDAGToDAG.cpp
+++ b/src/gallium/drivers/radeon/AMDILISelDAGToDAG.cpp
@@ -10,6 +10,7 @@
// This file defines an instruction selector for the AMDIL target.
//
//===----------------------------------------------------------------------===//
+#include "AMDGPUInstrInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPURegisterInfo.h"
#include "AMDILDevices.h"
diff --git a/src/gallium/drivers/radeon/AMDILInstrInfo.cpp b/src/gallium/drivers/radeon/AMDILInstrInfo.cpp
deleted file mode 100644
index 5dc86649667..00000000000
--- a/src/gallium/drivers/radeon/AMDILInstrInfo.cpp
+++ /dev/null
@@ -1,509 +0,0 @@
-//===- AMDILInstrInfo.cpp - AMDIL Instruction Information -------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-// This file contains the AMDIL implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDILInstrInfo.h"
-#include "AMDIL.h"
-#include "AMDILISelLowering.h"
-#include "AMDILUtilityFunctions.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/Instructions.h"
-
-#define GET_INSTRINFO_CTOR
-#include "AMDGPUGenInstrInfo.inc"
-
-using namespace llvm;
-
-AMDILInstrInfo::AMDILInstrInfo(TargetMachine &tm)
- : AMDGPUGenInstrInfo(),
- RI(tm, *this),
- TM(tm) {
-}
-
-const AMDGPURegisterInfo &AMDILInstrInfo::getRegisterInfo() const {
- return RI;
-}
-
-bool AMDILInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
- unsigned &SrcReg, unsigned &DstReg,
- unsigned &SubIdx) const {
-// TODO: Implement this function
- return false;
-}
-
-unsigned AMDILInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
-// TODO: Implement this function
- return 0;
-}
-
-unsigned AMDILInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
- int &FrameIndex) const {
-// TODO: Implement this function
- return 0;
-}
-
-bool AMDILInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
- const MachineMemOperand *&MMO,
- int &FrameIndex) const {
-// TODO: Implement this function
- return false;
-}
-unsigned AMDILInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
-// TODO: Implement this function
- return 0;
-}
-unsigned AMDILInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
- int &FrameIndex) const {
-// TODO: Implement this function
- return 0;
-}
-bool AMDILInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
- const MachineMemOperand *&MMO,
- int &FrameIndex) const {
-// TODO: Implement this function
- return false;
-}
-
-MachineInstr *
-AMDILInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
- LiveVariables *LV) const {
-// TODO: Implement this function
- return NULL;
-}
-bool AMDILInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
- MachineBasicBlock &MBB) const {
- while (iter != MBB.end()) {
- switch (iter->getOpcode()) {
- default:
- break;
- ExpandCaseToAllScalarTypes(AMDGPU::BRANCH_COND);
- case AMDGPU::BRANCH:
- return true;
- };
- ++iter;
- }
- return false;
-}
-
-bool AMDILInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const {
- bool retVal = true;
- return retVal;
- MachineBasicBlock::iterator iter = MBB.begin();
- if (!getNextBranchInstr(iter, MBB)) {
- retVal = false;
- } else {
- MachineInstr *firstBranch = iter;
- if (!getNextBranchInstr(++iter, MBB)) {
- if (firstBranch->getOpcode() == AMDGPU::BRANCH) {
- TBB = firstBranch->getOperand(0).getMBB();
- firstBranch->eraseFromParent();
- retVal = false;
- } else {
- TBB = firstBranch->getOperand(0).getMBB();
- FBB = *(++MBB.succ_begin());
- if (FBB == TBB) {
- FBB = *(MBB.succ_begin());
- }
- Cond.push_back(firstBranch->getOperand(1));
- retVal = false;
- }
- } else {
- MachineInstr *secondBranch = iter;
- if (!getNextBranchInstr(++iter, MBB)) {
- if (secondBranch->getOpcode() == AMDGPU::BRANCH) {
- TBB = firstBranch->getOperand(0).getMBB();
- Cond.push_back(firstBranch->getOperand(1));
- FBB = secondBranch->getOperand(0).getMBB();
- secondBranch->eraseFromParent();
- retVal = false;
- } else {
- assert(0 && "Should not have two consecutive conditional branches");
- }
- } else {
- MBB.getParent()->viewCFG();
- assert(0 && "Should not have three branch instructions in"
- " a single basic block");
- retVal = false;
- }
- }
- }
- return retVal;
-}
-
-unsigned int AMDILInstrInfo::getBranchInstr(const MachineOperand &op) const {
- const MachineInstr *MI = op.getParent();
-
- switch (MI->getDesc().OpInfo->RegClass) {
- default: // FIXME: fallthrough??
- case AMDGPU::GPRI32RegClassID: return AMDGPU::BRANCH_COND_i32;
- case AMDGPU::GPRF32RegClassID: return AMDGPU::BRANCH_COND_f32;
- };
-}
-
-unsigned int
-AMDILInstrInfo::InsertBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const
-{
- assert(TBB && "InsertBranch must not be told to insert a fallthrough");
- for (unsigned int x = 0; x < Cond.size(); ++x) {
- Cond[x].getParent()->dump();
- }
- if (FBB == 0) {
- if (Cond.empty()) {
- BuildMI(&MBB, DL, get(AMDGPU::BRANCH)).addMBB(TBB);
- } else {
- BuildMI(&MBB, DL, get(getBranchInstr(Cond[0])))
- .addMBB(TBB).addReg(Cond[0].getReg());
- }
- return 1;
- } else {
- BuildMI(&MBB, DL, get(getBranchInstr(Cond[0])))
- .addMBB(TBB).addReg(Cond[0].getReg());
- BuildMI(&MBB, DL, get(AMDGPU::BRANCH)).addMBB(FBB);
- }
- assert(0 && "Inserting two branches not supported");
- return 0;
-}
-
-unsigned int AMDILInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
- MachineBasicBlock::iterator I = MBB.end();
- if (I == MBB.begin()) {
- return 0;
- }
- --I;
- switch (I->getOpcode()) {
- default:
- return 0;
- ExpandCaseToAllScalarTypes(AMDGPU::BRANCH_COND);
- case AMDGPU::BRANCH:
- I->eraseFromParent();
- break;
- }
- I = MBB.end();
-
- if (I == MBB.begin()) {
- return 1;
- }
- --I;
- switch (I->getOpcode()) {
- // FIXME: only one case??
- default:
- return 1;
- ExpandCaseToAllScalarTypes(AMDGPU::BRANCH_COND);
- I->eraseFromParent();
- break;
- }
- return 2;
-}
-
-MachineBasicBlock::iterator skipFlowControl(MachineBasicBlock *MBB) {
- MachineBasicBlock::iterator tmp = MBB->end();
- if (!MBB->size()) {
- return MBB->end();
- }
- while (--tmp) {
- if (tmp->getOpcode() == AMDGPU::ENDLOOP
- || tmp->getOpcode() == AMDGPU::ENDIF
- || tmp->getOpcode() == AMDGPU::ELSE) {
- if (tmp == MBB->begin()) {
- return tmp;
- } else {
- continue;
- }
- } else {
- return ++tmp;
- }
- }
- return MBB->end();
-}
-
-void
-AMDILInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned SrcReg, bool isKill,
- int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
- unsigned int Opc = 0;
- // MachineInstr *curMI = MI;
- MachineFunction &MF = *(MBB.getParent());
- MachineFrameInfo &MFI = *MF.getFrameInfo();
-
- DebugLoc DL;
- switch (RC->getID()) {
- case AMDGPU::GPRF32RegClassID:
- Opc = AMDGPU::PRIVATESTORE_f32;
- break;
- case AMDGPU::GPRI32RegClassID:
- Opc = AMDGPU::PRIVATESTORE_i32;
- break;
- }
- if (MI != MBB.end()) DL = MI->getDebugLoc();
- MachineMemOperand *MMO =
- new MachineMemOperand(
- MachinePointerInfo::getFixedStack(FrameIndex),
- MachineMemOperand::MOLoad,
- MFI.getObjectSize(FrameIndex),
- MFI.getObjectAlignment(FrameIndex));
- if (MI != MBB.end()) {
- DL = MI->getDebugLoc();
- }
- BuildMI(MBB, MI, DL, get(Opc))
- .addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FrameIndex)
- .addMemOperand(MMO)
- .addImm(0);
-}
-
-void
-AMDILInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
- unsigned int Opc = 0;
- MachineFunction &MF = *(MBB.getParent());
- MachineFrameInfo &MFI = *MF.getFrameInfo();
- DebugLoc DL;
- switch (RC->getID()) {
- case AMDGPU::GPRF32RegClassID:
- Opc = AMDGPU::PRIVATELOAD_f32;
- break;
- case AMDGPU::GPRI32RegClassID:
- Opc = AMDGPU::PRIVATELOAD_i32;
- break;
- }
-
- MachineMemOperand *MMO =
- new MachineMemOperand(
- MachinePointerInfo::getFixedStack(FrameIndex),
- MachineMemOperand::MOLoad,
- MFI.getObjectSize(FrameIndex),
- MFI.getObjectAlignment(FrameIndex));
- if (MI != MBB.end()) {
- DL = MI->getDebugLoc();
- }
- BuildMI(MBB, MI, DL, get(Opc))
- .addReg(DestReg, RegState::Define)
- .addFrameIndex(FrameIndex)
- .addMemOperand(MMO)
- .addImm(0);
-}
-MachineInstr *
-AMDILInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const {
-// TODO: Implement this function
- return 0;
-}
-MachineInstr*
-AMDILInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr *LoadMI) const {
- // TODO: Implement this function
- return 0;
-}
-bool
-AMDILInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const
-{
- // TODO: Implement this function
- return false;
-}
-bool
-AMDILInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
- unsigned Reg, bool UnfoldLoad,
- bool UnfoldStore,
- SmallVectorImpl<MachineInstr*> &NewMIs) const {
- // TODO: Implement this function
- return false;
-}
-
-bool
-AMDILInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
- SmallVectorImpl<SDNode*> &NewNodes) const {
- // TODO: Implement this function
- return false;
-}
-
-unsigned
-AMDILInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
- bool UnfoldLoad, bool UnfoldStore,
- unsigned *LoadRegIndex) const {
- // TODO: Implement this function
- return 0;
-}
-
-bool AMDILInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
- int64_t Offset1, int64_t Offset2,
- unsigned NumLoads) const {
- assert(Offset2 > Offset1
- && "Second offset should be larger than first offset!");
- // If we have less than 16 loads in a row, and the offsets are within 16,
- // then schedule together.
- // TODO: Make the loads schedule near if it fits in a cacheline
- return (NumLoads < 16 && (Offset2 - Offset1) < 16);
-}
-
-bool
-AMDILInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
- const {
- // TODO: Implement this function
- return true;
-}
-void AMDILInstrInfo::insertNoop(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const {
- // TODO: Implement this function
-}
-
-bool AMDILInstrInfo::isPredicated(const MachineInstr *MI) const {
- // TODO: Implement this function
- return false;
-}
-bool
-AMDILInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2)
- const {
- // TODO: Implement this function
- return false;
-}
-
-bool AMDILInstrInfo::DefinesPredicate(MachineInstr *MI,
- std::vector<MachineOperand> &Pred) const {
- // TODO: Implement this function
- return false;
-}
-
-bool AMDILInstrInfo::isPredicable(MachineInstr *MI) const {
- // TODO: Implement this function
- return MI->getDesc().isPredicable();
-}
-
-bool
-AMDILInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
- // TODO: Implement this function
- return true;
-}
-
-bool AMDILInstrInfo::isLoadInst(MachineInstr *MI) const {
- if (strstr(getName(MI->getOpcode()), "LOADCONST")) {
- return false;
- }
- return strstr(getName(MI->getOpcode()), "LOAD");
-}
-
-bool AMDILInstrInfo::isSWSExtLoadInst(MachineInstr *MI) const
-{
- return false;
-}
-
-bool AMDILInstrInfo::isExtLoadInst(MachineInstr *MI) const {
- return strstr(getName(MI->getOpcode()), "EXTLOAD");
-}
-
-bool AMDILInstrInfo::isSExtLoadInst(MachineInstr *MI) const {
- return strstr(getName(MI->getOpcode()), "SEXTLOAD");
-}
-
-bool AMDILInstrInfo::isAExtLoadInst(MachineInstr *MI) const {
- return strstr(getName(MI->getOpcode()), "AEXTLOAD");
-}
-
-bool AMDILInstrInfo::isZExtLoadInst(MachineInstr *MI) const {
- return strstr(getName(MI->getOpcode()), "ZEXTLOAD");
-}
-
-bool AMDILInstrInfo::isStoreInst(MachineInstr *MI) const {
- return strstr(getName(MI->getOpcode()), "STORE");
-}
-
-bool AMDILInstrInfo::isTruncStoreInst(MachineInstr *MI) const {
- return strstr(getName(MI->getOpcode()), "TRUNCSTORE");
-}
-
-bool AMDILInstrInfo::isAtomicInst(MachineInstr *MI) const {
- return strstr(getName(MI->getOpcode()), "ATOM");
-}
-
-bool AMDILInstrInfo::isVolatileInst(MachineInstr *MI) const {
- if (!MI->memoperands_empty()) {
- for (MachineInstr::mmo_iterator mob = MI->memoperands_begin(),
- moe = MI->memoperands_end(); mob != moe; ++mob) {
- // If there is a volatile mem operand, this is a volatile instruction.
- if ((*mob)->isVolatile()) {
- return true;
- }
- }
- }
- return false;
-}
-bool AMDILInstrInfo::isGlobalInst(llvm::MachineInstr *MI) const
-{
- return strstr(getName(MI->getOpcode()), "GLOBAL");
-}
-bool AMDILInstrInfo::isPrivateInst(llvm::MachineInstr *MI) const
-{
- return strstr(getName(MI->getOpcode()), "PRIVATE");
-}
-bool AMDILInstrInfo::isConstantInst(llvm::MachineInstr *MI) const
-{
- return strstr(getName(MI->getOpcode()), "CONSTANT")
- || strstr(getName(MI->getOpcode()), "CPOOL");
-}
-bool AMDILInstrInfo::isRegionInst(llvm::MachineInstr *MI) const
-{
- return strstr(getName(MI->getOpcode()), "REGION");
-}
-bool AMDILInstrInfo::isLocalInst(llvm::MachineInstr *MI) const
-{
- return strstr(getName(MI->getOpcode()), "LOCAL");
-}
-bool AMDILInstrInfo::isImageInst(llvm::MachineInstr *MI) const
-{
- return strstr(getName(MI->getOpcode()), "IMAGE");
-}
-bool AMDILInstrInfo::isAppendInst(llvm::MachineInstr *MI) const
-{
- return strstr(getName(MI->getOpcode()), "APPEND");
-}
-bool AMDILInstrInfo::isRegionAtomic(llvm::MachineInstr *MI) const
-{
- return strstr(getName(MI->getOpcode()), "ATOM_R");
-}
-bool AMDILInstrInfo::isLocalAtomic(llvm::MachineInstr *MI) const
-{
- return strstr(getName(MI->getOpcode()), "ATOM_L");
-}
-bool AMDILInstrInfo::isGlobalAtomic(llvm::MachineInstr *MI) const
-{
- return strstr(getName(MI->getOpcode()), "ATOM_G")
- || isArenaAtomic(MI);
-}
-bool AMDILInstrInfo::isArenaAtomic(llvm::MachineInstr *MI) const
-{
- return strstr(getName(MI->getOpcode()), "ATOM_A");
-}
diff --git a/src/gallium/drivers/radeon/AMDILInstrInfo.h b/src/gallium/drivers/radeon/AMDILInstrInfo.h
deleted file mode 100644
index 551c5465349..00000000000
--- a/src/gallium/drivers/radeon/AMDILInstrInfo.h
+++ /dev/null
@@ -1,161 +0,0 @@
-//===- AMDILInstrInfo.h - AMDIL Instruction Information ---------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-// This file contains the AMDIL implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef AMDILINSTRUCTIONINFO_H_
-#define AMDILINSTRUCTIONINFO_H_
-
-#include "AMDGPURegisterInfo.h"
-#include "llvm/Target/TargetInstrInfo.h"
-
-#define GET_INSTRINFO_HEADER
-#include "AMDGPUGenInstrInfo.inc"
-
-namespace llvm {
- // AMDIL - This namespace holds all of the target specific flags that
- // instruction info tracks.
- //
- //class AMDILTargetMachine;
-class AMDILInstrInfo : public AMDGPUGenInstrInfo {
-private:
- const AMDGPURegisterInfo RI;
- TargetMachine &TM;
- bool getNextBranchInstr(MachineBasicBlock::iterator &iter,
- MachineBasicBlock &MBB) const;
- unsigned int getBranchInstr(const MachineOperand &op) const;
-public:
- explicit AMDILInstrInfo(TargetMachine &tm);
-
- // getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
- // such, whenever a client has an instance of instruction info, it should
- // always be able to get register info as well (through this method).
- const AMDGPURegisterInfo &getRegisterInfo() const;
-
- bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
- unsigned &DstReg, unsigned &SubIdx) const;
-
- unsigned isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
- unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
- int &FrameIndex) const;
- bool hasLoadFromStackSlot(const MachineInstr *MI,
- const MachineMemOperand *&MMO,
- int &FrameIndex) const;
- unsigned isStoreFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
- unsigned isStoreFromStackSlotPostFE(const MachineInstr *MI,
- int &FrameIndex) const;
- bool hasStoreFromStackSlot(const MachineInstr *MI,
- const MachineMemOperand *&MMO,
- int &FrameIndex) const;
-
- MachineInstr *
- convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
- LiveVariables *LV) const;
-
- bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const;
-
- unsigned RemoveBranch(MachineBasicBlock &MBB) const;
-
- unsigned
- InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const;
-
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const = 0;
-
- void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
- void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
-protected:
- MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const;
- MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr *LoadMI) const;
-public:
- bool canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const;
- bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
- unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
- SmallVectorImpl<MachineInstr *> &NewMIs) const;
- bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
- SmallVectorImpl<SDNode *> &NewNodes) const;
- unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
- bool UnfoldLoad, bool UnfoldStore,
- unsigned *LoadRegIndex = 0) const;
- bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
- int64_t Offset1, int64_t Offset2,
- unsigned NumLoads) const;
-
- bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
- void insertNoop(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const;
- bool isPredicated(const MachineInstr *MI) const;
- bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const;
- bool DefinesPredicate(MachineInstr *MI,
- std::vector<MachineOperand> &Pred) const;
- bool isPredicable(MachineInstr *MI) const;
- bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const;
-
- // Helper functions that check the opcode for status information
- bool isLoadInst(llvm::MachineInstr *MI) const;
- bool isExtLoadInst(llvm::MachineInstr *MI) const;
- bool isSWSExtLoadInst(llvm::MachineInstr *MI) const;
- bool isSExtLoadInst(llvm::MachineInstr *MI) const;
- bool isZExtLoadInst(llvm::MachineInstr *MI) const;
- bool isAExtLoadInst(llvm::MachineInstr *MI) const;
- bool isStoreInst(llvm::MachineInstr *MI) const;
- bool isTruncStoreInst(llvm::MachineInstr *MI) const;
- bool isAtomicInst(llvm::MachineInstr *MI) const;
- bool isVolatileInst(llvm::MachineInstr *MI) const;
- bool isGlobalInst(llvm::MachineInstr *MI) const;
- bool isPrivateInst(llvm::MachineInstr *MI) const;
- bool isConstantInst(llvm::MachineInstr *MI) const;
- bool isRegionInst(llvm::MachineInstr *MI) const;
- bool isLocalInst(llvm::MachineInstr *MI) const;
- bool isImageInst(llvm::MachineInstr *MI) const;
- bool isAppendInst(llvm::MachineInstr *MI) const;
- bool isRegionAtomic(llvm::MachineInstr *MI) const;
- bool isLocalAtomic(llvm::MachineInstr *MI) const;
- bool isGlobalAtomic(llvm::MachineInstr *MI) const;
- bool isArenaAtomic(llvm::MachineInstr *MI) const;
-
- virtual MachineInstr * getMovImmInstr(MachineFunction *MF, unsigned DstReg,
- int64_t Imm) const = 0;
-
- virtual unsigned getIEQOpcode() const = 0;
-
- virtual bool isMov(unsigned Opcode) const = 0;
-};
-
-}
-
-#endif // AMDILINSTRINFO_H_
diff --git a/src/gallium/drivers/radeon/AMDILPeepholeOptimizer.cpp b/src/gallium/drivers/radeon/AMDILPeepholeOptimizer.cpp
index 5b5932ac8c2..2f51d6f2989 100644
--- a/src/gallium/drivers/radeon/AMDILPeepholeOptimizer.cpp
+++ b/src/gallium/drivers/radeon/AMDILPeepholeOptimizer.cpp
@@ -9,7 +9,7 @@
#include "AMDILAlgorithms.tpp"
#include "AMDILDevices.h"
-#include "AMDILInstrInfo.h"
+#include "AMDGPUInstrInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
diff --git a/src/gallium/drivers/radeon/Makefile.sources b/src/gallium/drivers/radeon/Makefile.sources
index 1f59a251213..27e1e1b72f0 100644
--- a/src/gallium/drivers/radeon/Makefile.sources
+++ b/src/gallium/drivers/radeon/Makefile.sources
@@ -22,7 +22,6 @@ CPP_SOURCES := \
AMDILDeviceInfo.cpp \
AMDILEvergreenDevice.cpp \
AMDILFrameLowering.cpp \
- AMDILInstrInfo.cpp \
AMDILIntrinsicInfo.cpp \
AMDILISelDAGToDAG.cpp \
AMDILISelLowering.cpp \
diff --git a/src/gallium/drivers/radeon/R600CodeEmitter.cpp b/src/gallium/drivers/radeon/R600CodeEmitter.cpp
index c6b64c3db3a..f7b807c539a 100644
--- a/src/gallium/drivers/radeon/R600CodeEmitter.cpp
+++ b/src/gallium/drivers/radeon/R600CodeEmitter.cpp
@@ -19,7 +19,7 @@
#include "AMDGPU.h"
#include "AMDGPUCodeEmitter.h"
#include "AMDGPUUtil.h"
-#include "AMDILInstrInfo.h"
+#include "AMDGPUInstrInfo.h"
#include "AMDILUtilityFunctions.h"
#include "R600InstrInfo.h"
#include "R600RegisterInfo.h"
diff --git a/src/gallium/drivers/radeon/R600InstrInfo.h b/src/gallium/drivers/radeon/R600InstrInfo.h
index b9cbcc81a5e..f6afee3a979 100644
--- a/src/gallium/drivers/radeon/R600InstrInfo.h
+++ b/src/gallium/drivers/radeon/R600InstrInfo.h
@@ -15,7 +15,7 @@
#define R600INSTRUCTIONINFO_H_
#include "AMDIL.h"
-#include "AMDILInstrInfo.h"
+#include "AMDGPUInstrInfo.h"
#include "R600RegisterInfo.h"
#include <map>