author     Tom Stellard <[email protected]>  2012-01-06 17:38:37 -0500
committer  Tom Stellard <[email protected]>  2012-04-13 10:32:06 -0400
commit     a75c6163e605f35b14f26930dd9227e4f337ec9e (patch)
tree       0263219cbab9282896f874060bb03d445c4de891 /src/gallium/drivers/radeon/AMDILInstrInfo.cpp
parent     e55cf4854d594eae9ac3f6abd24f4e616eea894f (diff)
radeonsi: initial WIP SI code
This commit adds initial support for acceleration on SI chips. egltri is
starting to work.

The SI/R600 LLVM backend is currently included in Mesa, but that may change
in the future. The plan is to write a single gallium driver and use gallium
to support X acceleration.

This commit contains patches from:
  Tom Stellard <[email protected]>
  Michel Dänzer <[email protected]>
  Alex Deucher <[email protected]>
  Vadim Girlin <[email protected]>

Signed-off-by: Alex Deucher <[email protected]>

The following commits were squashed in:

======================================================================

radeonsi: Remove unused winsys pointer

This was removed from r600g in commit:

    commit 96d882939d612fcc8332f107befec470ed4359de
    Author: Marek Olšák <[email protected]>
    Date:   Fri Feb 17 01:49:49 2012 +0100

    gallium: remove unused winsys pointers in pipe_screen and pipe_context

    A winsys is already a private object of a driver.

======================================================================

radeonsi: Copy color clamping CAPs from r600

Not sure if the values of these CAPs are correct for radeonsi, but the
same changes were made to r600g in commit:

    commit bc1c8369384b5e16547c5bf9728aa78f8dfd66cc
    Author: Marek Olšák <[email protected]>
    Date:   Mon Jan 23 03:11:17 2012 +0100

    st/mesa: do vertex and fragment color clamping in shaders

    For ARB_color_buffer_float. Most hardware can't do it and st/mesa is
    the perfect place for a fallback.

    The exceptions are:
    - r500 (vertex clamp only)
    - nv50 (both)
    - nvc0 (both)
    - softpipe (both)

    We also have to take into account that r300 can do CLAMPED vertex
    colors only, while r600 can do UNCLAMPED vertex colors only. The
    difference can be expressed with the two new CAPs.

======================================================================

radeonsi: Remove PIPE_CAP_OUTPUT_READ

This CAP was dropped in commit:

    commit 04e324008759282728a95a1394bac2c4c2a1a3f9
    Author: Marek Olšák <[email protected]>
    Date:   Thu Feb 23 23:44:36 2012 +0100

    gallium: remove PIPE_SHADER_CAP_OUTPUT_READ

    r600g is the only driver which has made use of it. The reason the CAP
    was added was to fix some piglit tests when the GLSL pass
    lower_output_reads didn't exist.

    However, not removing output reads breaks the fallback for
    glClampColorARB, which assumes outputs are not readable. The fix
    would be non-trivial and my personal preference is to remove the CAP,
    considering that reading outputs is uncommon and that we can now use
    lower_output_reads to fix the issue that the CAP was supposed to
    work around in the first place.

======================================================================

radeonsi: Add missing parameters to rws->buffer_get_tiling() call

This was changed in commit:

    commit c0c979eebc076b95cc8d18a013ce2968fe6311ad
    Author: Jerome Glisse <[email protected]>
    Date:   Mon Jan 30 17:22:13 2012 -0500

    r600g: add support for common surface allocator for tiling v13

    Tiled surfaces have all kinds of alignment constraints that need to
    be met. Instead of having all this code duplicated between the ddx
    and mesa, use common code in libdrm_radeon; this also ensures that
    both the ddx and mesa compute those alignments in the same way.

    v2  fix evergreen
    v3  fix compressed texture and work around cube texture issue by
        disabling 2D array mode for cubemaps (need to check if r7xx and
        newer are also affected by the issue)
    v4  fix texture array
    v5  fix evergreen and newer; split surface value computation from
        mipmap tree generation so that we can get the values directly
        from the ddx
    v6  final fix to the evergreen tile split value
    v7  fix mipmap offset to avoid using a random value; use color view
        and depth view to address different layers, as the hardware does
        some magic rotation depending on the layer
    v8  fix COLOR_VIEW on r6xx for linear array mode, use COLOR_VIEW on
        evergreen, align bytes per pixel to a multiple of a dword
    v9  fix handling of stencil on evergreen, half fix for compressed
        textures
    v10 fix evergreen compressed textures, proper support for stencil
        tile split. Fix stencil issue when array mode was cleared by the
        kernel; always program the stencil bo. On evergreen the depth
        buffer bo needs to be big enough to hold depth buffer + stencil
        buffer, as even with stencil disabled things get written there.
    v11 rebase on top of mesa; fix pitch issue with 1D surfaces on
        evergreen (the old ddx overestimates those). Fix the linear case
        when pitch*height < 64. Fix r300g.
    v12 fix the linear case when pitch*height < 64 for the old path;
        adapt to a libdrm API change
    v13 add libdrm check

    Signed-off-by: Jerome Glisse <[email protected]>

======================================================================

radeonsi: Remove PIPE_TRANSFER_MAP_PERMANENTLY

This was removed in commit:

    commit 62f44f670bb0162e89fd4786af877f8da9ff607c
    Author: Marek Olšák <[email protected]>
    Date:   Mon Mar 5 13:45:00 2012 +0100

    Revert "gallium: add flag PIPE_TRANSFER_MAP_PERMANENTLY"

    This reverts commit 0950086376b1c8b7fb89eda81ed7f2f06dee58bc.

    It was decided to refactor the transfer API instead of adding
    workarounds to address the performance issues.

======================================================================

radeonsi: Handle PIPE_VIDEO_CAP_PREFERED_FORMAT.

Reintroduced in commit 9d9afcb5bac2931d4b8e6d1aa571e941c5110c90.

======================================================================

radeonsi: nuke the fallback for vertex and fragment color clamping

Ported from r600g commit c2b800cf38b299c1ab1c53dc0e4ea00c7acef853.

======================================================================

radeonsi: don't expose transform_feedback2 without kernel support

Ported from r600g commit 15146fd1bcbb08e44a1cbb984440ee1a5de63d48.

======================================================================

radeonsi: Handle PIPE_CAP_GLSL_FEATURE_LEVEL.

Ported from the r600g part of commit 171be755223d99f8cc5cc1bdaf8bd7b4caa04b4f.

======================================================================

radeonsi: set minimum point size to 1.0 for non-sprite non-aa points.

Ported from r600g commit f183cc9ce3ad1d043bdf8b38fd519e8f437714fc.

======================================================================

radeonsi: rework and consolidate stencilref state setting.

Ported from r600g commit a2361946e782b57f0c63587841ca41c0ea707070.

======================================================================

radeonsi: cleanup setting DB_SHADER_CONTROL.

Ported from r600g commit 3d061caaed13b646ff40754f8ebe73f3d4983c5b.

======================================================================

radeonsi: Get rid of register masks.

Ported from r600g commits
3d061caaed13b646ff40754f8ebe73f3d4983c5b..9344ab382a1765c1a7c2560e771485edf4954fe2.

======================================================================

radeonsi: get rid of r600_context_reg.

Ported from r600g commits
9344ab382a1765c1a7c2560e771485edf4954fe2..bed20f02a771f43e1c5092254705701c228cfa7f.

======================================================================

radeonsi: Fix regression from 'Get rid of register masks'.

======================================================================

radeonsi: optimize r600_resource_va.

Ported from r600g commit 669d8766ff3403938794eb80d7769347b6e52174.

======================================================================

radeonsi: remove u8,u16,u32,u64 types.

Ported from r600g commit 78293b99b23268e6698f1267aaf40647c17d95a5.

======================================================================

radeonsi: merge r600_context with r600_pipe_context.

Ported from r600g commit e4340c1908a6a3b09e1a15d5195f6da7d00494d0.

======================================================================

radeonsi: Miscellaneous context cleanups.

Ported from r600g commits
e4340c1908a6a3b09e1a15d5195f6da7d00494d0..621e0db71c5ddcb379171064a4f720c9cf01e888.

======================================================================

radeonsi: add a new simple API for state emission.

Ported from r600g commits
621e0db71c5ddcb379171064a4f720c9cf01e888..f661405637bba32c2cfbeecf6e2e56e414e9521e.

======================================================================

radeonsi: Also remove the sbu_flags member of struct r600_reg.

Requires using sid.h instead of r600d.h for the new CP_COHER_CNTL
definitions, so some code needs to be disabled for now.

======================================================================

radeonsi: Miscellaneous simplifications.

Ported from r600g commits 38bf2763482b4f1b6d95cd51aecec75601d8b90f and
b0337b679ad4c2feae59215104cfa60b58a619d5.

======================================================================

radeonsi: Handle PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION.

Ported from commit 8b4f7b0672d663273310fffa9490ad996f5b914a.

======================================================================

radeonsi: Use a fake reloc to sleep for fences.

Ported from r600g commit 8cd03b933cf868ff867e2db4a0937005a02fd0e4.

======================================================================

radeonsi: adapt to the get_query_result interface change.

Ported from r600g commit 4445e170bee23a3607ece0e010adef7058ac6a11.
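For context on the two color-clamping CAPs mentioned above, here is a minimal
sketch (illustrative only, not part of this commit) of how a gallium driver's
get_param hook might report them. The CAP names come from the st/mesa commit
quoted above; the example_get_param function and the r600-style answers are
assumptions.

    #include "pipe/p_defines.h"
    #include "pipe/p_screen.h"

    /* Hypothetical sketch: report color-clamping capabilities so st/mesa
     * knows where it must insert clamping in shaders.  The values mirror
     * the r600-style behavior described above and are assumptions. */
    static int example_get_param(struct pipe_screen *screen,
                                 enum pipe_cap param)
    {
       switch (param) {
       case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
          return 1;   /* hardware can leave vertex colors unclamped */
       case PIPE_CAP_VERTEX_COLOR_CLAMPED:
          return 0;   /* st/mesa must clamp in the vertex shader */
       case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
          return 0;   /* st/mesa must clamp in the fragment shader */
       default:
          return 0;
       }
    }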
Diffstat (limited to 'src/gallium/drivers/radeon/AMDILInstrInfo.cpp')
-rw-r--r--  src/gallium/drivers/radeon/AMDILInstrInfo.cpp  709
1 file changed, 709 insertions(+), 0 deletions(-)
diff --git a/src/gallium/drivers/radeon/AMDILInstrInfo.cpp b/src/gallium/drivers/radeon/AMDILInstrInfo.cpp
new file mode 100644
index 00000000000..fbc3e45b357
--- /dev/null
+++ b/src/gallium/drivers/radeon/AMDILInstrInfo.cpp
@@ -0,0 +1,709 @@
+//===- AMDILInstrInfo.cpp - AMDIL Instruction Information -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// This file contains the AMDIL implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+#include "AMDILInstrInfo.h"
+#include "AMDILUtilityFunctions.h"
+
+#define GET_INSTRINFO_CTOR
+#include "AMDILGenInstrInfo.inc"
+
+#include "AMDILInstrInfo.h"
+#include "AMDILUtilityFunctions.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/Instructions.h"
+
+using namespace llvm;
+
+AMDILInstrInfo::AMDILInstrInfo(AMDILTargetMachine &tm)
+ : AMDILGenInstrInfo(AMDIL::ADJCALLSTACKDOWN, AMDIL::ADJCALLSTACKUP),
+ RI(tm, *this),
+ TM(tm) {
+}
+
+const AMDILRegisterInfo &AMDILInstrInfo::getRegisterInfo() const {
+ return RI;
+}
+
+/// Return true if the instruction is a register-to-register move, and set
+/// the source and destination operands in the passed parameters.
+bool AMDILInstrInfo::isMoveInstr(const MachineInstr &MI, unsigned int &SrcReg,
+ unsigned int &DstReg, unsigned int &SrcSubIdx,
+ unsigned int &DstSubIdx) const {
+  // FIXME: we should also look for:
+  //   add with 0
+ if (!isMove(MI.getOpcode())) {
+ return false;
+ }
+ if (!MI.getOperand(0).isReg() || !MI.getOperand(1).isReg()) {
+ return false;
+ }
+ SrcReg = MI.getOperand(1).getReg();
+ DstReg = MI.getOperand(0).getReg();
+ DstSubIdx = 0;
+ SrcSubIdx = 0;
+ return true;
+}
+
+bool AMDILInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
+ unsigned &SrcReg, unsigned &DstReg,
+ unsigned &SubIdx) const {
+// TODO: Implement this function
+ return false;
+}
+
+unsigned AMDILInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
+// TODO: Implement this function
+ return 0;
+}
+
+unsigned AMDILInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
+ int &FrameIndex) const {
+// TODO: Implement this function
+ return 0;
+}
+
+bool AMDILInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
+ const MachineMemOperand *&MMO,
+ int &FrameIndex) const {
+// TODO: Implement this function
+ return false;
+}
+
+unsigned AMDILInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
+                                              int &FrameIndex) const {
+// TODO: Implement this function
+  return 0;
+}
+
+unsigned AMDILInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
+                                                    int &FrameIndex) const {
+// TODO: Implement this function
+  return 0;
+}
+
+bool AMDILInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
+                                           const MachineMemOperand *&MMO,
+                                           int &FrameIndex) const {
+// TODO: Implement this function
+  return false;
+}
+
+#if 0
+void
+AMDILInstrInfo::reMaterialize(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg, unsigned SubIdx,
+ const MachineInstr *Orig,
+ const TargetRegisterInfo *TRI) const {
+// TODO: Implement this function
+}
+
+MachineInstr *AMDILInstrInfo::duplicate(MachineInstr *Orig,
+ MachineFunction &MF) const {
+// TODO: Implement this function
+ return NULL;
+}
+#endif
+MachineInstr *
+AMDILInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
+ MachineBasicBlock::iterator &MBBI,
+ LiveVariables *LV) const {
+// TODO: Implement this function
+ return NULL;
+}
+#if 0
+MachineInstr *AMDILInstrInfo::commuteInstruction(MachineInstr *MI,
+                                                 bool NewMI) const {
+// TODO: Implement this function
+ return NULL;
+}
+bool
+AMDILInstrInfo::findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
+ unsigned &SrcOpIdx2) const
+{
+// TODO: Implement this function
+  return false;
+}
+bool
+AMDILInstrInfo::produceSameValue(const MachineInstr *MI0,
+ const MachineInstr *MI1) const
+{
+// TODO: Implement this function
+  return false;
+}
+#endif
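+// Advance iter to the next BRANCH or BRANCH_COND_* instruction in MBB,
+// returning true if one was found.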
+bool AMDILInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
+ MachineBasicBlock &MBB) const {
+ while (iter != MBB.end()) {
+ switch (iter->getOpcode()) {
+ default:
+ break;
+ ExpandCaseToAllScalarTypes(AMDIL::BRANCH_COND);
+ case AMDIL::BRANCH:
+ return true;
+  }
+ ++iter;
+ }
+ return false;
+}
+
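+// FIXME: Branch analysis is effectively disabled: AnalyzeBranch returns
+// "unanalyzable" immediately, leaving the code below the early return
+// unreachable.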
+bool AMDILInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
+ bool retVal = true;
+ return retVal;
+ MachineBasicBlock::iterator iter = MBB.begin();
+ if (!getNextBranchInstr(iter, MBB)) {
+ retVal = false;
+ } else {
+ MachineInstr *firstBranch = iter;
+ if (!getNextBranchInstr(++iter, MBB)) {
+ if (firstBranch->getOpcode() == AMDIL::BRANCH) {
+ TBB = firstBranch->getOperand(0).getMBB();
+ firstBranch->eraseFromParent();
+ retVal = false;
+ } else {
+ TBB = firstBranch->getOperand(0).getMBB();
+ FBB = *(++MBB.succ_begin());
+ if (FBB == TBB) {
+ FBB = *(MBB.succ_begin());
+ }
+ Cond.push_back(firstBranch->getOperand(1));
+ retVal = false;
+ }
+ } else {
+ MachineInstr *secondBranch = iter;
+ if (!getNextBranchInstr(++iter, MBB)) {
+ if (secondBranch->getOpcode() == AMDIL::BRANCH) {
+ TBB = firstBranch->getOperand(0).getMBB();
+ Cond.push_back(firstBranch->getOperand(1));
+ FBB = secondBranch->getOperand(0).getMBB();
+ secondBranch->eraseFromParent();
+ retVal = false;
+ } else {
+ assert(0 && "Should not have two consecutive conditional branches");
+ }
+ } else {
+ MBB.getParent()->viewCFG();
+ assert(0 && "Should not have three branch instructions in"
+ " a single basic block");
+ retVal = false;
+ }
+ }
+ }
+ return retVal;
+}
+
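+// Select the BRANCH_COND_* opcode whose type matches the register class of
+// the given condition operand.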
+unsigned int AMDILInstrInfo::getBranchInstr(const MachineOperand &op) const {
+ const MachineInstr *MI = op.getParent();
+
+ switch (MI->getDesc().OpInfo->RegClass) {
+ default: // FIXME: fallthrough??
+ case AMDIL::GPRI8RegClassID: return AMDIL::BRANCH_COND_i8;
+ case AMDIL::GPRI16RegClassID: return AMDIL::BRANCH_COND_i16;
+ case AMDIL::GPRI32RegClassID: return AMDIL::BRANCH_COND_i32;
+ case AMDIL::GPRI64RegClassID: return AMDIL::BRANCH_COND_i64;
+ case AMDIL::GPRF32RegClassID: return AMDIL::BRANCH_COND_f32;
+ case AMDIL::GPRF64RegClassID: return AMDIL::BRANCH_COND_f64;
+  }
+}
+
+unsigned int
+AMDILInstrInfo::InsertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const
+{
+ assert(TBB && "InsertBranch must not be told to insert a fallthrough");
+ for (unsigned int x = 0; x < Cond.size(); ++x) {
+ Cond[x].getParent()->dump();
+ }
+ if (FBB == 0) {
+ if (Cond.empty()) {
+ BuildMI(&MBB, DL, get(AMDIL::BRANCH)).addMBB(TBB);
+ } else {
+ BuildMI(&MBB, DL, get(getBranchInstr(Cond[0])))
+ .addMBB(TBB).addReg(Cond[0].getReg());
+ }
+ return 1;
+ } else {
+ BuildMI(&MBB, DL, get(getBranchInstr(Cond[0])))
+ .addMBB(TBB).addReg(Cond[0].getReg());
+ BuildMI(&MBB, DL, get(AMDIL::BRANCH)).addMBB(FBB);
+ }
+ assert(0 && "Inserting two branches not supported");
+ return 0;
+}
+
+unsigned int AMDILInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
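+  // Erase up to two trailing branch instructions (an unconditional BRANCH
+  // and/or a conditional BRANCH_COND_*) and return how many were removed.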
+ MachineBasicBlock::iterator I = MBB.end();
+ if (I == MBB.begin()) {
+ return 0;
+ }
+ --I;
+ switch (I->getOpcode()) {
+ default:
+ return 0;
+ ExpandCaseToAllScalarTypes(AMDIL::BRANCH_COND);
+ case AMDIL::BRANCH:
+ I->eraseFromParent();
+ break;
+ }
+ I = MBB.end();
+
+ if (I == MBB.begin()) {
+ return 1;
+ }
+ --I;
+ switch (I->getOpcode()) {
+ // FIXME: only one case??
+ default:
+ return 1;
+ ExpandCaseToAllScalarTypes(AMDIL::BRANCH_COND);
+ I->eraseFromParent();
+ break;
+ }
+ return 2;
+}
+
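+// Find an insertion point just ahead of any trailing flow-control
+// instructions (ELSE/ENDIF/ENDLOOP) at the end of the given block.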
+MachineBasicBlock::iterator skipFlowControl(MachineBasicBlock *MBB) {
+ MachineBasicBlock::iterator tmp = MBB->end();
+ if (!MBB->size()) {
+ return MBB->end();
+ }
+ while (--tmp) {
+ if (tmp->getOpcode() == AMDIL::ENDLOOP
+ || tmp->getOpcode() == AMDIL::ENDIF
+ || tmp->getOpcode() == AMDIL::ELSE) {
+ if (tmp == MBB->begin()) {
+ return tmp;
+ } else {
+ continue;
+ }
+ } else {
+ return ++tmp;
+ }
+ }
+ return MBB->end();
+}
+
+bool
+AMDILInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DestReg, unsigned SrcReg,
+ const TargetRegisterClass *DestRC,
+ const TargetRegisterClass *SrcRC,
+ DebugLoc DL) const {
+  // If we are adding to the end of a basic block we can safely assume that
+  // the move is caused by a PHI node, since all non-PHI move instructions
+  // have already been inserted into the basic blocks.  Therefore we call
+  // skipFlowControl to move the iterator before the flow control
+  // instructions and insert the move instruction there.
+ bool phi = (DestReg < 1025) || (SrcReg < 1025);
+ int movInst = phi ? getMoveInstFromID(DestRC->getID())
+ : getPHIMoveInstFromID(DestRC->getID());
+
+ MachineBasicBlock::iterator iTemp = (I == MBB.end()) ? skipFlowControl(&MBB)
+ : I;
+ if (DestRC != SrcRC) {
+ //int convInst;
+ size_t dSize = DestRC->getSize();
+ size_t sSize = SrcRC->getSize();
+ if (dSize > sSize) {
+ // Elements are going to get duplicated.
+ BuildMI(MBB, iTemp, DL, get(movInst), DestReg).addReg(SrcReg);
+ } else if (dSize == sSize) {
+ // Direct copy, conversions are not handled.
+ BuildMI(MBB, iTemp, DL, get(movInst), DestReg).addReg(SrcReg);
+ } else if (dSize < sSize) {
+ // Elements are going to get dropped.
+ BuildMI(MBB, iTemp, DL, get(movInst), DestReg).addReg(SrcReg);
+ }
+ } else {
+    BuildMI(MBB, iTemp, DL, get(movInst), DestReg).addReg(SrcReg);
+ }
+ return true;
+}
+
+void
+AMDILInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const
+{
+ BuildMI(MBB, MI, DL, get(AMDIL::MOVE_v4i32), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
+#if 0
+ DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg)
+ << " to " << RI.getName(DestReg) << '\n');
+ abort();
+#endif
+}
+
+void
+AMDILInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned SrcReg, bool isKill,
+ int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ unsigned int Opc = 0;
+ // MachineInstr *curMI = MI;
+ MachineFunction &MF = *(MBB.getParent());
+ MachineFrameInfo &MFI = *MF.getFrameInfo();
+
+ DebugLoc DL;
+ switch (RC->getID()) {
+ default:
+ Opc = AMDIL::PRIVATESTORE_v4i32;
+ break;
+ case AMDIL::GPRF32RegClassID:
+ Opc = AMDIL::PRIVATESTORE_f32;
+ break;
+ case AMDIL::GPRF64RegClassID:
+ Opc = AMDIL::PRIVATESTORE_f64;
+ break;
+ case AMDIL::GPRI16RegClassID:
+ Opc = AMDIL::PRIVATESTORE_i16;
+ break;
+ case AMDIL::GPRI32RegClassID:
+ Opc = AMDIL::PRIVATESTORE_i32;
+ break;
+ case AMDIL::GPRI8RegClassID:
+ Opc = AMDIL::PRIVATESTORE_i8;
+ break;
+ case AMDIL::GPRI64RegClassID:
+ Opc = AMDIL::PRIVATESTORE_i64;
+ break;
+ case AMDIL::GPRV2F32RegClassID:
+ Opc = AMDIL::PRIVATESTORE_v2f32;
+ break;
+ case AMDIL::GPRV2F64RegClassID:
+ Opc = AMDIL::PRIVATESTORE_v2f64;
+ break;
+ case AMDIL::GPRV2I16RegClassID:
+ Opc = AMDIL::PRIVATESTORE_v2i16;
+ break;
+ case AMDIL::GPRV2I32RegClassID:
+ Opc = AMDIL::PRIVATESTORE_v2i32;
+ break;
+ case AMDIL::GPRV2I8RegClassID:
+ Opc = AMDIL::PRIVATESTORE_v2i8;
+ break;
+ case AMDIL::GPRV2I64RegClassID:
+ Opc = AMDIL::PRIVATESTORE_v2i64;
+ break;
+ case AMDIL::GPRV4F32RegClassID:
+ Opc = AMDIL::PRIVATESTORE_v4f32;
+ break;
+ case AMDIL::GPRV4I16RegClassID:
+ Opc = AMDIL::PRIVATESTORE_v4i16;
+ break;
+ case AMDIL::GPRV4I32RegClassID:
+ Opc = AMDIL::PRIVATESTORE_v4i32;
+ break;
+ case AMDIL::GPRV4I8RegClassID:
+ Opc = AMDIL::PRIVATESTORE_v4i8;
+ break;
+ }
+  MachineMemOperand *MMO =
+      new MachineMemOperand(
+          MachinePointerInfo::getFixedStack(FrameIndex),
+          MachineMemOperand::MOStore,
+          MFI.getObjectSize(FrameIndex),
+          MFI.getObjectAlignment(FrameIndex));
+ if (MI != MBB.end()) {
+ DL = MI->getDebugLoc();
+ }
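+  // Emit the private (scratch) store, then tag it with the scratch resource
+  // ID so the asm printer directs it at scratch memory.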
+ MachineInstr *nMI = BuildMI(MBB, MI, DL, get(Opc))
+ .addReg(SrcReg, getKillRegState(isKill))
+ .addFrameIndex(FrameIndex)
+ .addMemOperand(MMO)
+ .addImm(0);
+ AMDILAS::InstrResEnc curRes;
+ curRes.bits.ResourceID
+ = TM.getSubtargetImpl()->device()->getResourceID(AMDILDevice::SCRATCH_ID);
+ setAsmPrinterFlags(nMI, curRes);
+}
+
+void
+AMDILInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ unsigned int Opc = 0;
+ MachineFunction &MF = *(MBB.getParent());
+ MachineFrameInfo &MFI = *MF.getFrameInfo();
+ DebugLoc DL;
+ switch (RC->getID()) {
+ default:
+ Opc = AMDIL::PRIVATELOAD_v4i32;
+ break;
+ case AMDIL::GPRF32RegClassID:
+ Opc = AMDIL::PRIVATELOAD_f32;
+ break;
+ case AMDIL::GPRF64RegClassID:
+ Opc = AMDIL::PRIVATELOAD_f64;
+ break;
+ case AMDIL::GPRI16RegClassID:
+ Opc = AMDIL::PRIVATELOAD_i16;
+ break;
+ case AMDIL::GPRI32RegClassID:
+ Opc = AMDIL::PRIVATELOAD_i32;
+ break;
+ case AMDIL::GPRI8RegClassID:
+ Opc = AMDIL::PRIVATELOAD_i8;
+ break;
+ case AMDIL::GPRI64RegClassID:
+ Opc = AMDIL::PRIVATELOAD_i64;
+ break;
+ case AMDIL::GPRV2F32RegClassID:
+ Opc = AMDIL::PRIVATELOAD_v2f32;
+ break;
+ case AMDIL::GPRV2F64RegClassID:
+ Opc = AMDIL::PRIVATELOAD_v2f64;
+ break;
+ case AMDIL::GPRV2I16RegClassID:
+ Opc = AMDIL::PRIVATELOAD_v2i16;
+ break;
+ case AMDIL::GPRV2I32RegClassID:
+ Opc = AMDIL::PRIVATELOAD_v2i32;
+ break;
+ case AMDIL::GPRV2I8RegClassID:
+ Opc = AMDIL::PRIVATELOAD_v2i8;
+ break;
+ case AMDIL::GPRV2I64RegClassID:
+ Opc = AMDIL::PRIVATELOAD_v2i64;
+ break;
+ case AMDIL::GPRV4F32RegClassID:
+ Opc = AMDIL::PRIVATELOAD_v4f32;
+ break;
+ case AMDIL::GPRV4I16RegClassID:
+ Opc = AMDIL::PRIVATELOAD_v4i16;
+ break;
+ case AMDIL::GPRV4I32RegClassID:
+ Opc = AMDIL::PRIVATELOAD_v4i32;
+ break;
+ case AMDIL::GPRV4I8RegClassID:
+ Opc = AMDIL::PRIVATELOAD_v4i8;
+ break;
+ }
+
+ MachineMemOperand *MMO =
+ new MachineMemOperand(
+ MachinePointerInfo::getFixedStack(FrameIndex),
+ MachineMemOperand::MOLoad,
+ MFI.getObjectSize(FrameIndex),
+ MFI.getObjectAlignment(FrameIndex));
+ if (MI != MBB.end()) {
+ DL = MI->getDebugLoc();
+ }
+ MachineInstr* nMI = BuildMI(MBB, MI, DL, get(Opc))
+ .addReg(DestReg, RegState::Define)
+ .addFrameIndex(FrameIndex)
+ .addMemOperand(MMO)
+ .addImm(0);
+ AMDILAS::InstrResEnc curRes;
+ curRes.bits.ResourceID
+ = TM.getSubtargetImpl()->device()->getResourceID(AMDILDevice::SCRATCH_ID);
+ setAsmPrinterFlags(nMI, curRes);
+}
+
+MachineInstr *
+AMDILInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const {
+// TODO: Implement this function
+ return 0;
+}
+
+MachineInstr *
+AMDILInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ MachineInstr *LoadMI) const {
+ // TODO: Implement this function
+ return 0;
+}
+
+bool
+AMDILInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops) const
+{
+ // TODO: Implement this function
+ return false;
+}
+
+bool
+AMDILInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
+ unsigned Reg, bool UnfoldLoad,
+ bool UnfoldStore,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const {
+ // TODO: Implement this function
+ return false;
+}
+
+bool
+AMDILInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
+ SmallVectorImpl<SDNode*> &NewNodes) const {
+ // TODO: Implement this function
+ return false;
+}
+
+unsigned
+AMDILInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
+ bool UnfoldLoad, bool UnfoldStore,
+ unsigned *LoadRegIndex) const {
+ // TODO: Implement this function
+ return 0;
+}
+
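+// FIXME: This is effectively disabled: the early return below always reports
+// the loads as unrelated, leaving the GEP-offset matching code unreachable.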
+bool
+AMDILInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
+ int64_t &Offset1,
+ int64_t &Offset2) const {
+ return false;
+ if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode()) {
+ return false;
+ }
+ const MachineSDNode *mload1 = dyn_cast<MachineSDNode>(Load1);
+ const MachineSDNode *mload2 = dyn_cast<MachineSDNode>(Load2);
+ if (!mload1 || !mload2) {
+ return false;
+ }
+ if (mload1->memoperands_empty() ||
+ mload2->memoperands_empty()) {
+ return false;
+ }
+ MachineMemOperand *memOp1 = (*mload1->memoperands_begin());
+ MachineMemOperand *memOp2 = (*mload2->memoperands_begin());
+ const Value *mv1 = memOp1->getValue();
+ const Value *mv2 = memOp2->getValue();
+ if (!memOp1->isLoad() || !memOp2->isLoad()) {
+ return false;
+ }
+ if (getBasePointerValue(mv1) == getBasePointerValue(mv2)) {
+ if (isa<GetElementPtrInst>(mv1) && isa<GetElementPtrInst>(mv2)) {
+ const GetElementPtrInst *gep1 = dyn_cast<GetElementPtrInst>(mv1);
+ const GetElementPtrInst *gep2 = dyn_cast<GetElementPtrInst>(mv2);
+ if (!gep1 || !gep2) {
+ return false;
+ }
+ if (gep1->getNumOperands() != gep2->getNumOperands()) {
+ return false;
+ }
+ for (unsigned i = 0, e = gep1->getNumOperands() - 1; i < e; ++i) {
+ const Value *op1 = gep1->getOperand(i);
+ const Value *op2 = gep2->getOperand(i);
+ if (op1 != op2) {
+ // If any value except the last one is different, return false.
+ return false;
+ }
+ }
+ unsigned size = gep1->getNumOperands()-1;
+ if (!isa<ConstantInt>(gep1->getOperand(size))
+ || !isa<ConstantInt>(gep2->getOperand(size))) {
+ return false;
+ }
+ Offset1 = dyn_cast<ConstantInt>(gep1->getOperand(size))->getSExtValue();
+ Offset2 = dyn_cast<ConstantInt>(gep2->getOperand(size))->getSExtValue();
+ return true;
+ } else if (isa<Argument>(mv1) && isa<Argument>(mv2)) {
+ return false;
+ } else if (isa<GlobalValue>(mv1) && isa<GlobalValue>(mv2)) {
+ return false;
+ }
+ }
+ return false;
+}
+
+bool AMDILInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
+ int64_t Offset1, int64_t Offset2,
+ unsigned NumLoads) const {
+ assert(Offset2 > Offset1
+ && "Second offset should be larger than first offset!");
+ // If we have less than 16 loads in a row, and the offsets are within 16,
+ // then schedule together.
+ // TODO: Make the loads schedule near if it fits in a cacheline
+ return (NumLoads < 16 && (Offset2 - Offset1) < 16);
+}
+
+bool
+AMDILInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
+ const {
+ // TODO: Implement this function
+ return true;
+}
+void AMDILInstrInfo::insertNoop(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const {
+ // TODO: Implement this function
+}
+
+bool AMDILInstrInfo::isPredicated(const MachineInstr *MI) const {
+ // TODO: Implement this function
+ return false;
+}
+#if 0
+bool AMDILInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
+ // TODO: Implement this function
+  return false;
+}
+
+bool AMDILInstrInfo::PredicateInstruction(MachineInstr *MI,
+ const SmallVectorImpl<MachineOperand> &Pred) const {
+ // TODO: Implement this function
+  return false;
+}
+#endif
+bool
+AMDILInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
+ const SmallVectorImpl<MachineOperand> &Pred2)
+ const {
+ // TODO: Implement this function
+ return false;
+}
+
+bool AMDILInstrInfo::DefinesPredicate(MachineInstr *MI,
+ std::vector<MachineOperand> &Pred) const {
+ // TODO: Implement this function
+ return false;
+}
+
+bool AMDILInstrInfo::isPredicable(MachineInstr *MI) const {
+ // TODO: Implement this function
+ return MI->getDesc().isPredicable();
+}
+
+bool
+AMDILInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
+ // TODO: Implement this function
+ return true;
+}
+
+unsigned AMDILInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
+ // TODO: Implement this function
+ return 0;
+}
+
+#if 0
+unsigned
+AMDILInstrInfo::GetFunctionSizeInBytes(const MachineFunction &MF) const {
+ // TODO: Implement this function
+ return 0;
+}
+
+unsigned AMDILInstrInfo::getInlineAsmLength(const char *Str,
+ const MCAsmInfo &MAI) const {
+ // TODO: Implement this function
+ return 0;
+}
+#endif