//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the TargetInstrInfo class that is
// common to all AMD GPUs.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "AMDIL.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;
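
// The constructor walks every AMDIL pseudo opcode and, using the
// GPU-generation field (bits 40-42) and AMDIL opcode field (bits 48-63)
// packed into TSFlags, records which target-specific (ISA) opcode
// implements that pseudo on the current device.  The resulting
// amdilToISA map is consulted by getISAOpcode() below.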
AMDGPUInstrInfo::AMDGPUInstrInfo(AMDGPUTargetMachine &tm)
  : AMDILInstrInfo(tm), TM(tm)
{
  const AMDILDevice * dev = TM.getSubtarget<AMDILSubtarget>().device();
  for (unsigned i = 0; i < AMDIL::INSTRUCTION_LIST_END; i++) {
    const MCInstrDesc & instDesc = get(i);
    uint32_t instGen = (instDesc.TSFlags >> 40) & 0x7;
    uint32_t inst = (instDesc.TSFlags >> 48) & 0xffff;
    if (inst == 0) {
      continue;
    }
    switch (instGen) {
    case AMDGPUInstrInfo::R600_CAYMAN:
      if (dev->getGeneration() > AMDILDeviceInfo::HD6XXX) {
        continue;
      }
      break;
    case AMDGPUInstrInfo::R600:
      if (dev->getGeneration() != AMDILDeviceInfo::HD4XXX) {
        continue;
      }
      break;
    case AMDGPUInstrInfo::EG_CAYMAN:
      if (dev->getGeneration() < AMDILDeviceInfo::HD5XXX
          || dev->getGeneration() > AMDILDeviceInfo::HD6XXX) {
        continue;
      }
      break;
    case AMDGPUInstrInfo::CAYMAN:
      if (dev->getDeviceFlag() != OCL_DEVICE_CAYMAN) {
        continue;
      }
      break;
    case AMDGPUInstrInfo::SI:
      if (dev->getGeneration() != AMDILDeviceInfo::HD7XXX) {
        continue;
      }
      break;
    default:
      abort();
      break;
    }
    unsigned amdilOpcode = GetRealAMDILOpcode(inst);
    amdilToISA[amdilOpcode] = instDesc.Opcode;
  }
}
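
// convertToISA builds a copy of an AMDIL MachineInstr that uses the
// equivalent ISA opcode.  Any virtual register the instruction defines is
// retargeted to a register class the ISA supports, and every operand is
// then transferred onto the new instruction.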
MachineInstr * AMDGPUInstrInfo::convertToISA(MachineInstr & MI, MachineFunction &MF,
    DebugLoc DL) const
{
  MachineInstrBuilder newInstr;
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AMDGPURegisterInfo & RI = getRegisterInfo();
  unsigned ISAOpcode = getISAOpcode(MI.getOpcode());

  /* Create the new instruction */
  newInstr = BuildMI(MF, DL, TM.getInstrInfo()->get(ISAOpcode));

  for (unsigned i = 0; i < MI.getNumOperands(); i++) {
    MachineOperand &MO = MI.getOperand(i);
    /* Convert dst regclass to one that is supported by the ISA */
    if (MO.isReg() && MO.isDef()) {
      if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        const TargetRegisterClass * oldRegClass = MRI.getRegClass(MO.getReg());
        const TargetRegisterClass * newRegClass = RI.getISARegClass(oldRegClass);
        assert(newRegClass);
        MRI.setRegClass(MO.getReg(), newRegClass);
      }
    }
    /* Add the operand to the new instruction */
    newInstr.addOperand(MO);
  }

  return newInstr;
}
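
// Return the ISA opcode that was registered for this AMDIL opcode in the
// constructor, or the opcode unchanged if no mapping exists.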
unsigned AMDGPUInstrInfo::getISAOpcode(unsigned opcode) const
{
  if (amdilToISA.count(opcode) == 0) {
    return opcode;
  } else {
    return amdilToISA.find(opcode)->second;
  }
}
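
// An instruction is a register preload if the PRELOAD_REG bit of its
// TSFlags is set.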
bool AMDGPUInstrInfo::isRegPreload(const MachineInstr &MI) const
{
  return (get(MI.getOpcode()).TSFlags >> AMDGPU_TFLAG_SHIFTS::PRELOAD_REG) & 0x1;
}
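
// Generated instruction enums used by this class are pulled in here.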
#include "AMDGPUInstrEnums.include"