summaryrefslogtreecommitdiffstats
path: root/src/gallium/drivers/radeon/AMDILIOExpansion.cpp
diff options
context:
space:
mode:
authorTom Stellard <[email protected]>2012-01-06 17:38:37 -0500
committerTom Stellard <[email protected]>2012-04-13 10:32:06 -0400
commita75c6163e605f35b14f26930dd9227e4f337ec9e (patch)
tree0263219cbab9282896f874060bb03d445c4de891 /src/gallium/drivers/radeon/AMDILIOExpansion.cpp
parente55cf4854d594eae9ac3f6abd24f4e616eea894f (diff)
radeonsi: initial WIP SI code
This commit adds initial support for acceleration on SI chips. egltri is starting to work. The SI/R600 llvm backend is currently included in mesa but that may change in the future. The plan is to write a single gallium driver and use gallium to support X acceleration. This commit contains patches from: Tom Stellard <[email protected]> Michel Dänzer <[email protected]> Alex Deucher <[email protected]> Vadim Girlin <[email protected]> Signed-off-by: Alex Deucher <[email protected]> The following commits were squashed in: ====================================================================== radeonsi: Remove unused winsys pointer This was removed from r600g in commit: commit 96d882939d612fcc8332f107befec470ed4359de Author: Marek Olšák <[email protected]> Date: Fri Feb 17 01:49:49 2012 +0100 gallium: remove unused winsys pointers in pipe_screen and pipe_context A winsys is already a private object of a driver. ====================================================================== radeonsi: Copy color clamping CAPs from r600 Not sure if the values of these CAPS are correct for radeonsi, but the same changed were made to r600g in commit: commit bc1c8369384b5e16547c5bf9728aa78f8dfd66cc Author: Marek Olšák <[email protected]> Date: Mon Jan 23 03:11:17 2012 +0100 st/mesa: do vertex and fragment color clamping in shaders For ARB_color_buffer_float. Most hardware can't do it and st/mesa is the perfect place for a fallback. The exceptions are: - r500 (vertex clamp only) - nv50 (both) - nvc0 (both) - softpipe (both) We also have to take into account that r300 can do CLAMPED vertex colors only, while r600 can do UNCLAMPED vertex colors only. The difference can be expressed with the two new CAPs. 
====================================================================== radeonsi: Remove PIPE_CAP_OUTPUT_READ This CAP was dropped in commit: commit 04e324008759282728a95a1394bac2c4c2a1a3f9 Author: Marek Olšák <[email protected]> Date: Thu Feb 23 23:44:36 2012 +0100 gallium: remove PIPE_SHADER_CAP_OUTPUT_READ r600g is the only driver which has made use of it. The reason the CAP was added was to fix some piglit tests when the GLSL pass lower_output_reads didn't exist. However, not removing output reads breaks the fallback for glClampColorARB, which assumes outputs are not readable. The fix would be non-trivial and my personal preference is to remove the CAP, considering that reading outputs is uncommon and that we can now use lower_output_reads to fix the issue that the CAP was supposed to workaround in the first place. ====================================================================== radeonsi: Add missing parameters to rws->buffer_get_tiling() call This was changed in commit: commit c0c979eebc076b95cc8d18a013ce2968fe6311ad Author: Jerome Glisse <[email protected]> Date: Mon Jan 30 17:22:13 2012 -0500 r600g: add support for common surface allocator for tiling v13 Tiled surface have all kind of alignment constraint that needs to be met. Instead of having all this code duplicated btw ddx and mesa use common code in libdrm_radeon this also ensure that both ddx and mesa compute those alignment in the same way. 
v2 fix evergreen v3 fix compressed texture and workaround cube texture issue by disabling 2D array mode for cubemap (need to check if r7xx and newer are also affected by the issue) v4 fix texture array v5 fix evergreen and newer, split surface values computation from mipmap tree generation so that we can get them directly from the ddx v6 final fix to evergreen tile split value v7 fix mipmap offset to avoid to use random value, use color view depth view to address different layer as hardware is doing some magic rotation depending on the layer v8 fix COLOR_VIEW on r6xx for linear array mode, use COLOR_VIEW on evergreen, align bytes per pixel to a multiple of a dword v9 fix handling of stencil on evergreen, half fix for compressed texture v10 fix evergreen compressed texture proper support for stencil tile split. Fix stencil issue when array mode was clear by the kernel, always program stencil bo. On evergreen depth buffer bo need to be big enough to hold depth buffer + stencil buffer as even with stencil disabled things get written there. v11 rebase on top of mesa, fix pitch issue with 1d surface on evergreen, old ddx overestimate those. Fix linear case when pitch*height < 64. Fix r300g. v12 Fix linear case when pitch*height < 64 for old path, adapt to libdrm API change v13 add libdrm check Signed-off-by: Jerome Glisse <[email protected]> ====================================================================== radeonsi: Remove PIPE_TRANSFER_MAP_PERMANENTLY This was removed in commit: commit 62f44f670bb0162e89fd4786af877f8da9ff607c Author: Marek Olšák <[email protected]> Date: Mon Mar 5 13:45:00 2012 +0100 Revert "gallium: add flag PIPE_TRANSFER_MAP_PERMANENTLY" This reverts commit 0950086376b1c8b7fb89eda81ed7f2f06dee58bc. It was decided to refactor the transfer API instead of adding workarounds to address the performance issues. ====================================================================== radeonsi: Handle PIPE_VIDEO_CAP_PREFERED_FORMAT. 
Reintroduced in commit 9d9afcb5bac2931d4b8e6d1aa571e941c5110c90. ====================================================================== radeonsi: nuke the fallback for vertex and fragment color clamping Ported from r600g commit c2b800cf38b299c1ab1c53dc0e4ea00c7acef853. ====================================================================== radeonsi: don't expose transform_feedback2 without kernel support Ported from r600g commit 15146fd1bcbb08e44a1cbb984440ee1a5de63d48. ====================================================================== radeonsi: Handle PIPE_CAP_GLSL_FEATURE_LEVEL. Ported from r600g part of commit 171be755223d99f8cc5cc1bdaf8bd7b4caa04b4f. ====================================================================== radeonsi: set minimum point size to 1.0 for non-sprite non-aa points. Ported from r600g commit f183cc9ce3ad1d043bdf8b38fd519e8f437714fc. ====================================================================== radeonsi: rework and consolidate stencilref state setting. Ported from r600g commit a2361946e782b57f0c63587841ca41c0ea707070. ====================================================================== radeonsi: cleanup setting DB_SHADER_CONTROL. Ported from r600g commit 3d061caaed13b646ff40754f8ebe73f3d4983c5b. ====================================================================== radeonsi: Get rid of register masks. Ported from r600g commits 3d061caaed13b646ff40754f8ebe73f3d4983c5b..9344ab382a1765c1a7c2560e771485edf4954fe2. ====================================================================== radeonsi: get rid of r600_context_reg. Ported from r600g commits 9344ab382a1765c1a7c2560e771485edf4954fe2..bed20f02a771f43e1c5092254705701c228cfa7f. ====================================================================== radeonsi: Fix regression from 'Get rid of register masks'. ====================================================================== radeonsi: optimize r600_resource_va. Ported from r600g commit 669d8766ff3403938794eb80d7769347b6e52174. 
====================================================================== radeonsi: remove u8,u16,u32,u64 types. Ported from r600g commit 78293b99b23268e6698f1267aaf40647c17d95a5. ====================================================================== radeonsi: merge r600_context with r600_pipe_context. Ported from r600g commit e4340c1908a6a3b09e1a15d5195f6da7d00494d0. ====================================================================== radeonsi: Miscellaneous context cleanups. Ported from r600g commits e4340c1908a6a3b09e1a15d5195f6da7d00494d0..621e0db71c5ddcb379171064a4f720c9cf01e888. ====================================================================== radeonsi: add a new simple API for state emission. Ported from r600g commits 621e0db71c5ddcb379171064a4f720c9cf01e888..f661405637bba32c2cfbeecf6e2e56e414e9521e. ====================================================================== radeonsi: Also remove sbu_flags member of struct r600_reg. Requires using sid.h instead of r600d.h for the new CP_COHER_CNTL definitions, so some code needs to be disabled for now. ====================================================================== radeonsi: Miscellaneous simplifications. Ported from r600g commits 38bf2763482b4f1b6d95cd51aecec75601d8b90f and b0337b679ad4c2feae59215104cfa60b58a619d5. ====================================================================== radeonsi: Handle PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION. Ported from commit 8b4f7b0672d663273310fffa9490ad996f5b914a. ====================================================================== radeonsi: Use a fake reloc to sleep for fences. Ported from r600g commit 8cd03b933cf868ff867e2db4a0937005a02fd0e4. ====================================================================== radeonsi: adapt to get_query_result interface change. Ported from r600g commit 4445e170bee23a3607ece0e010adef7058ac6a11.
Diffstat (limited to 'src/gallium/drivers/radeon/AMDILIOExpansion.cpp')
-rw-r--r--src/gallium/drivers/radeon/AMDILIOExpansion.cpp1160
1 files changed, 1160 insertions, 0 deletions
diff --git a/src/gallium/drivers/radeon/AMDILIOExpansion.cpp b/src/gallium/drivers/radeon/AMDILIOExpansion.cpp
new file mode 100644
index 00000000000..68d8eef344d
--- /dev/null
+++ b/src/gallium/drivers/radeon/AMDILIOExpansion.cpp
@@ -0,0 +1,1160 @@
+//===----------- AMDILIOExpansion.cpp - IO Expansion Pass -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+// The AMDIL IO Expansion class expands pseudo IO instructions into a sequence
+// of instructions that produces the correct results. These instructions are
+// not expanded earlier in the pass because any pass before this can assume to
+// be able to generate a load/store instruction. So this pass can only have
+// passes that execute after it if no load/store instructions can be generated.
+//===----------------------------------------------------------------------===//
+#include "AMDILIOExpansion.h"
+#include "AMDIL.h"
+#include "AMDILDevices.h"
+#include "AMDILGlobalManager.h"
+#include "AMDILKernelManager.h"
+#include "AMDILMachineFunctionInfo.h"
+#include "AMDILTargetMachine.h"
+#include "AMDILUtilityFunctions.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Support/DebugLoc.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Value.h"
+
+using namespace llvm;
+
+// Static pass identifier used by the LLVM pass registry.
+char AMDILIOExpansion::ID = 0;
+namespace llvm {
+ // Factory entry point: delegates to the subtarget's device object so
+ // that each AMDIL device family can supply its own IO expansion pass.
+ FunctionPass*
+ createAMDILIOExpansion(TargetMachine &TM AMDIL_OPT_LEVEL_DECL)
+ {
+ return TM.getSubtarget<AMDILSubtarget>()
+ .device()->getIOExpansion(TM AMDIL_OPT_LEVEL_VAR);
+ }
+}
+
+// Constructor: caches the subtarget, instruction info, and debug flag.
+// The kernel manager (mKM) is deliberately left NULL here; it is only
+// fetched in runOnMachineFunction.
+AMDILIOExpansion::AMDILIOExpansion(TargetMachine &tm
+ AMDIL_OPT_LEVEL_DECL) :
+ MachineFunctionPass(ID), TM(tm)
+{
+ mSTM = &tm.getSubtarget<AMDILSubtarget>();
+ mDebug = DEBUGME;
+ mTII = tm.getInstrInfo();
+ mKM = NULL;
+}
+
+// Destructor: nothing to release -- all cached pointers are owned by
+// the target machine / subtarget.
+AMDILIOExpansion::~AMDILIOExpansion()
+{
+}
+// Walk every instruction of the function and expand each pseudo IO
+// instruction into a concrete instruction sequence.  The expansion
+// routines insert their replacement instructions before MI; unless an
+// expansion sets saveInst, the original pseudo is then erased.
+ bool
+AMDILIOExpansion::runOnMachineFunction(MachineFunction &MF)
+{
+ mKM = const_cast<AMDILKernelManager*>(mSTM->getKernelManager());
+ mMFI = MF.getInfo<AMDILMachineFunctionInfo>();
+ for (MachineFunction::iterator MFI = MF.begin(), MFE = MF.end();
+ MFI != MFE; ++MFI) {
+ MachineBasicBlock *MBB = MFI;
+ for (MachineBasicBlock::iterator MBI = MBB->begin(), MBE = MBB->end();
+ MBI != MBE; ++MBI) {
+ MachineInstr *MI = MBI;
+ if (isIOInstruction(MI)) {
+ mBB = MBB;
+ // Expansion callbacks may set saveInst to keep the original MI.
+ saveInst = false;
+ expandIOInstruction(MI);
+ if (!saveInst) {
+ // erase returns the instruction after
+ // and we want the instruction before
+ // NOTE(review): if MI were still the first instruction of MBB
+ // this would decrement begin(); appears to rely on the
+ // expansion having inserted instructions before MI -- confirm.
+ MBI = MBB->erase(MI);
+ --MBI;
+ }
+ }
+ }
+ }
+ // NOTE(review): returns false even when instructions were rewritten;
+ // MachineFunctionPass convention is true-if-modified -- confirm intent.
+ return false;
+}
+// Human-readable name reported by the pass manager (e.g. -debug-pass).
+const char *AMDILIOExpansion::getPassName() const
+{
+ return "AMDIL Generic IO Expansion Pass";
+}
+// Returns true iff MI is one of the pseudo load/store opcodes (across
+// all address spaces and element types) that this pass must expand.
+// The ExpandCaseToAllTypes/ExpandCaseToAllTruncTypes macros enumerate
+// the per-type opcode variants as case labels.
+ bool
+AMDILIOExpansion::isIOInstruction(MachineInstr *MI)
+{
+ if (!MI) {
+ return false;
+ }
+ switch(MI->getOpcode()) {
+ default:
+ return false;
+ ExpandCaseToAllTypes(AMDIL::CPOOLLOAD)
+ ExpandCaseToAllTypes(AMDIL::CPOOLSEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::CPOOLZEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::CPOOLAEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::CONSTANTLOAD)
+ ExpandCaseToAllTypes(AMDIL::CONSTANTSEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::CONSTANTZEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::CONSTANTAEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::PRIVATELOAD)
+ ExpandCaseToAllTypes(AMDIL::PRIVATESEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::PRIVATEZEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::PRIVATEAEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::PRIVATESTORE)
+ ExpandCaseToAllTruncTypes(AMDIL::PRIVATETRUNCSTORE)
+ ExpandCaseToAllTypes(AMDIL::REGIONSTORE)
+ ExpandCaseToAllTruncTypes(AMDIL::REGIONTRUNCSTORE)
+ ExpandCaseToAllTypes(AMDIL::REGIONLOAD)
+ ExpandCaseToAllTypes(AMDIL::REGIONSEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::REGIONZEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::REGIONAEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::LOCALSTORE)
+ ExpandCaseToAllTruncTypes(AMDIL::LOCALTRUNCSTORE)
+ ExpandCaseToAllTypes(AMDIL::LOCALLOAD)
+ ExpandCaseToAllTypes(AMDIL::LOCALSEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::LOCALZEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::LOCALAEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::GLOBALLOAD)
+ ExpandCaseToAllTypes(AMDIL::GLOBALSEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::GLOBALAEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::GLOBALZEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::GLOBALSTORE)
+ ExpandCaseToAllTruncTypes(AMDIL::GLOBALTRUNCSTORE)
+ return true;
+ };
+ // Unreachable: both switch paths return above.
+ return false;
+}
+// Dispatch an IO pseudo instruction to the expansion routine for its
+// address space (global / region / local / constant / private /
+// constant-pool), for both loads (incl. sext/zext/aext variants) and
+// stores (incl. truncating variants).
+void
+AMDILIOExpansion::expandIOInstruction(MachineInstr *MI)
+{
+ assert(isIOInstruction(MI) && "Must be an IO instruction to "
+ "be passed to this function!");
+ switch (MI->getOpcode()) {
+ default:
+ assert(0 && "Not an IO Instruction!");
+ ExpandCaseToAllTypes(AMDIL::GLOBALLOAD);
+ ExpandCaseToAllTypes(AMDIL::GLOBALSEXTLOAD);
+ ExpandCaseToAllTypes(AMDIL::GLOBALZEXTLOAD);
+ ExpandCaseToAllTypes(AMDIL::GLOBALAEXTLOAD);
+ expandGlobalLoad(MI);
+ break;
+ ExpandCaseToAllTypes(AMDIL::REGIONLOAD);
+ ExpandCaseToAllTypes(AMDIL::REGIONSEXTLOAD);
+ ExpandCaseToAllTypes(AMDIL::REGIONZEXTLOAD);
+ ExpandCaseToAllTypes(AMDIL::REGIONAEXTLOAD);
+ expandRegionLoad(MI);
+ break;
+ ExpandCaseToAllTypes(AMDIL::LOCALLOAD);
+ ExpandCaseToAllTypes(AMDIL::LOCALSEXTLOAD);
+ ExpandCaseToAllTypes(AMDIL::LOCALZEXTLOAD);
+ ExpandCaseToAllTypes(AMDIL::LOCALAEXTLOAD);
+ expandLocalLoad(MI);
+ break;
+ ExpandCaseToAllTypes(AMDIL::CONSTANTLOAD);
+ ExpandCaseToAllTypes(AMDIL::CONSTANTSEXTLOAD);
+ ExpandCaseToAllTypes(AMDIL::CONSTANTZEXTLOAD);
+ ExpandCaseToAllTypes(AMDIL::CONSTANTAEXTLOAD);
+ expandConstantLoad(MI);
+ break;
+ ExpandCaseToAllTypes(AMDIL::PRIVATELOAD);
+ ExpandCaseToAllTypes(AMDIL::PRIVATESEXTLOAD);
+ ExpandCaseToAllTypes(AMDIL::PRIVATEZEXTLOAD);
+ ExpandCaseToAllTypes(AMDIL::PRIVATEAEXTLOAD);
+ expandPrivateLoad(MI);
+ break;
+ ExpandCaseToAllTypes(AMDIL::CPOOLLOAD);
+ ExpandCaseToAllTypes(AMDIL::CPOOLSEXTLOAD);
+ ExpandCaseToAllTypes(AMDIL::CPOOLZEXTLOAD);
+ ExpandCaseToAllTypes(AMDIL::CPOOLAEXTLOAD);
+ expandConstantPoolLoad(MI);
+ break;
+ ExpandCaseToAllTruncTypes(AMDIL::GLOBALTRUNCSTORE)
+ ExpandCaseToAllTypes(AMDIL::GLOBALSTORE);
+ expandGlobalStore(MI);
+ break;
+ ExpandCaseToAllTruncTypes(AMDIL::PRIVATETRUNCSTORE);
+ ExpandCaseToAllTypes(AMDIL::PRIVATESTORE);
+ expandPrivateStore(MI);
+ break;
+ ExpandCaseToAllTruncTypes(AMDIL::REGIONTRUNCSTORE);
+ ExpandCaseToAllTypes(AMDIL::REGIONSTORE);
+ expandRegionStore(MI);
+ break;
+ ExpandCaseToAllTruncTypes(AMDIL::LOCALTRUNCSTORE);
+ ExpandCaseToAllTypes(AMDIL::LOCALSTORE);
+ expandLocalStore(MI);
+ break;
+ }
+}
+// Returns true iff MI needs an explicit address calculation emitted
+// during expansion.  Private accesses qualify only when the device
+// emulates private memory in software (and, for loads, when the base
+// is not a global variable -- see the workaround comment below);
+// constant-pool loads qualify when the address operand is a register;
+// local accesses qualify when local memory is software-emulated.
+ bool
+AMDILIOExpansion::isAddrCalcInstr(MachineInstr *MI)
+{
+ switch(MI->getOpcode()) {
+ ExpandCaseToAllTypes(AMDIL::PRIVATELOAD)
+ ExpandCaseToAllTypes(AMDIL::PRIVATESEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::PRIVATEZEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::PRIVATEAEXTLOAD)
+ {
+ // This section of code is a workaround for the problem of
+ // globally scoped constant address variables. The problems
+ // comes that although they are declared in the constant
+ // address space, all variables must be allocated in the
+ // private address space. So when there is a load from
+ // the global address, it automatically goes into the private
+ // address space. However, the data section is placed in the
+ // constant address space so we need to check to see if our
+ // load base address is a global variable or not. Only if it
+ // is not a global variable can we do the address calculation
+ // into the private memory ring.
+
+ MachineMemOperand& memOp = (**MI->memoperands_begin());
+ const Value *V = memOp.getValue();
+ if (V) {
+ const GlobalValue *GV = dyn_cast<GlobalVariable>(V);
+ return mSTM->device()->usesSoftware(AMDILDeviceInfo::PrivateMem)
+ && !(GV);
+ } else {
+ return false;
+ }
+ }
+ ExpandCaseToAllTypes(AMDIL::CPOOLLOAD);
+ ExpandCaseToAllTypes(AMDIL::CPOOLSEXTLOAD);
+ ExpandCaseToAllTypes(AMDIL::CPOOLZEXTLOAD);
+ ExpandCaseToAllTypes(AMDIL::CPOOLAEXTLOAD);
+ // Constant-pool load: address calc only needed for a register base.
+ return MI->getOperand(1).isReg();
+ ExpandCaseToAllTruncTypes(AMDIL::PRIVATETRUNCSTORE);
+ ExpandCaseToAllTypes(AMDIL::PRIVATESTORE);
+ return mSTM->device()->usesSoftware(AMDILDeviceInfo::PrivateMem);
+ ExpandCaseToAllTruncTypes(AMDIL::LOCALTRUNCSTORE);
+ ExpandCaseToAllTypes(AMDIL::LOCALSTORE);
+ ExpandCaseToAllTypes(AMDIL::LOCALLOAD);
+ ExpandCaseToAllTypes(AMDIL::LOCALSEXTLOAD);
+ ExpandCaseToAllTypes(AMDIL::LOCALZEXTLOAD);
+ ExpandCaseToAllTypes(AMDIL::LOCALAEXTLOAD);
+ return mSTM->device()->usesSoftware(AMDILDeviceInfo::LocalMem);
+ };
+ // Any opcode not listed above needs no address calculation.
+ return false;
+
+}
+// True iff MI is any kind of extending load: sign-, zero-, or
+// any-extend per the instruction info, or a software sign-extend load.
+ bool
+AMDILIOExpansion::isExtendLoad(MachineInstr *MI)
+{
+ return isSExtLoadInst(TM.getInstrInfo(), MI) ||
+ isZExtLoadInst(TM.getInstrInfo(), MI) ||
+ isAExtLoadInst(TM.getInstrInfo(), MI)
+ || isSWSExtLoadInst(MI);
+}
+
+// True iff MI is a region-memory access and the device implements
+// region memory in hardware.
+ bool
+AMDILIOExpansion::isHardwareRegion(MachineInstr *MI)
+{
+ switch(MI->getOpcode()) {
+ default:
+ return false;
+ break;
+ ExpandCaseToAllTypes(AMDIL::REGIONLOAD)
+ ExpandCaseToAllTypes(AMDIL::REGIONSEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::REGIONZEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::REGIONAEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::REGIONSTORE)
+ ExpandCaseToAllTruncTypes(AMDIL::REGIONTRUNCSTORE)
+ return mSTM->device()->usesHardware(AMDILDeviceInfo::RegionMem);
+ };
+ // Unreachable: both switch paths return above.
+ return false;
+}
+// True iff MI is a local-memory (LDS) access and the device implements
+// local memory in hardware.
+ bool
+AMDILIOExpansion::isHardwareLocal(MachineInstr *MI)
+{
+ switch(MI->getOpcode()) {
+ default:
+ return false;
+ break;
+ ExpandCaseToAllTypes(AMDIL::LOCALLOAD)
+ ExpandCaseToAllTypes(AMDIL::LOCALSEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::LOCALZEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::LOCALAEXTLOAD)
+ ExpandCaseToAllTypes(AMDIL::LOCALSTORE)
+ ExpandCaseToAllTruncTypes(AMDIL::LOCALTRUNCSTORE)
+ return mSTM->device()->usesHardware(AMDILDeviceInfo::LocalMem);
+ };
+ // Unreachable: both switch paths return above.
+ return false;
+}
+// True iff MI operates on packed vector data (v2/v4 of i8/i16) that
+// must be packed/unpacked during expansion.  Packed-type load/store
+// opcodes return true directly; other truncating stores are detected
+// via the destination register class plus the access size in bytes.
+ bool
+AMDILIOExpansion::isPackedData(MachineInstr *MI)
+{
+ switch(MI->getOpcode()) {
+ default:
+ if (isTruncStoreInst(TM.getInstrInfo(), MI)) {
+ switch (MI->getDesc().OpInfo[0].RegClass) {
+ default:
+ break;
+ case AMDIL::GPRV2I64RegClassID:
+ case AMDIL::GPRV2I32RegClassID:
+ // 2-element source truncated to 2 bytes (v2i8) or 4 (v2i16).
+ switch (getMemorySize(MI)) {
+ case 2:
+ case 4:
+ return true;
+ default:
+ break;
+ }
+ break;
+ case AMDIL::GPRV4I32RegClassID:
+ // 4-element source truncated to 4 bytes (v4i8) or 8 (v4i16).
+ switch (getMemorySize(MI)) {
+ case 4:
+ case 8:
+ return true;
+ default:
+ break;
+ }
+ break;
+ }
+ }
+ break;
+ ExpandCaseToPackedTypes(AMDIL::CPOOLLOAD);
+ ExpandCaseToPackedTypes(AMDIL::CPOOLSEXTLOAD);
+ ExpandCaseToPackedTypes(AMDIL::CPOOLZEXTLOAD);
+ ExpandCaseToPackedTypes(AMDIL::CPOOLAEXTLOAD);
+ ExpandCaseToPackedTypes(AMDIL::GLOBALLOAD);
+ ExpandCaseToPackedTypes(AMDIL::GLOBALSEXTLOAD);
+ ExpandCaseToPackedTypes(AMDIL::GLOBALZEXTLOAD);
+ ExpandCaseToPackedTypes(AMDIL::GLOBALAEXTLOAD);
+ ExpandCaseToPackedTypes(AMDIL::LOCALLOAD);
+ ExpandCaseToPackedTypes(AMDIL::LOCALSEXTLOAD);
+ ExpandCaseToPackedTypes(AMDIL::LOCALZEXTLOAD);
+ ExpandCaseToPackedTypes(AMDIL::LOCALAEXTLOAD);
+ ExpandCaseToPackedTypes(AMDIL::REGIONLOAD);
+ ExpandCaseToPackedTypes(AMDIL::REGIONSEXTLOAD);
+ ExpandCaseToPackedTypes(AMDIL::REGIONZEXTLOAD);
+ ExpandCaseToPackedTypes(AMDIL::REGIONAEXTLOAD);
+ ExpandCaseToPackedTypes(AMDIL::PRIVATELOAD);
+ ExpandCaseToPackedTypes(AMDIL::PRIVATESEXTLOAD);
+ ExpandCaseToPackedTypes(AMDIL::PRIVATEZEXTLOAD);
+ ExpandCaseToPackedTypes(AMDIL::PRIVATEAEXTLOAD);
+ ExpandCaseToPackedTypes(AMDIL::CONSTANTLOAD);
+ ExpandCaseToPackedTypes(AMDIL::CONSTANTSEXTLOAD);
+ ExpandCaseToPackedTypes(AMDIL::CONSTANTAEXTLOAD);
+ ExpandCaseToPackedTypes(AMDIL::CONSTANTZEXTLOAD);
+ ExpandCaseToAllTruncTypes(AMDIL::GLOBALTRUNCSTORE)
+ ExpandCaseToAllTruncTypes(AMDIL::PRIVATETRUNCSTORE);
+ ExpandCaseToAllTruncTypes(AMDIL::LOCALTRUNCSTORE);
+ ExpandCaseToAllTruncTypes(AMDIL::REGIONTRUNCSTORE);
+ ExpandCaseToPackedTypes(AMDIL::GLOBALSTORE);
+ ExpandCaseToPackedTypes(AMDIL::PRIVATESTORE);
+ ExpandCaseToPackedTypes(AMDIL::LOCALSTORE);
+ ExpandCaseToPackedTypes(AMDIL::REGIONSTORE);
+ return true;
+ }
+ return false;
+}
+
+// True iff MI is a constant-pool load whose address is a static
+// constant-pool index (any operand isCPI()), as opposed to a
+// register-based constant-pool access.
+ bool
+AMDILIOExpansion::isStaticCPLoad(MachineInstr *MI)
+{
+ switch(MI->getOpcode()) {
+ ExpandCaseToAllTypes(AMDIL::CPOOLLOAD);
+ ExpandCaseToAllTypes(AMDIL::CPOOLSEXTLOAD);
+ ExpandCaseToAllTypes(AMDIL::CPOOLZEXTLOAD);
+ ExpandCaseToAllTypes(AMDIL::CPOOLAEXTLOAD);
+ {
+ // Scan all operands for a constant-pool index.
+ uint32_t x = 0;
+ uint32_t num = MI->getNumOperands();
+ for (x = 0; x < num; ++x) {
+ if (MI->getOperand(x).isCPI()) {
+ return true;
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ return false;
+}
+
+// Returns true iff mType is nBits wide.  For vectors/arrays, isScalar
+// selects whether the total width (elements * element size) or just
+// the element size is compared.  Pointers are unwrapped to their
+// pointee type; structs compare their total size.
+ bool
+AMDILIOExpansion::isNbitType(Type *mType, uint32_t nBits, bool isScalar)
+{
+ if (!mType) {
+ return false;
+ }
+ if (dyn_cast<PointerType>(mType)) {
+ PointerType *PT = dyn_cast<PointerType>(mType);
+ // NOTE(review): recursive call omits isScalar, so the pointee is
+ // checked with the parameter's default value -- confirm intended.
+ return isNbitType(PT->getElementType(), nBits);
+ } else if (dyn_cast<StructType>(mType)) {
+ return getTypeSize(mType) == nBits;
+ } else if (dyn_cast<VectorType>(mType)) {
+ VectorType *VT = dyn_cast<VectorType>(mType);
+ size_t size = VT->getScalarSizeInBits();
+ return (isScalar ?
+ VT->getNumElements() * size == nBits : size == nBits);
+ } else if (dyn_cast<ArrayType>(mType)) {
+ ArrayType *AT = dyn_cast<ArrayType>(mType);
+ size_t size = AT->getScalarSizeInBits();
+ return (isScalar ?
+ AT->getNumElements() * size == nBits : size == nBits);
+ } else if (mType->isSized()) {
+ return mType->getScalarSizeInBits() == nBits;
+ } else {
+ assert(0 && "Found a type that we don't know how to handle!");
+ return false;
+ }
+}
+
+// Reads the HardwareInst bit that earlier passes encoded into the
+// instruction's asm-printer flags.
+ bool
+AMDILIOExpansion::isHardwareInst(MachineInstr *MI)
+{
+ AMDILAS::InstrResEnc curRes;
+ curRes.u16all = MI->getAsmPrinterFlags();
+ return curRes.bits.HardwareInst;
+}
+
+// Classify MI's pack/unpack requirement from its opcode: stores of
+// v2/v4 x i8/i16 (including truncating stores) map to PACK_*, the
+// corresponding loads map to UNPACK_*, everything else to NO_PACKING.
+REG_PACKED_TYPE
+AMDILIOExpansion::getPackedID(MachineInstr *MI)
+{
+ switch (MI->getOpcode()) {
+ default:
+ break;
+ case AMDIL::GLOBALTRUNCSTORE_v2i64i8:
+ case AMDIL::REGIONTRUNCSTORE_v2i64i8:
+ case AMDIL::LOCALTRUNCSTORE_v2i64i8:
+ case AMDIL::PRIVATETRUNCSTORE_v2i64i8:
+ case AMDIL::GLOBALTRUNCSTORE_v2i32i8:
+ case AMDIL::REGIONTRUNCSTORE_v2i32i8:
+ case AMDIL::LOCALTRUNCSTORE_v2i32i8:
+ case AMDIL::PRIVATETRUNCSTORE_v2i32i8:
+ case AMDIL::GLOBALTRUNCSTORE_v2i16i8:
+ case AMDIL::REGIONTRUNCSTORE_v2i16i8:
+ case AMDIL::LOCALTRUNCSTORE_v2i16i8:
+ case AMDIL::PRIVATETRUNCSTORE_v2i16i8:
+ case AMDIL::GLOBALSTORE_v2i8:
+ case AMDIL::LOCALSTORE_v2i8:
+ case AMDIL::REGIONSTORE_v2i8:
+ case AMDIL::PRIVATESTORE_v2i8:
+ return PACK_V2I8;
+ case AMDIL::GLOBALTRUNCSTORE_v4i32i8:
+ case AMDIL::REGIONTRUNCSTORE_v4i32i8:
+ case AMDIL::LOCALTRUNCSTORE_v4i32i8:
+ case AMDIL::PRIVATETRUNCSTORE_v4i32i8:
+ case AMDIL::GLOBALTRUNCSTORE_v4i16i8:
+ case AMDIL::REGIONTRUNCSTORE_v4i16i8:
+ case AMDIL::LOCALTRUNCSTORE_v4i16i8:
+ case AMDIL::PRIVATETRUNCSTORE_v4i16i8:
+ case AMDIL::GLOBALSTORE_v4i8:
+ case AMDIL::LOCALSTORE_v4i8:
+ case AMDIL::REGIONSTORE_v4i8:
+ case AMDIL::PRIVATESTORE_v4i8:
+ return PACK_V4I8;
+ case AMDIL::GLOBALTRUNCSTORE_v2i64i16:
+ case AMDIL::REGIONTRUNCSTORE_v2i64i16:
+ case AMDIL::LOCALTRUNCSTORE_v2i64i16:
+ case AMDIL::PRIVATETRUNCSTORE_v2i64i16:
+ case AMDIL::GLOBALTRUNCSTORE_v2i32i16:
+ case AMDIL::REGIONTRUNCSTORE_v2i32i16:
+ case AMDIL::LOCALTRUNCSTORE_v2i32i16:
+ case AMDIL::PRIVATETRUNCSTORE_v2i32i16:
+ case AMDIL::GLOBALSTORE_v2i16:
+ case AMDIL::LOCALSTORE_v2i16:
+ case AMDIL::REGIONSTORE_v2i16:
+ case AMDIL::PRIVATESTORE_v2i16:
+ return PACK_V2I16;
+ case AMDIL::GLOBALTRUNCSTORE_v4i32i16:
+ case AMDIL::REGIONTRUNCSTORE_v4i32i16:
+ case AMDIL::LOCALTRUNCSTORE_v4i32i16:
+ case AMDIL::PRIVATETRUNCSTORE_v4i32i16:
+ case AMDIL::GLOBALSTORE_v4i16:
+ case AMDIL::LOCALSTORE_v4i16:
+ case AMDIL::REGIONSTORE_v4i16:
+ case AMDIL::PRIVATESTORE_v4i16:
+ return PACK_V4I16;
+ case AMDIL::GLOBALLOAD_v2i8:
+ case AMDIL::GLOBALSEXTLOAD_v2i8:
+ case AMDIL::GLOBALAEXTLOAD_v2i8:
+ case AMDIL::GLOBALZEXTLOAD_v2i8:
+ case AMDIL::LOCALLOAD_v2i8:
+ case AMDIL::LOCALSEXTLOAD_v2i8:
+ case AMDIL::LOCALAEXTLOAD_v2i8:
+ case AMDIL::LOCALZEXTLOAD_v2i8:
+ case AMDIL::REGIONLOAD_v2i8:
+ case AMDIL::REGIONSEXTLOAD_v2i8:
+ case AMDIL::REGIONAEXTLOAD_v2i8:
+ case AMDIL::REGIONZEXTLOAD_v2i8:
+ case AMDIL::PRIVATELOAD_v2i8:
+ case AMDIL::PRIVATESEXTLOAD_v2i8:
+ case AMDIL::PRIVATEAEXTLOAD_v2i8:
+ case AMDIL::PRIVATEZEXTLOAD_v2i8:
+ case AMDIL::CONSTANTLOAD_v2i8:
+ case AMDIL::CONSTANTSEXTLOAD_v2i8:
+ case AMDIL::CONSTANTAEXTLOAD_v2i8:
+ case AMDIL::CONSTANTZEXTLOAD_v2i8:
+ return UNPACK_V2I8;
+ case AMDIL::GLOBALLOAD_v4i8:
+ case AMDIL::GLOBALSEXTLOAD_v4i8:
+ case AMDIL::GLOBALAEXTLOAD_v4i8:
+ case AMDIL::GLOBALZEXTLOAD_v4i8:
+ case AMDIL::LOCALLOAD_v4i8:
+ case AMDIL::LOCALSEXTLOAD_v4i8:
+ case AMDIL::LOCALAEXTLOAD_v4i8:
+ case AMDIL::LOCALZEXTLOAD_v4i8:
+ case AMDIL::REGIONLOAD_v4i8:
+ case AMDIL::REGIONSEXTLOAD_v4i8:
+ case AMDIL::REGIONAEXTLOAD_v4i8:
+ case AMDIL::REGIONZEXTLOAD_v4i8:
+ case AMDIL::PRIVATELOAD_v4i8:
+ case AMDIL::PRIVATESEXTLOAD_v4i8:
+ case AMDIL::PRIVATEAEXTLOAD_v4i8:
+ case AMDIL::PRIVATEZEXTLOAD_v4i8:
+ case AMDIL::CONSTANTLOAD_v4i8:
+ case AMDIL::CONSTANTSEXTLOAD_v4i8:
+ case AMDIL::CONSTANTAEXTLOAD_v4i8:
+ case AMDIL::CONSTANTZEXTLOAD_v4i8:
+ return UNPACK_V4I8;
+ case AMDIL::GLOBALLOAD_v2i16:
+ case AMDIL::GLOBALSEXTLOAD_v2i16:
+ case AMDIL::GLOBALAEXTLOAD_v2i16:
+ case AMDIL::GLOBALZEXTLOAD_v2i16:
+ case AMDIL::LOCALLOAD_v2i16:
+ case AMDIL::LOCALSEXTLOAD_v2i16:
+ case AMDIL::LOCALAEXTLOAD_v2i16:
+ case AMDIL::LOCALZEXTLOAD_v2i16:
+ case AMDIL::REGIONLOAD_v2i16:
+ case AMDIL::REGIONSEXTLOAD_v2i16:
+ case AMDIL::REGIONAEXTLOAD_v2i16:
+ case AMDIL::REGIONZEXTLOAD_v2i16:
+ case AMDIL::PRIVATELOAD_v2i16:
+ case AMDIL::PRIVATESEXTLOAD_v2i16:
+ case AMDIL::PRIVATEAEXTLOAD_v2i16:
+ case AMDIL::PRIVATEZEXTLOAD_v2i16:
+ case AMDIL::CONSTANTLOAD_v2i16:
+ case AMDIL::CONSTANTSEXTLOAD_v2i16:
+ case AMDIL::CONSTANTAEXTLOAD_v2i16:
+ case AMDIL::CONSTANTZEXTLOAD_v2i16:
+ return UNPACK_V2I16;
+ case AMDIL::GLOBALLOAD_v4i16:
+ case AMDIL::GLOBALSEXTLOAD_v4i16:
+ case AMDIL::GLOBALAEXTLOAD_v4i16:
+ case AMDIL::GLOBALZEXTLOAD_v4i16:
+ case AMDIL::LOCALLOAD_v4i16:
+ case AMDIL::LOCALSEXTLOAD_v4i16:
+ case AMDIL::LOCALAEXTLOAD_v4i16:
+ case AMDIL::LOCALZEXTLOAD_v4i16:
+ case AMDIL::REGIONLOAD_v4i16:
+ case AMDIL::REGIONSEXTLOAD_v4i16:
+ case AMDIL::REGIONAEXTLOAD_v4i16:
+ case AMDIL::REGIONZEXTLOAD_v4i16:
+ case AMDIL::PRIVATELOAD_v4i16:
+ case AMDIL::PRIVATESEXTLOAD_v4i16:
+ case AMDIL::PRIVATEAEXTLOAD_v4i16:
+ case AMDIL::PRIVATEZEXTLOAD_v4i16:
+ case AMDIL::CONSTANTLOAD_v4i16:
+ case AMDIL::CONSTANTSEXTLOAD_v4i16:
+ case AMDIL::CONSTANTAEXTLOAD_v4i16:
+ case AMDIL::CONSTANTZEXTLOAD_v4i16:
+ return UNPACK_V4I16;
+ };
+ return NO_PACKING;
+}
+
+// Returns the resource ID encoded into MI's asm-printer flags by
+// earlier passes; identifies which memory resource MI addresses.
+ uint32_t
+AMDILIOExpansion::getPointerID(MachineInstr *MI)
+{
+ AMDILAS::InstrResEnc curInst;
+ getAsmPrinterFlags(MI, curInst);
+ return curInst.bits.ResourceID;
+}
+
+// Shift amount category for pack/unpack expansion, derived from the
+// packed element type: 1 for i8 variants, 2 for i16 variants, 0 when
+// MI needs no packing.
+ uint32_t
+AMDILIOExpansion::getShiftSize(MachineInstr *MI)
+{
+ switch(getPackedID(MI)) {
+ default:
+ return 0;
+ case PACK_V2I8:
+ case PACK_V4I8:
+ case UNPACK_V2I8:
+ case UNPACK_V4I8:
+ return 1;
+ case PACK_V2I16:
+ case PACK_V4I16:
+ case UNPACK_V2I16:
+ case UNPACK_V4I16:
+ return 2;
+ }
+ // Unreachable: every switch path returns above.
+ return 0;
+}
+// Size in bytes of MI's memory access, taken from its first memory
+// operand; defaults to 4 when no memory operands are attached.
+ uint32_t
+AMDILIOExpansion::getMemorySize(MachineInstr *MI)
+{
+ if (MI->memoperands_empty()) {
+ return 4;
+ }
+ return (uint32_t)((*MI->memoperands_begin())->getSize());
+}
+
+// Emit the instruction sequence that extends a loaded i8/i16/i32 value
+// (scalar or 2-component vector, selected by numComps/size) held in
+// R1011 to 64 bits.  signedShift selects arithmetic (sign) extension;
+// otherwise the high bits are zeroed.  Sub-32-bit sizes delegate to
+// expandLongExtendSub32 with per-size shift opcodes and amounts.
+ void
+AMDILIOExpansion::expandLongExtend(MachineInstr *MI,
+ uint32_t numComps, uint32_t size, bool signedShift)
+{
+ DebugLoc DL = MI->getDebugLoc();
+ switch(size) {
+ default:
+ assert(0 && "Found a case we don't handle!");
+ break;
+ case 8:
+ if (numComps == 1) {
+ expandLongExtendSub32(MI, AMDIL::SHL_i8, AMDIL::SHRVEC_v2i32,
+ AMDIL::USHRVEC_i8,
+ 24, (24ULL | (31ULL << 32)), 24, AMDIL::LCREATE, signedShift);
+ } else if (numComps == 2) {
+ expandLongExtendSub32(MI, AMDIL::SHL_v2i8, AMDIL::SHRVEC_v4i32,
+ AMDIL::USHRVEC_v2i8,
+ 24, (24ULL | (31ULL << 32)), 24, AMDIL::LCREATE_v2i64, signedShift);
+ } else {
+ assert(0 && "Found a case we don't handle!");
+ }
+ break;
+ case 16:
+ if (numComps == 1) {
+ expandLongExtendSub32(MI, AMDIL::SHL_i16, AMDIL::SHRVEC_v2i32,
+ AMDIL::USHRVEC_i16,
+ 16, (16ULL | (31ULL << 32)), 16, AMDIL::LCREATE, signedShift);
+ } else if (numComps == 2) {
+ expandLongExtendSub32(MI, AMDIL::SHL_v2i16, AMDIL::SHRVEC_v4i32,
+ AMDIL::USHRVEC_v2i16,
+ 16, (16ULL | (31ULL << 32)), 16, AMDIL::LCREATE_v2i64, signedShift);
+ } else {
+ assert(0 && "Found a case we don't handle!");
+ }
+ break;
+ case 32:
+ // 32 -> 64: build the i64 directly with LCREATE; the high word is
+ // either a replicated sign bit (SHRVEC by 31) or the literal 0.
+ if (numComps == 1) {
+ if (signedShift) {
+ BuildMI(*mBB, MI, DL, mTII->get(AMDIL::SHRVEC_i32), AMDIL::R1012)
+ .addReg(AMDIL::R1011)
+ .addImm(mMFI->addi32Literal(31));
+ BuildMI(*mBB, MI, DL, mTII->get(AMDIL::LCREATE), AMDIL::R1011)
+ .addReg(AMDIL::R1011).addReg(AMDIL::R1012);
+ } else {
+ BuildMI(*mBB, MI, DL, mTII->get(AMDIL::LCREATE), AMDIL::R1011)
+ .addReg(AMDIL::R1011)
+ .addImm(mMFI->addi32Literal(0));
+ }
+ } else if (numComps == 2) {
+ if (signedShift) {
+ BuildMI(*mBB, MI, DL, mTII->get(AMDIL::SHRVEC_v2i32), AMDIL::R1012)
+ .addReg(AMDIL::R1011)
+ .addImm(mMFI->addi32Literal(31));
+ BuildMI(*mBB, MI, DL, mTII->get(AMDIL::LCREATE_v2i64), AMDIL::R1011)
+ .addReg(AMDIL::R1011)
+ .addReg(AMDIL::R1012);
+ } else {
+ BuildMI(*mBB, MI, DL, mTII->get(AMDIL::LCREATE_v2i64), AMDIL::R1011)
+ .addReg(AMDIL::R1011)
+ .addImm(mMFI->addi32Literal(0));
+ }
+ } else {
+ assert(0 && "Found a case we don't handle!");
+ }
+ };
+}
+// Helper for expandLongExtend on sub-32-bit sources in R1011:
+// left-shift the value to the top of its component (SHLop/SHLimm),
+// then for the signed case form the i64 first (LCRop) and arithmetic-
+// shift the pair back down by the 64-bit literal SHRimm; for the
+// unsigned case logically shift back down (USHRop/USHRimm) and pair
+// with a zero high word.
+ void
+AMDILIOExpansion::expandLongExtendSub32(MachineInstr *MI,
+ unsigned SHLop, unsigned SHRop, unsigned USHRop,
+ unsigned SHLimm, uint64_t SHRimm, unsigned USHRimm,
+ unsigned LCRop, bool signedShift)
+{
+ DebugLoc DL = MI->getDebugLoc();
+ BuildMI(*mBB, MI, DL, mTII->get(SHLop), AMDIL::R1011)
+ .addReg(AMDIL::R1011)
+ .addImm(mMFI->addi32Literal(SHLimm));
+ if (signedShift) {
+ BuildMI(*mBB, MI, DL, mTII->get(LCRop), AMDIL::R1011)
+ .addReg(AMDIL::R1011).addReg(AMDIL::R1011);
+ BuildMI(*mBB, MI, DL, mTII->get(SHRop), AMDIL::R1011)
+ .addReg(AMDIL::R1011)
+ .addImm(mMFI->addi64Literal(SHRimm));
+ } else {
+ BuildMI(*mBB, MI, DL, mTII->get(USHRop), AMDIL::R1011)
+ .addReg(AMDIL::R1011)
+ .addImm(mMFI->addi32Literal(USHRimm));
+ BuildMI(*mBB, MI, MI->getDebugLoc(), mTII->get(LCRop), AMDIL::R1011)
+ .addReg(AMDIL::R1011)
+ .addImm(mMFI->addi32Literal(0));
+ }
+}
+
+// In-register integer extension of R1011: shift left by `offset` bits
+// then shift right by the same amount.  SHRop being an arithmetic or
+// logical shift selects sign- vs zero-extension.  `offset` is
+// converted to a literal-pool index before use.
+ void
+AMDILIOExpansion::expandIntegerExtend(MachineInstr *MI, unsigned SHLop,
+ unsigned SHRop, unsigned offset)
+{
+ DebugLoc DL = MI->getDebugLoc();
+ offset = mMFI->addi32Literal(offset);
+ BuildMI(*mBB, MI, DL,
+ mTII->get(SHLop), AMDIL::R1011)
+ .addReg(AMDIL::R1011).addImm(offset);
+ BuildMI(*mBB, MI, DL,
+ mTII->get(SHRop), AMDIL::R1011)
+ .addReg(AMDIL::R1011).addImm(offset);
+}
 // Expand an extending load after the raw (narrow) value has been loaded
 // into R1011: widen it to the destination register class by emitting
 // shift pairs, long-extends, or half/float/double conversions.
 void
AMDILIOExpansion::expandExtendLoad(MachineInstr *MI)
{
  if (!isExtendLoad(MI)) {
    return;
  }
  // Recover the IR-level type of the loaded value (when a memory operand
  // with a value is attached) so i8/i16/i32 sources can be distinguished;
  // getMemorySize(MI) serves as the fallback when no type is available.
  Type *mType = NULL;
  if (!MI->memoperands_empty()) {
    MachineMemOperand *memOp = (*MI->memoperands_begin());
    const Value *moVal = (memOp) ? memOp->getValue() : NULL;
    mType = (moVal) ? moVal->getType() : NULL;
  }
  unsigned opcode = 0;
  DebugLoc DL = MI->getDebugLoc();
  if (isZExtLoadInst(TM.getInstrInfo(), MI) || isAExtLoadInst(TM.getInstrInfo(), MI) || isSExtLoadInst(TM.getInstrInfo(), MI)) {
    // Integer extends pick an arithmetic (SHRVEC) or logical (USHRVEC)
    // right shift depending on signedness; float cases convert instead.
    // Dispatch on the destination register class of the load.
    switch(MI->getDesc().OpInfo[0].RegClass) {
    default:
      assert(0 && "Found an extending load that we don't handle!");
      break;
    case AMDIL::GPRI16RegClassID:
      // i8 -> i16. Skipped for hardware local memory unless byte-sized
      // LDS operations are emulated in software (nothing emitted then).
      if (!isHardwareLocal(MI)
          || mSTM->device()->usesSoftware(AMDILDeviceInfo::ByteLDSOps)) {
        opcode = isSExtLoadInst(TM.getInstrInfo(), MI) ? AMDIL::SHRVEC_i16 : AMDIL::USHRVEC_i16;
        expandIntegerExtend(MI, AMDIL::SHL_i16, opcode, 24);
      }
      break;
    case AMDIL::GPRV2I16RegClassID:
      opcode = isSExtLoadInst(TM.getInstrInfo(), MI) ? AMDIL::SHRVEC_v2i16 : AMDIL::USHRVEC_v2i16;
      expandIntegerExtend(MI, AMDIL::SHL_v2i16, opcode, 24);
      break;
    case AMDIL::GPRV4I8RegClassID:
      opcode = isSExtLoadInst(TM.getInstrInfo(), MI) ? AMDIL::SHRVEC_v4i8 : AMDIL::USHRVEC_v4i8;
      expandIntegerExtend(MI, AMDIL::SHL_v4i8, opcode, 24);
      break;
    case AMDIL::GPRV4I16RegClassID:
      opcode = isSExtLoadInst(TM.getInstrInfo(), MI) ? AMDIL::SHRVEC_v4i16 : AMDIL::USHRVEC_v4i16;
      expandIntegerExtend(MI, AMDIL::SHL_v4i16, opcode, 24);
      break;
    case AMDIL::GPRI32RegClassID:
      // We can be a i8 or i16 bit sign extended value
      // (shift offset 24 extends 8 significant bits, 16 extends 16).
      if (isNbitType(mType, 8) || getMemorySize(MI) == 1) {
        opcode = isSExtLoadInst(TM.getInstrInfo(), MI) ? AMDIL::SHRVEC_i32 : AMDIL::USHRVEC_i32;
        expandIntegerExtend(MI, AMDIL::SHL_i32, opcode, 24);
      } else if (isNbitType(mType, 16) || getMemorySize(MI) == 2) {
        opcode = isSExtLoadInst(TM.getInstrInfo(), MI) ? AMDIL::SHRVEC_i32 : AMDIL::USHRVEC_i32;
        expandIntegerExtend(MI, AMDIL::SHL_i32, opcode, 16);
      } else {
        assert(0 && "Found an extending load that we don't handle!");
      }
      break;
    case AMDIL::GPRV2I32RegClassID:
      // We can be a v2i8 or v2i16 bit sign extended value
      if (isNbitType(mType, 8, false) || getMemorySize(MI) == 2) {
        opcode = isSExtLoadInst(TM.getInstrInfo(), MI) ? AMDIL::SHRVEC_v2i32 : AMDIL::USHRVEC_v2i32;
        expandIntegerExtend(MI, AMDIL::SHL_v2i32, opcode, 24);
      } else if (isNbitType(mType, 16, false) || getMemorySize(MI) == 4) {
        opcode = isSExtLoadInst(TM.getInstrInfo(), MI) ? AMDIL::SHRVEC_v2i32 : AMDIL::USHRVEC_v2i32;
        expandIntegerExtend(MI, AMDIL::SHL_v2i32, opcode, 16);
      } else {
        assert(0 && "Found an extending load that we don't handle!");
      }
      break;
    case AMDIL::GPRV4I32RegClassID:
      // We can be a v4i8 or v4i16 bit sign extended value
      if (isNbitType(mType, 8, false) || getMemorySize(MI) == 4) {
        opcode = isSExtLoadInst(TM.getInstrInfo(), MI) ? AMDIL::SHRVEC_v4i32 : AMDIL::USHRVEC_v4i32;
        expandIntegerExtend(MI, AMDIL::SHL_v4i32, opcode, 24);
      } else if (isNbitType(mType, 16, false) || getMemorySize(MI) == 8) {
        opcode = isSExtLoadInst(TM.getInstrInfo(), MI) ? AMDIL::SHRVEC_v4i32 : AMDIL::USHRVEC_v4i32;
        expandIntegerExtend(MI, AMDIL::SHL_v4i32, opcode, 16);
      } else {
        assert(0 && "Found an extending load that we don't handle!");
      }
      break;
    case AMDIL::GPRI64RegClassID:
      // We can be a i8, i16 or i32 bit sign extended value
      // (handled by the 64-bit long-extend path).
      if (isNbitType(mType, 8) || getMemorySize(MI) == 1) {
        expandLongExtend(MI, 1, 8, isSExtLoadInst(TM.getInstrInfo(), MI));
      } else if (isNbitType(mType, 16) || getMemorySize(MI) == 2) {
        expandLongExtend(MI, 1, 16, isSExtLoadInst(TM.getInstrInfo(), MI));
      } else if (isNbitType(mType, 32) || getMemorySize(MI) == 4) {
        expandLongExtend(MI, 1, 32, isSExtLoadInst(TM.getInstrInfo(), MI));
      } else {
        assert(0 && "Found an extending load that we don't handle!");
      }
      break;
    case AMDIL::GPRV2I64RegClassID:
      // We can be a v2i8, v2i16 or v2i32 bit sign extended value
      if (isNbitType(mType, 8, false) || getMemorySize(MI) == 2) {
        expandLongExtend(MI, 2, 8, isSExtLoadInst(TM.getInstrInfo(), MI));
      } else if (isNbitType(mType, 16, false) || getMemorySize(MI) == 4) {
        expandLongExtend(MI, 2, 16, isSExtLoadInst(TM.getInstrInfo(), MI));
      } else if (isNbitType(mType, 32, false) || getMemorySize(MI) == 8) {
        expandLongExtend(MI, 2, 32, isSExtLoadInst(TM.getInstrInfo(), MI));
      } else {
        assert(0 && "Found an extending load that we don't handle!");
      }
      break;
    case AMDIL::GPRF32RegClassID:
      // Presumably a half-precision load widened to float (HTOF) —
      // opcode name suggests half-to-float; confirm against AMDIL ISA.
      BuildMI(*mBB, MI, DL,
          mTII->get(AMDIL::HTOF_f32), AMDIL::R1011)
        .addReg(AMDIL::R1011);
      break;
    case AMDIL::GPRV2F32RegClassID:
      BuildMI(*mBB, MI, DL,
          mTII->get(AMDIL::HTOF_v2f32), AMDIL::R1011)
        .addReg(AMDIL::R1011);
      break;
    case AMDIL::GPRV4F32RegClassID:
      BuildMI(*mBB, MI, DL,
          mTII->get(AMDIL::HTOF_v4f32), AMDIL::R1011)
        .addReg(AMDIL::R1011);
      break;
    case AMDIL::GPRF64RegClassID:
      // float -> double conversion of the loaded value.
      BuildMI(*mBB, MI, DL,
          mTII->get(AMDIL::FTOD), AMDIL::R1011)
        .addReg(AMDIL::R1011);
      break;
    case AMDIL::GPRV2F64RegClassID:
      // v2f32 -> v2f64: split the float pair (component 2 into R1012),
      // convert both halves, and reassemble into a double vector.
      BuildMI(*mBB, MI, DL, mTII->get(AMDIL::VEXTRACT_v2f32),
          AMDIL::R1012).addReg(AMDIL::R1011).addImm(2);
      BuildMI(*mBB, MI, DL,
          mTII->get(AMDIL::FTOD), AMDIL::R1011)
        .addReg(AMDIL::R1011);
      BuildMI(*mBB, MI, DL,
          mTII->get(AMDIL::FTOD), AMDIL::R1012)
        .addReg(AMDIL::R1012);
      BuildMI(*mBB, MI, DL,
          mTII->get(AMDIL::VINSERT_v2f64), AMDIL::R1011)
        .addReg(AMDIL::R1011).addReg(AMDIL::R1012)
        .addImm(1 << 8).addImm(1 << 8);
      break;
    };
  } else if (isSWSExtLoadInst(MI)) {
    // Software sign-extending loads: always a plain SHL/arithmetic-SHR
    // pair; the shift offset places the sign bit at bit 31.
    switch(MI->getDesc().OpInfo[0].RegClass) {
    case AMDIL::GPRI8RegClassID:
      if (!isHardwareLocal(MI)
          || mSTM->device()->usesSoftware(AMDILDeviceInfo::ByteLDSOps)) {
        expandIntegerExtend(MI, AMDIL::SHL_i8, AMDIL::SHRVEC_i8, 24);
      }
      break;
    case AMDIL::GPRV2I8RegClassID:
      expandIntegerExtend(MI, AMDIL::SHL_v2i8, AMDIL::SHRVEC_v2i8, 24);
      break;
    case AMDIL::GPRV4I8RegClassID:
      expandIntegerExtend(MI, AMDIL::SHL_v4i8, AMDIL::SHRVEC_v4i8, 24);
      break;
    case AMDIL::GPRI16RegClassID:
      if (!isHardwareLocal(MI)
          || mSTM->device()->usesSoftware(AMDILDeviceInfo::ByteLDSOps)) {
        expandIntegerExtend(MI, AMDIL::SHL_i16, AMDIL::SHRVEC_i16, 16);
      }
      break;
    case AMDIL::GPRV2I16RegClassID:
      expandIntegerExtend(MI, AMDIL::SHL_v2i16, AMDIL::SHRVEC_v2i16, 16);
      break;
    case AMDIL::GPRV4I16RegClassID:
      expandIntegerExtend(MI, AMDIL::SHL_v4i16, AMDIL::SHRVEC_v4i16, 16);
      break;

    };
  }
}
+
 // Expand a truncating store: narrow the value in R1011 to the store width
 // before the actual store instruction executes. 64-bit sources first take
 // the low 32 bits (LLO), then fall through to the same masking used by the
 // 32-bit sources.
 void
AMDILIOExpansion::expandTruncData(MachineInstr *MI)
{
  MachineBasicBlock::iterator I = *MI;
  if (!isTruncStoreInst(TM.getInstrInfo(), MI)) {
    return;
  }
  DebugLoc DL = MI->getDebugLoc();
  switch (MI->getOpcode()) {
  default:
    MI->dump();
    assert(!"Found a trunc store instructions we don't handle!");
    break;
  case AMDIL::GLOBALTRUNCSTORE_i64i8:
  case AMDIL::GLOBALTRUNCSTORE_v2i64i8:
  case AMDIL::LOCALTRUNCSTORE_i64i8:
  case AMDIL::LOCALTRUNCSTORE_v2i64i8:
  case AMDIL::REGIONTRUNCSTORE_i64i8:
  case AMDIL::REGIONTRUNCSTORE_v2i64i8:
  case AMDIL::PRIVATETRUNCSTORE_i64i8:
  case AMDIL::PRIVATETRUNCSTORE_v2i64i8:
    // i64 -> i8: take the low 32 bits of each 64-bit lane first...
    BuildMI(*mBB, MI, DL,
        mTII->get(AMDIL::LLO_v2i64), AMDIL::R1011)
      .addReg(AMDIL::R1011);
    // ...then FALLTHROUGH (intentional) to the i8 masking below.
  case AMDIL::GLOBALTRUNCSTORE_i16i8:
  case AMDIL::GLOBALTRUNCSTORE_v2i16i8:
  case AMDIL::GLOBALTRUNCSTORE_v4i16i8:
  case AMDIL::LOCALTRUNCSTORE_i16i8:
  case AMDIL::LOCALTRUNCSTORE_v2i16i8:
  case AMDIL::LOCALTRUNCSTORE_v4i16i8:
  case AMDIL::REGIONTRUNCSTORE_i16i8:
  case AMDIL::REGIONTRUNCSTORE_v2i16i8:
  case AMDIL::REGIONTRUNCSTORE_v4i16i8:
  case AMDIL::PRIVATETRUNCSTORE_i16i8:
  case AMDIL::PRIVATETRUNCSTORE_v2i16i8:
  case AMDIL::PRIVATETRUNCSTORE_v4i16i8:
  case AMDIL::GLOBALTRUNCSTORE_i32i8:
  case AMDIL::GLOBALTRUNCSTORE_v2i32i8:
  case AMDIL::GLOBALTRUNCSTORE_v4i32i8:
  case AMDIL::LOCALTRUNCSTORE_i32i8:
  case AMDIL::LOCALTRUNCSTORE_v2i32i8:
  case AMDIL::LOCALTRUNCSTORE_v4i32i8:
  case AMDIL::REGIONTRUNCSTORE_i32i8:
  case AMDIL::REGIONTRUNCSTORE_v2i32i8:
  case AMDIL::REGIONTRUNCSTORE_v4i32i8:
  case AMDIL::PRIVATETRUNCSTORE_i32i8:
  case AMDIL::PRIVATETRUNCSTORE_v2i32i8:
  case AMDIL::PRIVATETRUNCSTORE_v4i32i8:
    // Mask down to 8 bits per lane. The v4i32 AND covers every narrower
    // vector width as well since all lanes are masked identically.
    BuildMI(*mBB, MI, DL,
        mTII->get(AMDIL::BINARY_AND_v4i32), AMDIL::R1011)
      .addReg(AMDIL::R1011)
      .addImm(mMFI->addi32Literal(0xFF));
    break;
  case AMDIL::GLOBALTRUNCSTORE_i64i16:
  case AMDIL::GLOBALTRUNCSTORE_v2i64i16:
  case AMDIL::LOCALTRUNCSTORE_i64i16:
  case AMDIL::LOCALTRUNCSTORE_v2i64i16:
  case AMDIL::REGIONTRUNCSTORE_i64i16:
  case AMDIL::REGIONTRUNCSTORE_v2i64i16:
  case AMDIL::PRIVATETRUNCSTORE_i64i16:
  case AMDIL::PRIVATETRUNCSTORE_v2i64i16:
    // i64 -> i16: low 32 bits first...
    BuildMI(*mBB, MI, DL,
        mTII->get(AMDIL::LLO_v2i64), AMDIL::R1011)
      .addReg(AMDIL::R1011);
    // ...then FALLTHROUGH (intentional) to the i16 masking below.
  case AMDIL::GLOBALTRUNCSTORE_i32i16:
  case AMDIL::GLOBALTRUNCSTORE_v2i32i16:
  case AMDIL::GLOBALTRUNCSTORE_v4i32i16:
  case AMDIL::LOCALTRUNCSTORE_i32i16:
  case AMDIL::LOCALTRUNCSTORE_v2i32i16:
  case AMDIL::LOCALTRUNCSTORE_v4i32i16:
  case AMDIL::REGIONTRUNCSTORE_i32i16:
  case AMDIL::REGIONTRUNCSTORE_v2i32i16:
  case AMDIL::REGIONTRUNCSTORE_v4i32i16:
  case AMDIL::PRIVATETRUNCSTORE_i32i16:
  case AMDIL::PRIVATETRUNCSTORE_v2i32i16:
  case AMDIL::PRIVATETRUNCSTORE_v4i32i16:
    // Mask down to 16 bits per lane.
    BuildMI(*mBB, MI, DL,
        mTII->get(AMDIL::BINARY_AND_v4i32), AMDIL::R1011)
      .addReg(AMDIL::R1011)
      .addImm(mMFI->addi32Literal(0xFFFF));
    break;
  case AMDIL::GLOBALTRUNCSTORE_i64i32:
  case AMDIL::LOCALTRUNCSTORE_i64i32:
  case AMDIL::REGIONTRUNCSTORE_i64i32:
  case AMDIL::PRIVATETRUNCSTORE_i64i32:
    // Scalar i64 -> i32: just the low half.
    BuildMI(*mBB, MI, DL,
        mTII->get(AMDIL::LLO), AMDIL::R1011)
      .addReg(AMDIL::R1011);
    break;
  case AMDIL::GLOBALTRUNCSTORE_v2i64i32:
  case AMDIL::LOCALTRUNCSTORE_v2i64i32:
  case AMDIL::REGIONTRUNCSTORE_v2i64i32:
  case AMDIL::PRIVATETRUNCSTORE_v2i64i32:
    // Vector i64 -> i32: low half of each lane.
    BuildMI(*mBB, MI, DL,
        mTII->get(AMDIL::LLO_v2i64), AMDIL::R1011)
      .addReg(AMDIL::R1011);
    break;
  case AMDIL::GLOBALTRUNCSTORE_f64f32:
  case AMDIL::LOCALTRUNCSTORE_f64f32:
  case AMDIL::REGIONTRUNCSTORE_f64f32:
  case AMDIL::PRIVATETRUNCSTORE_f64f32:
    // Scalar double -> float conversion.
    BuildMI(*mBB, MI, DL, mTII->get(AMDIL::DTOF),
        AMDIL::R1011).addReg(AMDIL::R1011);
    break;
  case AMDIL::GLOBALTRUNCSTORE_v2f64f32:
  case AMDIL::LOCALTRUNCSTORE_v2f64f32:
  case AMDIL::REGIONTRUNCSTORE_v2f64f32:
  case AMDIL::PRIVATETRUNCSTORE_v2f64f32:
    // v2f64 -> v2f32: split out component 2, convert both halves,
    // reassemble. NOTE(review): this BuildMI inserts via iterator I,
    // unlike the others which pass MI; I was built from MI so the
    // insertion point is the same.
    BuildMI(*mBB, I, DL, mTII->get(AMDIL::VEXTRACT_v2f64),
        AMDIL::R1012).addReg(AMDIL::R1011).addImm(2);
    BuildMI(*mBB, MI, DL, mTII->get(AMDIL::DTOF),
        AMDIL::R1011).addReg(AMDIL::R1011);
    BuildMI(*mBB, MI, DL, mTII->get(AMDIL::DTOF),
        AMDIL::R1012).addReg(AMDIL::R1012);
    BuildMI(*mBB, MI, DL, mTII->get(AMDIL::VINSERT_v2f32),
        AMDIL::R1011).addReg(AMDIL::R1011).addReg(AMDIL::R1012)
      .addImm(1 << 8).addImm(1 << 8);
    break;
  }
}
 // Adjust the computed address in R1010 by the base register of the memory
 // segment being accessed. Only private, local, and constant-pool accesses
 // need an adjustment; every other opcode falls out through `default`.
 // The ExpandCaseToAll*Types macros expand to the full set of `case` labels
 // for every type variant of the named opcode.
 void
AMDILIOExpansion::expandAddressCalc(MachineInstr *MI)
{
  if (!isAddrCalcInstr(MI)) {
    return;
  }
  DebugLoc DL = MI->getDebugLoc();
  switch(MI->getOpcode()) {
    // Private (scratch) accesses: offset by T1.
    ExpandCaseToAllTruncTypes(AMDIL::PRIVATETRUNCSTORE)
    ExpandCaseToAllTypes(AMDIL::PRIVATESTORE)
    ExpandCaseToAllTypes(AMDIL::PRIVATELOAD)
    ExpandCaseToAllTypes(AMDIL::PRIVATESEXTLOAD)
    ExpandCaseToAllTypes(AMDIL::PRIVATEZEXTLOAD)
    ExpandCaseToAllTypes(AMDIL::PRIVATEAEXTLOAD)
      BuildMI(*mBB, MI, DL, mTII->get(AMDIL::ADD_i32),
          AMDIL::R1010).addReg(AMDIL::R1010).addReg(AMDIL::T1);
      break;
    // Local (LDS) accesses: offset by T2.
    ExpandCaseToAllTruncTypes(AMDIL::LOCALTRUNCSTORE)
    ExpandCaseToAllTypes(AMDIL::LOCALLOAD)
    ExpandCaseToAllTypes(AMDIL::LOCALSEXTLOAD)
    ExpandCaseToAllTypes(AMDIL::LOCALZEXTLOAD)
    ExpandCaseToAllTypes(AMDIL::LOCALAEXTLOAD)
    ExpandCaseToAllTypes(AMDIL::LOCALSTORE)
      BuildMI(*mBB, MI, DL, mTII->get(AMDIL::ADD_i32),
          AMDIL::R1010).addReg(AMDIL::R1010).addReg(AMDIL::T2);
      break;
    // Constant-pool loads: offset by SDP.
    ExpandCaseToAllTypes(AMDIL::CPOOLLOAD)
    ExpandCaseToAllTypes(AMDIL::CPOOLSEXTLOAD)
    ExpandCaseToAllTypes(AMDIL::CPOOLZEXTLOAD)
    ExpandCaseToAllTypes(AMDIL::CPOOLAEXTLOAD)
      BuildMI(*mBB, MI, DL, mTII->get(AMDIL::ADD_i32),
          AMDIL::R1010).addReg(AMDIL::R1010).addReg(AMDIL::SDP);
      break;
    default:
      return;
  }
}
+ void
+AMDILIOExpansion::expandLoadStartCode(MachineInstr *MI)
+{
+ DebugLoc DL = MI->getDebugLoc();
+ if (MI->getOperand(2).isReg()) {
+ BuildMI(*mBB, MI, DL, mTII->get(AMDIL::ADD_i32),
+ AMDIL::R1010).addReg(MI->getOperand(1).getReg())
+ .addReg(MI->getOperand(2).getReg());
+ } else {
+ BuildMI(*mBB, MI, DL, mTII->get(AMDIL::MOVE_i32),
+ AMDIL::R1010).addReg(MI->getOperand(1).getReg());
+ }
+ MI->getOperand(1).setReg(AMDIL::R1010);
+ expandAddressCalc(MI);
+}
+ void
+AMDILIOExpansion::emitStaticCPLoad(MachineInstr* MI, int swizzle,
+ int id, bool ExtFPLoad)
+{
+ DebugLoc DL = MI->getDebugLoc();
+ switch(swizzle) {
+ default:
+ BuildMI(*mBB, MI, DL, mTII->get(ExtFPLoad
+ ? AMDIL::DTOF : AMDIL::MOVE_i32),
+ MI->getOperand(0).getReg())
+ .addImm(id);
+ break;
+ case 1:
+ case 2:
+ case 3:
+ BuildMI(*mBB, MI, DL, mTII->get(ExtFPLoad
+ ? AMDIL::DTOF : AMDIL::MOVE_i32), AMDIL::R1001)
+ .addImm(id);
+ BuildMI(*mBB, MI, DL, mTII->get(AMDIL::VINSERT_v4i32),
+ MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(AMDIL::R1001)
+ .addImm(swizzle + 1);
+ break;
+ };
+}
+ void
+AMDILIOExpansion::emitCPInst(MachineInstr* MI,
+ const Constant* C, AMDILKernelManager* KM, int swizzle, bool ExtFPLoad)
+{
+ if (const ConstantFP* CFP = dyn_cast<ConstantFP>(C)) {
+ if (CFP->getType()->isFloatTy()) {
+ uint32_t val = (uint32_t)(CFP->getValueAPF().bitcastToAPInt()
+ .getZExtValue());
+ uint32_t id = mMFI->addi32Literal(val);
+ if (!id) {
+ const APFloat &APF = CFP->getValueAPF();
+ union dtol_union {
+ double d;
+ uint64_t ul;
+ } conv;
+ if (&APF.getSemantics()
+ == (const llvm::fltSemantics*)&APFloat::IEEEsingle) {
+ float fval = APF.convertToFloat();
+ conv.d = (double)fval;
+ } else {
+ conv.d = APF.convertToDouble();
+ }
+ id = mMFI->addi64Literal(conv.ul);
+ }
+ emitStaticCPLoad(MI, swizzle, id, ExtFPLoad);
+ } else {
+ const APFloat &APF = CFP->getValueAPF();
+ union ftol_union {
+ double d;
+ uint64_t ul;
+ } conv;
+ if (&APF.getSemantics()
+ == (const llvm::fltSemantics*)&APFloat::IEEEsingle) {
+ float fval = APF.convertToFloat();
+ conv.d = (double)fval;
+ } else {
+ conv.d = APF.convertToDouble();
+ }
+ uint32_t id = mMFI->getLongLits(conv.ul);
+ if (!id) {
+ id = mMFI->getIntLits((uint32_t)conv.ul);
+ }
+ emitStaticCPLoad(MI, swizzle, id, ExtFPLoad);
+ }
+ } else if (const ConstantInt* CI = dyn_cast<ConstantInt>(C)) {
+ int64_t val = 0;
+ if (CI) {
+ val = CI->getSExtValue();
+ }
+ if (CI->getBitWidth() == 64) {
+ emitStaticCPLoad(MI, swizzle, mMFI->addi64Literal(val), ExtFPLoad);
+ } else {
+ emitStaticCPLoad(MI, swizzle, mMFI->addi32Literal(val), ExtFPLoad);
+ }
+ } else if (const ConstantArray* CA = dyn_cast<ConstantArray>(C)) {
+ uint32_t size = CA->getNumOperands();
+ assert(size < 5 && "Cannot handle a constant array where size > 4");
+ if (size > 4) {
+ size = 4;
+ }
+ for (uint32_t x = 0; x < size; ++x) {
+ emitCPInst(MI, CA->getOperand(0), KM, x, ExtFPLoad);
+ }
+ } else if (const ConstantAggregateZero* CAZ
+ = dyn_cast<ConstantAggregateZero>(C)) {
+ if (CAZ->isNullValue()) {
+ emitStaticCPLoad(MI, swizzle, mMFI->addi32Literal(0), ExtFPLoad);
+ }
+ } else if (const ConstantStruct* CS = dyn_cast<ConstantStruct>(C)) {
+ uint32_t size = CS->getNumOperands();
+ assert(size < 5 && "Cannot handle a constant array where size > 4");
+ if (size > 4) {
+ size = 4;
+ }
+ for (uint32_t x = 0; x < size; ++x) {
+ emitCPInst(MI, CS->getOperand(0), KM, x, ExtFPLoad);
+ }
+ } else if (const ConstantVector* CV = dyn_cast<ConstantVector>(C)) {
+ // TODO: Make this handle vectors natively up to the correct
+ // size
+ uint32_t size = CV->getNumOperands();
+ assert(size < 5 && "Cannot handle a constant array where size > 4");
+ if (size > 4) {
+ size = 4;
+ }
+ for (uint32_t x = 0; x < size; ++x) {
+ emitCPInst(MI, CV->getOperand(0), KM, x, ExtFPLoad);
+ }
+ } else {
+ // TODO: Do we really need to handle ConstantPointerNull?
+ // What about BlockAddress, ConstantExpr and Undef?
+ // How would these even be generated by a valid CL program?
+ assert(0 && "Found a constant type that I don't know how to handle");
+ }
+}
+