//===-- PPCRegisterInfo.cpp - PowerPC Register Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the PowerPC implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "PPCRegisterInfo.h"
#include "PPCFrameLowering.h"
#include "PPCInstrBuilder.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cstdlib>

using namespace llvm;

#define DEBUG_TYPE "reginfo"

#define GET_REGINFO_TARGET_DESC
#include "PPCGenRegisterInfo.inc"

STATISTIC(InflateGPRC, "Number of gprc inputs for getLargestLegalClass");
STATISTIC(InflateGP8RC, "Number of g8rc inputs for getLargestLegalClass");

static cl::opt<bool>
EnableBasePointer("ppc-use-base-pointer", cl::Hidden, cl::init(true),
         cl::desc("Enable use of a base pointer for complex stack frames"));

static cl::opt<bool>
AlwaysBasePointer("ppc-always-use-base-pointer", cl::Hidden, cl::init(false),
         cl::desc("Force the use of a base pointer in every function"));

static cl::opt<bool>
EnableGPRToVecSpills("ppc-enable-gpr-to-vsr-spills", cl::Hidden, cl::init(false),
         cl::desc("Enable spills from gpr to vsr rather than stack"));

static cl::opt<bool>
StackPtrConst("ppc-stack-ptr-caller-preserved",
              cl::desc("Consider R1 caller preserved so stack saves of "
                       "caller preserved registers can be LICM candidates"),
              cl::init(true), cl::Hidden);

static cl::opt<unsigned>
MaxCRBitSpillDist("ppc-max-crbit-spill-dist",
                  cl::desc("Maximum search distance for definition of CR bit "
                           "spill on ppc"),
                  cl::Hidden, cl::init(100));

static unsigned offsetMinAlignForOpcode(unsigned OpC);

PPCRegisterInfo::PPCRegisterInfo(const PPCTargetMachine &TM)
  : PPCGenRegisterInfo(TM.isPPC64() ? PPC::LR8 : PPC::LR,
                       TM.isPPC64() ? 0 : 1,
                       TM.isPPC64() ? 0 : 1),
    TM(TM) {
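  // Map each displacement-form (D-form) memory opcode to its indexed (X-form)
  // equivalent. eliminateFrameIndex consults this map when a frame-index
  // offset cannot be encoded in the instruction's immediate field.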
  ImmToIdxMap[PPC::LD] = PPC::LDX;     ImmToIdxMap[PPC::STD] = PPC::STDX;
  ImmToIdxMap[PPC::LBZ] = PPC::LBZX;   ImmToIdxMap[PPC::STB] = PPC::STBX;
  ImmToIdxMap[PPC::LHZ] = PPC::LHZX;   ImmToIdxMap[PPC::LHA] = PPC::LHAX;
  ImmToIdxMap[PPC::LWZ] = PPC::LWZX;   ImmToIdxMap[PPC::LWA] = PPC::LWAX;
  ImmToIdxMap[PPC::LFS] = PPC::LFSX;   ImmToIdxMap[PPC::LFD] = PPC::LFDX;
  ImmToIdxMap[PPC::STH] = PPC::STHX;   ImmToIdxMap[PPC::STW] = PPC::STWX;
  ImmToIdxMap[PPC::STFS] = PPC::STFSX; ImmToIdxMap[PPC::STFD] = PPC::STFDX;
  ImmToIdxMap[PPC::ADDI] = PPC::ADD4;
  ImmToIdxMap[PPC::LWA_32] = PPC::LWAX_32;

  // 64-bit
  ImmToIdxMap[PPC::LHA8] = PPC::LHAX8; ImmToIdxMap[PPC::LBZ8] = PPC::LBZX8;
  ImmToIdxMap[PPC::LHZ8] = PPC::LHZX8; ImmToIdxMap[PPC::LWZ8] = PPC::LWZX8;
  ImmToIdxMap[PPC::STB8] = PPC::STBX8; ImmToIdxMap[PPC::STH8] = PPC::STHX8;
  ImmToIdxMap[PPC::STW8] = PPC::STWX8; ImmToIdxMap[PPC::STDU] = PPC::STDUX;
  ImmToIdxMap[PPC::ADDI8] = PPC::ADD8;

  // VSX
  ImmToIdxMap[PPC::DFLOADf32] = PPC::LXSSPX;
  ImmToIdxMap[PPC::DFLOADf64] = PPC::LXSDX;
  ImmToIdxMap[PPC::SPILLTOVSR_LD] = PPC::SPILLTOVSR_LDX;
  ImmToIdxMap[PPC::SPILLTOVSR_ST] = PPC::SPILLTOVSR_STX;
  ImmToIdxMap[PPC::DFSTOREf32] = PPC::STXSSPX;
  ImmToIdxMap[PPC::DFSTOREf64] = PPC::STXSDX;
  ImmToIdxMap[PPC::LXV] = PPC::LXVX;
  ImmToIdxMap[PPC::LXSD] = PPC::LXSDX;
  ImmToIdxMap[PPC::LXSSP] = PPC::LXSSPX;
  ImmToIdxMap[PPC::STXV] = PPC::STXVX;
  ImmToIdxMap[PPC::STXSD] = PPC::STXSDX;
  ImmToIdxMap[PPC::STXSSP] = PPC::STXSSPX;

  // SPE
  ImmToIdxMap[PPC::EVLDD] = PPC::EVLDDX;
  ImmToIdxMap[PPC::EVSTDD] = PPC::EVSTDDX;
  ImmToIdxMap[PPC::SPESTW] = PPC::SPESTWX;
  ImmToIdxMap[PPC::SPELWZ] = PPC::SPELWZX;
}

/// getPointerRegClass - Return the register class to use to hold pointers.
/// This is used for addressing modes.
const TargetRegisterClass *
PPCRegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                    unsigned Kind) const {
  // Note that PPCInstrInfo::FoldImmediate also directly uses this Kind value
  // when it checks for ZERO folding.
  if (Kind == 1) {
    if (TM.isPPC64())
      return &PPC::G8RC_NOX0RegClass;
    return &PPC::GPRC_NOR0RegClass;
  }

  if (TM.isPPC64())
    return &PPC::G8RCRegClass;
  return &PPC::GPRCRegClass;
}

const MCPhysReg*
PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const PPCSubtarget &Subtarget = MF->getSubtarget<PPCSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg) {
    if (!TM.isPPC64() && Subtarget.isAIXABI())
      report_fatal_error("AnyReg unimplemented on 32-bit AIX.");
    if (Subtarget.hasVSX())
      return CSR_64_AllRegs_VSX_SaveList;
    if (Subtarget.hasAltivec())
      return CSR_64_AllRegs_Altivec_SaveList;
    return CSR_64_AllRegs_SaveList;
  }

  // On PPC64, we might need to save r2 (but only if it is not reserved).
  // We do not need to treat R2 as callee-saved when using PC-Relative calls
  // because any direct uses of R2 will cause it to be reserved. If the function
  // is a leaf or the only uses of R2 are implicit uses for calls, the calls
  // will use the @notoc relocation which will cause this function to set the
  // st_other bit to 1, thereby communicating to its caller that it arbitrarily
  // clobbers the TOC.
  bool SaveR2 = MF->getRegInfo().isAllocatable(PPC::X2) &&
                !Subtarget.isUsingPCRelativeCalls();

  // Cold calling convention CSRs.
  if (MF->getFunction().getCallingConv() == CallingConv::Cold) {
    if (Subtarget.isAIXABI())
      report_fatal_error("Cold calling unimplemented on AIX.");
    if (TM.isPPC64()) {
      if (Subtarget.hasAltivec())
        return SaveR2 ? CSR_SVR64_ColdCC_R2_Altivec_SaveList
                      : CSR_SVR64_ColdCC_Altivec_SaveList;
      return SaveR2 ? CSR_SVR64_ColdCC_R2_SaveList
                    : CSR_SVR64_ColdCC_SaveList;
    }
    // 32-bit targets.
    if (Subtarget.hasAltivec())
      return CSR_SVR32_ColdCC_Altivec_SaveList;
    else if (Subtarget.hasSPE())
      return CSR_SVR32_ColdCC_SPE_SaveList;
    return CSR_SVR32_ColdCC_SaveList;
  }
  // Standard calling convention CSRs.
  if (TM.isPPC64()) {
    if (Subtarget.hasAltivec())
      return SaveR2 ? CSR_PPC64_R2_Altivec_SaveList
                    : CSR_PPC64_Altivec_SaveList;
    return SaveR2 ? CSR_PPC64_R2_SaveList : CSR_PPC64_SaveList;
  }
  // 32-bit targets.
  if (Subtarget.isAIXABI())
    return CSR_AIX32_SaveList;
  if (Subtarget.hasAltivec())
    return CSR_SVR432_Altivec_SaveList;
  else if (Subtarget.hasSPE())
    return CSR_SVR432_SPE_SaveList;
  return CSR_SVR432_SaveList;
}

const uint32_t *
PPCRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                      CallingConv::ID CC) const {
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  if (CC == CallingConv::AnyReg) {
    if (Subtarget.hasVSX())
      return CSR_64_AllRegs_VSX_RegMask;
    if (Subtarget.hasAltivec())
      return CSR_64_AllRegs_Altivec_RegMask;
    return CSR_64_AllRegs_RegMask;
  }

  if (Subtarget.isAIXABI()) {
    assert(!Subtarget.hasAltivec() && "Altivec is not implemented on AIX yet.");
    return TM.isPPC64() ? CSR_PPC64_RegMask : CSR_AIX32_RegMask;
  }

  if (CC == CallingConv::Cold) {
    return TM.isPPC64() ? (Subtarget.hasAltivec() ? CSR_SVR64_ColdCC_Altivec_RegMask
                                                  : CSR_SVR64_ColdCC_RegMask)
                        : (Subtarget.hasAltivec() ? CSR_SVR32_ColdCC_Altivec_RegMask
                                                  : (Subtarget.hasSPE()
                                                         ? CSR_SVR32_ColdCC_SPE_RegMask
                                                         : CSR_SVR32_ColdCC_RegMask));
  }

  return TM.isPPC64() ? (Subtarget.hasAltivec() ? CSR_PPC64_Altivec_RegMask
                                                : CSR_PPC64_RegMask)
                      : (Subtarget.hasAltivec()
                             ? CSR_SVR432_Altivec_RegMask
                             : (Subtarget.hasSPE() ? CSR_SVR432_SPE_RegMask
                                                   : CSR_SVR432_RegMask));
}

const uint32_t*
PPCRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

void PPCRegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
  for (unsigned PseudoReg : {PPC::ZERO, PPC::ZERO8, PPC::RM})
    Mask[PseudoReg / 32] &= ~(1u << (PseudoReg % 32));
}

BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const PPCFrameLowering *TFI = getFrameLowering(MF);

  // The ZERO register is not really a register, but the representation of r0
  // when used in instructions that treat r0 as the constant 0.
  markSuperRegs(Reserved, PPC::ZERO);

  // The FP register is also not really a register, but is the representation
  // of the frame pointer register used by ISD::FRAMEADDR.
  markSuperRegs(Reserved, PPC::FP);

  // The BP register is also not really a register, but is the representation
  // of the base pointer register used by setjmp.
  markSuperRegs(Reserved, PPC::BP);

  // The counter registers must be reserved so that counter-based loops can
  // be correctly formed (and the mtctr instructions are not DCE'd).
  markSuperRegs(Reserved, PPC::CTR);
  markSuperRegs(Reserved, PPC::CTR8);

  markSuperRegs(Reserved, PPC::R1);
  markSuperRegs(Reserved, PPC::LR);
  markSuperRegs(Reserved, PPC::LR8);
  markSuperRegs(Reserved, PPC::RM);

  markSuperRegs(Reserved, PPC::VRSAVE);

  // The SVR4 ABI reserves r2 and r13.
  if (Subtarget.isSVR4ABI()) {
    // We only reserve r2 if we need to use the TOC pointer. If we have no
    // explicit uses of the TOC pointer (meaning we're a leaf function with
    // no constant-pool loads, etc.) and we have no potential uses inside an
    // inline asm block, then we can treat r2 as an ordinary callee-saved
    // register.
    const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
    if (!TM.isPPC64() || FuncInfo->usesTOCBasePtr() || MF.hasInlineAsm())
      markSuperRegs(Reserved, PPC::R2);  // System-reserved register
    markSuperRegs(Reserved, PPC::R13);   // Small Data Area pointer register
  }

  // Always reserve r2 on AIX for now.
  // TODO: Make r2 allocatable on AIX/XCOFF for some leaf functions.
  if (Subtarget.isAIXABI())
    markSuperRegs(Reserved, PPC::R2);  // System-reserved register

  // On PPC64, r13 is the thread pointer. Never allocate this register.
  if (TM.isPPC64())
    markSuperRegs(Reserved, PPC::R13);

  if (TFI->needsFP(MF))
    markSuperRegs(Reserved, PPC::R31);

  bool IsPositionIndependent = TM.isPositionIndependent();
  if (hasBasePointer(MF)) {
    if (Subtarget.is32BitELFABI() && IsPositionIndependent)
      markSuperRegs(Reserved, PPC::R29);
    else
      markSuperRegs(Reserved, PPC::R30);
  }

  if (Subtarget.is32BitELFABI() && IsPositionIndependent)
    markSuperRegs(Reserved, PPC::R30);

  // Reserve Altivec registers when Altivec is unavailable.
  if (!Subtarget.hasAltivec())
    for (TargetRegisterClass::iterator I = PPC::VRRCRegClass.begin(),
         IE = PPC::VRRCRegClass.end(); I != IE; ++I)
      markSuperRegs(Reserved, *I);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool PPCRegisterInfo::requiresFrameIndexScavenging(const MachineFunction &MF) const {
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const PPCInstrInfo *InstrInfo = Subtarget.getInstrInfo();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const std::vector<CalleeSavedInfo> &Info = MFI.getCalleeSavedInfo();

  // If the callee saved info is invalid we have to default to true for safety.
  if (!MFI.isCalleeSavedInfoValid())
    return true;

  // We will require the use of X-Forms because the frame is larger than what
  // can be represented in signed 16 bits that fit in the immediate of a D-Form.
  // If we need an X-Form then we need a register to store the address offset.
  unsigned FrameSize = MFI.getStackSize();
  // Signed 16 bits means that the FrameSize cannot be more than 15 bits.
  if (FrameSize & ~0x7FFF)
    return true;

  // The callee saved info is valid so it can be traversed.
  // Checking for registers that need saving that do not have load or store
  // forms where the address offset is an immediate.
  for (unsigned i = 0; i < Info.size(); i++) {
    int FrIdx = Info[i].getFrameIdx();
    unsigned Reg = Info[i].getReg();

    const TargetRegisterClass *RC = getMinimalPhysRegClass(Reg);
    unsigned Opcode = InstrInfo->getStoreOpcodeForSpill(RC);
    if (!MFI.isFixedObjectIndex(FrIdx)) {
      // This is not a fixed object. If it requires alignment then we may still
      // need to use the XForm.
      if (offsetMinAlignForOpcode(Opcode) > 1)
        return true;
    }

    // This is either:
    // 1) A fixed frame index object which we know is aligned, so as long as we
    //    have a valid DForm/DSForm/DQForm (non XForm) we don't need to consider
    //    the alignment here.
    // 2) A non-fixed object, in which case we now know that the min required
    //    alignment is no more than 1 based on the previous check.
    if (InstrInfo->isXFormMemOp(Opcode))
      return true;
  }
  return false;
}

bool PPCRegisterInfo::isCallerPreservedPhysReg(MCRegister PhysReg,
                                               const MachineFunction &MF) const {
  assert(Register::isPhysicalRegister(PhysReg));
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (!TM.isPPC64())
    return false;

  if (!Subtarget.isSVR4ABI())
    return false;
  if (PhysReg == PPC::X2)
    // X2 is guaranteed to be preserved within a function if it is reserved.
    // The reason it's reserved is that it's the TOC pointer (and the function
    // uses the TOC). In functions where it isn't reserved (i.e. leaf functions
    // with no TOC access), we can't claim that it is preserved.
    return (getReservedRegs(MF).test(PPC::X2));
  if (StackPtrConst && (PhysReg == PPC::X1) && !MFI.hasVarSizedObjects()
      && !MFI.hasOpaqueSPAdjustment())
    // The value of the stack pointer does not change within a function after
    // the prologue and before the epilogue if there are no dynamic allocations
    // and no inline asm which clobbers X1.
    return true;
  return false;
}

unsigned PPCRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                              MachineFunction &MF) const {
  const PPCFrameLowering *TFI = getFrameLowering(MF);
  const unsigned DefaultSafety = 1;

  switch (RC->getID()) {
  default:
    return 0;
  case PPC::G8RC_NOX0RegClassID:
  case PPC::GPRC_NOR0RegClassID:
  case PPC::SPERCRegClassID:
  case PPC::G8RCRegClassID:
  case PPC::GPRCRegClassID: {
    unsigned FP = TFI->hasFP(MF) ? 1 : 0;
    return 32 - FP - DefaultSafety;
  }
  case PPC::F8RCRegClassID:
  case PPC::F4RCRegClassID:
  case PPC::QFRCRegClassID:
  case PPC::QSRCRegClassID:
  case PPC::QBRCRegClassID:
  case PPC::VRRCRegClassID:
  case PPC::VFRCRegClassID:
  case PPC::VSLRCRegClassID:
    return 32 - DefaultSafety;
  case PPC::VSRCRegClassID:
  case PPC::VSFRCRegClassID:
  case PPC::VSSRCRegClassID:
    return 64 - DefaultSafety;
  case PPC::CRRCRegClassID:
    return 8 - DefaultSafety;
  }
}

const TargetRegisterClass *
PPCRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                           const MachineFunction &MF) const {
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  if (Subtarget.hasVSX()) {
    // With VSX, we can inflate various sub-register classes to the full VSX
    // register set.

    // For Power9 we allow the user to enable GPR to vector spills.
    // FIXME: Currently limited to spilling GP8RC. A follow on patch will add
    // support to spill GPRC.
    if (TM.isELFv2ABI()) {
      if (Subtarget.hasP9Vector() && EnableGPRToVecSpills &&
          RC == &PPC::G8RCRegClass) {
        InflateGP8RC++;
        return &PPC::SPILLTOVSRRCRegClass;
      }
      if (RC == &PPC::GPRCRegClass && EnableGPRToVecSpills)
        InflateGPRC++;
    }
    if (RC == &PPC::F8RCRegClass)
      return &PPC::VSFRCRegClass;
    else if (RC == &PPC::VRRCRegClass)
      return &PPC::VSRCRegClass;
    else if (RC == &PPC::F4RCRegClass && Subtarget.hasP8Vector())
      return &PPC::VSSRCRegClass;
  }

  return TargetRegisterInfo::getLargestLegalSuperClass(RC, MF);
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

/// lowerDynamicAlloc - Generate the code for allocating an object in the
/// current frame. The sequence of code will be in the general form
///
///   addi   R0, SP, \#frameSize  ; get the address of the previous frame
///   stwux  R0, SP, Rnegsize     ; add and update the SP with the negated size
///   addi   Rnew, SP, \#maxCallFrameSize ; get the top of the allocation
///
void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II) const {
  // Get the instruction.
  MachineInstr &MI = *II;
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  // Get the basic block's function.
  MachineFunction &MF = *MBB.getParent();
  // Get the frame info.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  // Get the instruction info.
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  // Determine whether 64-bit pointers are used.
  bool LP64 = TM.isPPC64();
  DebugLoc dl = MI.getDebugLoc();

  // Get the maximum call stack size.
  unsigned maxCallFrameSize = MFI.getMaxCallFrameSize();
  Align MaxAlign = MFI.getMaxAlign();
  assert(isAligned(MaxAlign, maxCallFrameSize) &&
         "Maximum call-frame size not sufficiently aligned");
  (void)MaxAlign;

  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
  Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
  bool KillNegSizeReg = MI.getOperand(1).isKill();
  Register NegSizeReg = MI.getOperand(1).getReg();

  prepareDynamicAlloca(II, NegSizeReg, KillNegSizeReg, Reg);
  // Grow the stack and update the stack pointer link, then determine the
  // address of new allocated space.
  if (LP64) {
    BuildMI(MBB, II, dl, TII.get(PPC::STDUX), PPC::X1)
        .addReg(Reg, RegState::Kill)
        .addReg(PPC::X1)
        .addReg(NegSizeReg, getKillRegState(KillNegSizeReg));
    BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), MI.getOperand(0).getReg())
        .addReg(PPC::X1)
        .addImm(maxCallFrameSize);
  } else {
    BuildMI(MBB, II, dl, TII.get(PPC::STWUX), PPC::R1)
        .addReg(Reg, RegState::Kill)
        .addReg(PPC::R1)
        .addReg(NegSizeReg, getKillRegState(KillNegSizeReg));
    BuildMI(MBB, II, dl, TII.get(PPC::ADDI), MI.getOperand(0).getReg())
        .addReg(PPC::R1)
        .addImm(maxCallFrameSize);
  }

  // Discard the DYNALLOC instruction.
  MBB.erase(II);
}

/// To accomplish dynamic stack allocation, we have to calculate the exact size
/// to subtract from the stack pointer according to the alignment information,
/// and fetch the previous frame pointer.
void PPCRegisterInfo::prepareDynamicAlloca(MachineBasicBlock::iterator II,
                                           Register &NegSizeReg,
                                           bool &KillNegSizeReg,
                                           Register &FramePointer) const {
  // Get the instruction.
  MachineInstr &MI = *II;
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  // Get the basic block's function.
  MachineFunction &MF = *MBB.getParent();
  // Get the frame info.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  // Get the instruction info.
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  // Determine whether 64-bit pointers are used.
  bool LP64 = TM.isPPC64();
  DebugLoc dl = MI.getDebugLoc();
  // Get the total frame size.
  unsigned FrameSize = MFI.getStackSize();

  // Get stack alignments.
  const PPCFrameLowering *TFI = getFrameLowering(MF);
  Align TargetAlign = TFI->getStackAlign();
  Align MaxAlign = MFI.getMaxAlign();

  // Determine the previous frame's address. If FrameSize can't be
  // represented as 16 bits or we need special alignment, then we load the
  // previous frame's address from 0(SP). Why not do an addis of the hi?
  // Because R0 is our only safe tmp register and addi/addis treat R0 as zero.
  // Constructing the constant and adding would take 3 instructions.
  // Fortunately, a frame greater than 32K is rare.
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

  if (MaxAlign < TargetAlign && isInt<16>(FrameSize)) {
    if (LP64)
      BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), FramePointer)
          .addReg(PPC::X31)
          .addImm(FrameSize);
    else
      BuildMI(MBB, II, dl, TII.get(PPC::ADDI), FramePointer)
          .addReg(PPC::R31)
          .addImm(FrameSize);
  } else if (LP64) {
    BuildMI(MBB, II, dl, TII.get(PPC::LD), FramePointer)
        .addImm(0)
        .addReg(PPC::X1);
  } else {
    BuildMI(MBB, II, dl, TII.get(PPC::LWZ), FramePointer)
        .addImm(0)
        .addReg(PPC::R1);
  }
  // Determine the actual NegSizeReg according to alignment info.
  if (LP64) {
    if (MaxAlign > TargetAlign) {
      unsigned UnalNegSizeReg = NegSizeReg;
      NegSizeReg = MF.getRegInfo().createVirtualRegister(G8RC);

      // Unfortunately, there is no andi, only andi., and we can't insert that
      // here because we might clobber cr0 while it is live.
      BuildMI(MBB, II, dl, TII.get(PPC::LI8), NegSizeReg)
          .addImm(~(MaxAlign.value() - 1));

      unsigned NegSizeReg1 = NegSizeReg;
      NegSizeReg = MF.getRegInfo().createVirtualRegister(G8RC);
      BuildMI(MBB, II, dl, TII.get(PPC::AND8), NegSizeReg)
          .addReg(UnalNegSizeReg, getKillRegState(KillNegSizeReg))
          .addReg(NegSizeReg1, RegState::Kill);
      KillNegSizeReg = true;
    }
  } else {
    if (MaxAlign > TargetAlign) {
      unsigned UnalNegSizeReg = NegSizeReg;
      NegSizeReg = MF.getRegInfo().createVirtualRegister(GPRC);

      // Unfortunately, there is no andi, only andi., and we can't insert that
      // here because we might clobber cr0 while it is live.
      BuildMI(MBB, II, dl, TII.get(PPC::LI), NegSizeReg)
          .addImm(~(MaxAlign.value() - 1));

      unsigned NegSizeReg1 = NegSizeReg;
      NegSizeReg = MF.getRegInfo().createVirtualRegister(GPRC);
      BuildMI(MBB, II, dl, TII.get(PPC::AND), NegSizeReg)
          .addReg(UnalNegSizeReg, getKillRegState(KillNegSizeReg))
          .addReg(NegSizeReg1, RegState::Kill);
      KillNegSizeReg = true;
    }
  }
}
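/// lowerPrepareProbedAlloca - Expand the PREPARE_PROBED_ALLOCA pseudo: compute
/// the previous frame pointer and the (possibly alignment-adjusted) negated
/// allocation size that the probed stack-allocation sequence consumes.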
void PPCRegisterInfo::lowerPrepareProbedAlloca(
    MachineBasicBlock::iterator II) const {
  MachineInstr &MI = *II;
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  // Get the basic block's function.
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  // Get the instruction info.
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  // Determine whether 64-bit pointers are used.
  bool LP64 = TM.isPPC64();
  DebugLoc dl = MI.getDebugLoc();
  Register FramePointer = MI.getOperand(0).getReg();
  const Register ActualNegSizeReg = MI.getOperand(1).getReg();
  bool KillNegSizeReg = MI.getOperand(2).isKill();
  Register NegSizeReg = MI.getOperand(2).getReg();
  const MCInstrDesc &CopyInst = TII.get(LP64 ? PPC::OR8 : PPC::OR);
  // The register allocator might assign FramePointer and NegSizeReg to the
  // same physical register.
  if (FramePointer == NegSizeReg) {
    assert(KillNegSizeReg && "FramePointer is a def and NegSizeReg is a use, "
                             "NegSizeReg should be killed");
    // FramePointer is clobbered earlier than the use of NegSizeReg in
    // prepareDynamicAlloca, save NegSizeReg in ActualNegSizeReg to avoid
    // misuse.
    BuildMI(MBB, II, dl, CopyInst, ActualNegSizeReg)
        .addReg(NegSizeReg)
        .addReg(NegSizeReg);
    NegSizeReg = ActualNegSizeReg;
    KillNegSizeReg = false;
  }
  prepareDynamicAlloca(II, NegSizeReg, KillNegSizeReg, FramePointer);
  // NegSizeReg might be updated in prepareDynamicAlloca if MaxAlign >
  // TargetAlign.
  if (NegSizeReg != ActualNegSizeReg)
    BuildMI(MBB, II, dl, CopyInst, ActualNegSizeReg)
        .addReg(NegSizeReg)
        .addReg(NegSizeReg);
  MBB.erase(II);
}

void PPCRegisterInfo::lowerDynamicAreaOffset(
    MachineBasicBlock::iterator II) const {
  // Get the instruction.
  MachineInstr &MI = *II;
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  // Get the basic block's function.
  MachineFunction &MF = *MBB.getParent();
  // Get the frame info.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  // Get the instruction info.
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();

  unsigned maxCallFrameSize = MFI.getMaxCallFrameSize();
  bool is64Bit = TM.isPPC64();
  DebugLoc dl = MI.getDebugLoc();
  BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LI8 : PPC::LI),
          MI.getOperand(0).getReg())
      .addImm(maxCallFrameSize);
  MBB.erase(II);
}

/// lowerCRSpilling - Generate the code for spilling a CR register. Instead of
/// reserving a whole register (R0), we scrounge for one here. This generates
/// code like this:
///
///   mfcr rA                  ; Move the conditional register into GPR rA.
///   rlwinm rA, rA, SB, 0, 31 ; Shift the bits left so they are in CR0's slot.
///   stw rA, FI               ; Store rA to the frame.
///
void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II,
                                      unsigned FrameIndex) const {
  // Get the instruction.
  MachineInstr &MI = *II;       // ; SPILL_CR <SrcReg>, <offset>
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();

  bool LP64 = TM.isPPC64();
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

  Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
  Register SrcReg = MI.getOperand(0).getReg();

  // We need to store the CR in the low 4-bits of the saved value. First, issue
  // an MFOCRF to save all of the CRBits and, if needed, kill the SrcReg.
  BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg)
      .addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill()));

  // If the saved register wasn't CR0, shift the bits left so that they are in
  // CR0's slot.
  if (SrcReg != PPC::CR0) {
    Register Reg1 = Reg;
    Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);

    // rlwinm rA, rA, ShiftBits, 0, 31.
    BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
        .addReg(Reg1, RegState::Kill)
        .addImm(getEncodingValue(SrcReg) * 4)
        .addImm(0)
        .addImm(31);
  }

  addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::STW8 : PPC::STW))
                        .addReg(Reg, RegState::Kill),
                    FrameIndex);

  // Discard the pseudo instruction.
  MBB.erase(II);
}

void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II,
                                     unsigned FrameIndex) const {
  // Get the instruction.
  MachineInstr &MI = *II;       // ; <DestReg> = RESTORE_CR <offset>
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();

  bool LP64 = TM.isPPC64();
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

  Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
  Register DestReg = MI.getOperand(0).getReg();
  assert(MI.definesRegister(DestReg) &&
         "RESTORE_CR does not define its destination");

  addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LWZ8 : PPC::LWZ),
                            Reg), FrameIndex);

  // If the reloaded register isn't CR0, shift the bits right so that they are
  // in the right CR's slot.
  if (DestReg != PPC::CR0) {
    Register Reg1 = Reg;
    Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);

    unsigned ShiftBits = getEncodingValue(DestReg) * 4;
    // rlwinm r11, r11, 32-ShiftBits, 0, 31.
    BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
        .addReg(Reg1, RegState::Kill).addImm(32 - ShiftBits).addImm(0)
        .addImm(31);
  }

  BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTOCRF8 : PPC::MTOCRF), DestReg)
      .addReg(Reg, RegState::Kill);

  // Discard the pseudo instruction.
  MBB.erase(II);
}
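/// lowerCRBitSpilling - Generate the code for spilling a single CR bit. If the
/// defining instruction is found nearby and the bit's value is known
/// (CRSET/CRUNSET), spill the known value directly; on ISA 3.0 the LT bits can
/// be extracted with SETB; otherwise move the whole CR field into a GPR and
/// isolate the bit with rlwinm before storing it.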
void PPCRegisterInfo::lowerCRBitSpilling(MachineBasicBlock::iterator II,
                                         unsigned FrameIndex) const {
  // Get the instruction.
  MachineInstr &MI = *II;       // ; SPILL_CRBIT <SrcReg>, <offset>
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  DebugLoc dl = MI.getDebugLoc();

  bool LP64 = TM.isPPC64();
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

  Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
  Register SrcReg = MI.getOperand(0).getReg();

  // Search up the BB to find the definition of the CR bit.
  MachineBasicBlock::reverse_iterator Ins = MI;
  MachineBasicBlock::reverse_iterator Rend = MBB.rend();
  ++Ins;
  unsigned CRBitSpillDistance = 0;
  bool SeenUse = false;
  for (; Ins != Rend; ++Ins) {
    // Definition found.
    if (Ins->modifiesRegister(SrcReg, TRI))
      break;
    // Use found.
    if (Ins->readsRegister(SrcReg, TRI))
      SeenUse = true;
    // Unable to find CR bit definition within maximum search distance.
    if (CRBitSpillDistance == MaxCRBitSpillDist) {
      Ins = MI;
      break;
    }
    // Skip debug instructions when counting CR bit spill distance.
    if (!Ins->isDebugInstr())
      CRBitSpillDistance++;
  }

  // Unable to find the definition of the CR bit in the MBB.
  if (Ins == MBB.rend())
    Ins = MI;

  bool SpillsKnownBit = false;
  // There is no need to extract the CR bit if its value is already known.
  switch (Ins->getOpcode()) {
  case PPC::CRUNSET:
    BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LI8 : PPC::LI), Reg)
        .addImm(0);
    SpillsKnownBit = true;
    break;
  case PPC::CRSET:
    BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LIS8 : PPC::LIS), Reg)
        .addImm(-32768);
    SpillsKnownBit = true;
    break;
  default:
    // On Power9, we can use SETB to extract the LT bit. This only works for
    // the LT bit since SETB produces -1/1/0 for LT/GT/<neither>. So the value
    // of the bit we care about (32-bit sign bit) will be set to the value of
    // the LT bit (regardless of the other bits in the CR field).
    if (Subtarget.isISA3_0()) {
      if (SrcReg == PPC::CR0LT || SrcReg == PPC::CR1LT ||
          SrcReg == PPC::CR2LT || SrcReg == PPC::CR3LT ||
          SrcReg == PPC::CR4LT || SrcReg == PPC::CR5LT ||
          SrcReg == PPC::CR6LT || SrcReg == PPC::CR7LT) {
        BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::SETB8 : PPC::SETB), Reg)
            .addReg(getCRFromCRBit(SrcReg), RegState::Undef);
        break;
      }
    }

    // We need to move the CR field that contains the CR bit we are spilling.
    // The super register may not be explicitly defined (i.e. it can be defined
    // by a CR-logical that only defines the subreg) so we state that the CR
    // field is undef. Also, in order to preserve the kill flag on the CR bit,
    // we add it as an implicit use.
    BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg)
        .addReg(getCRFromCRBit(SrcReg), RegState::Undef)
        .addReg(SrcReg,
                RegState::Implicit | getKillRegState(MI.getOperand(0).isKill()));

    // If the saved register wasn't CR0LT, shift the bits left so that the bit
    // to store is the first one. Mask all but that bit.
    Register Reg1 = Reg;
    Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);

    // rlwinm rA, rA, ShiftBits, 0, 0.
    BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
        .addReg(Reg1, RegState::Kill)
        .addImm(getEncodingValue(SrcReg))
        .addImm(0).addImm(0);
  }
  addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::STW8 : PPC::STW))
                        .addReg(Reg, RegState::Kill),
                    FrameIndex);

  bool KillsCRBit = MI.killsRegister(SrcReg, TRI);
  // Discard the pseudo instruction.
  MBB.erase(II);
  if (SpillsKnownBit && KillsCRBit && !SeenUse) {
    Ins->setDesc(TII.get(PPC::UNENCODED_NOP));
    Ins->RemoveOperand(0);
  }
}
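/// lowerCRBitRestore - Reload a spilled CR bit: load the saved word from the
/// frame slot, move the containing CR field into a GPR, insert the bit with
/// rlwimi, and move the field back with mtocrf.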
void PPCRegisterInfo::lowerCRBitRestore(MachineBasicBlock::iterator II,
                                        unsigned FrameIndex) const {
  // Get the instruction.
  MachineInstr &MI = *II;       // ; <DestReg> = RESTORE_CRBIT <offset>
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();

  bool LP64 = TM.isPPC64();
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

  Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
  Register DestReg = MI.getOperand(0).getReg();
  assert(MI.definesRegister(DestReg) &&
         "RESTORE_CRBIT does not define its destination");

  addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LWZ8 : PPC::LWZ),
                            Reg), FrameIndex);

  BuildMI(MBB, II, dl, TII.get(TargetOpcode::IMPLICIT_DEF), DestReg);

  Register RegO = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
  BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), RegO)
      .addReg(getCRFromCRBit(DestReg));

  unsigned ShiftBits = getEncodingValue(DestReg);
  // rlwimi r11, r10, 32-ShiftBits, ..., ...
  BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWIMI8 : PPC::RLWIMI), RegO)
      .addReg(RegO, RegState::Kill)
      .addReg(Reg, RegState::Kill)
      .addImm(ShiftBits ? 32 - ShiftBits : 0)
      .addImm(ShiftBits)
      .addImm(ShiftBits);

  BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTOCRF8 : PPC::MTOCRF),
          getCRFromCRBit(DestReg))
      .addReg(RegO, RegState::Kill)
      // Make sure we have a use dependency all the way through this
      // sequence of instructions. We can't have the other bits in the CR
      // modified in between the mfocrf and the mtocrf.
      .addReg(getCRFromCRBit(DestReg), RegState::Implicit);

  // Discard the pseudo instruction.
  MBB.erase(II);
}

void PPCRegisterInfo::lowerVRSAVESpilling(MachineBasicBlock::iterator II,
                                          unsigned FrameIndex) const {
  // Get the instruction.
  MachineInstr &MI = *II;       // ; SPILL_VRSAVE <SrcReg>, <offset>
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();

  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
  Register Reg = MF.getRegInfo().createVirtualRegister(GPRC);
  Register SrcReg = MI.getOperand(0).getReg();

  BuildMI(MBB, II, dl, TII.get(PPC::MFVRSAVEv), Reg)
      .addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill()));

  addFrameReference(
      BuildMI(MBB, II, dl, TII.get(PPC::STW)).addReg(Reg, RegState::Kill),
      FrameIndex);

  // Discard the pseudo instruction.
  MBB.erase(II);
}

void PPCRegisterInfo::lowerVRSAVERestore(MachineBasicBlock::iterator II,
                                         unsigned FrameIndex) const {
  // Get the instruction.
  MachineInstr &MI = *II;       // ; <DestReg> = RESTORE_VRSAVE <offset>
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();

  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
  Register Reg = MF.getRegInfo().createVirtualRegister(GPRC);
  Register DestReg = MI.getOperand(0).getReg();
  assert(MI.definesRegister(DestReg) &&
         "RESTORE_VRSAVE does not define its destination");

  addFrameReference(BuildMI(MBB, II, dl, TII.get(PPC::LWZ),
                            Reg), FrameIndex);

  BuildMI(MBB, II, dl, TII.get(PPC::MTVRSAVEv), DestReg)
      .addReg(Reg, RegState::Kill);

  // Discard the pseudo instruction.
  MBB.erase(II);
}

bool PPCRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           Register Reg, int &FrameIdx) const {
  // For the nonvolatile condition registers (CR2, CR3, CR4) return true to
  // prevent allocating an additional frame slot.
  // For 64-bit ELF and AIX, the CR save area is in the linkage area at SP+8,
  // for 32-bit AIX the CR save area is in the linkage area at SP+4.
  // We have created a FrameIndex to that spill slot to keep the
  // CalleeSavedInfos valid.
  // For 32-bit ELF, we have previously created the stack slot if needed, so
  // return its FrameIdx.
  if (PPC::CR2 <= Reg && Reg <= PPC::CR4) {
    FrameIdx = MF.getInfo<PPCFunctionInfo>()->getCRSpillFrameIndex();
    return true;
  }
  return false;
}

// If the offset must be a multiple of some value, return what that value is.
static unsigned offsetMinAlignForOpcode(unsigned OpC) {
  switch (OpC) {
  default:
    return 1;
  case PPC::LWA:
  case PPC::LWA_32:
  case PPC::LD:
  case PPC::LDU:
  case PPC::STD:
  case PPC::STDU:
  case PPC::DFLOADf32:
  case PPC::DFLOADf64:
  case PPC::DFSTOREf32:
  case PPC::DFSTOREf64:
  case PPC::LXSD:
  case PPC::LXSSP:
  case PPC::STXSD:
  case PPC::STXSSP:
    return 4;
  case PPC::EVLDD:
  case PPC::EVSTDD:
    return 8;
  case PPC::LXV:
  case PPC::STXV:
    return 16;
  }
}

// If the offset must be a multiple of some value, return what that value is.
static unsigned offsetMinAlign(const MachineInstr &MI) {
  unsigned OpC = MI.getOpcode();
  return offsetMinAlignForOpcode(OpC);
}

// Return the OffsetOperandNo given the FIOperandNum (and the instruction).
static unsigned getOffsetONFromFION(const MachineInstr &MI,
                                    unsigned FIOperandNum) {
  // Take into account whether it's an add or mem instruction
  unsigned OffsetOperandNo = (FIOperandNum == 2) ? 1 : 2;
  if (MI.isInlineAsm())
    OffsetOperandNo = FIOperandNum - 1;
  else if (MI.getOpcode() == TargetOpcode::STACKMAP ||
           MI.getOpcode() == TargetOpcode::PATCHPOINT)
    OffsetOperandNo = FIOperandNum + 1;

  return OffsetOperandNo;
}
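/// eliminateFrameIndex - Replace an abstract frame-index operand with a
/// concrete base register and offset, first lowering the dynamic-alloca and
/// CR/VRSAVE spill/restore pseudos. If the resulting offset does not fit the
/// instruction's immediate field, the instruction is rewritten to the indexed
/// (X-form) opcode recorded in ImmToIdxMap.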
void
PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  // Get the instruction.
  MachineInstr &MI = *II;
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  // Get the basic block's function.
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  // Get the instruction info.
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  // Get the frame info.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  DebugLoc dl = MI.getDebugLoc();

  unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum);

  // Get the frame index.
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();

  // Get the frame pointer save index. Users of this index are primarily
  // DYNALLOC instructions.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int FPSI = FI->getFramePointerSaveIndex();
  // Get the instruction opcode.
  unsigned OpC = MI.getOpcode();

  if ((OpC == PPC::DYNAREAOFFSET || OpC == PPC::DYNAREAOFFSET8)) {
    lowerDynamicAreaOffset(II);
    return;
  }

  // Special case for dynamic alloca.
  if (FPSI && FrameIndex == FPSI &&
      (OpC == PPC::DYNALLOC || OpC == PPC::DYNALLOC8)) {
    lowerDynamicAlloc(II);
    return;
  }

  if (FPSI && FrameIndex == FPSI &&
      (OpC == PPC::PREPARE_PROBED_ALLOCA_64 ||
       OpC == PPC::PREPARE_PROBED_ALLOCA_32 ||
       OpC == PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64 ||
       OpC == PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32)) {
    lowerPrepareProbedAlloca(II);
    return;
  }

  // Special case for pseudo-ops SPILL_CR and RESTORE_CR, etc.
  if (OpC == PPC::SPILL_CR) {
    lowerCRSpilling(II, FrameIndex);
    return;
  } else if (OpC == PPC::RESTORE_CR) {
    lowerCRRestore(II, FrameIndex);
    return;
  } else if (OpC == PPC::SPILL_CRBIT) {
    lowerCRBitSpilling(II, FrameIndex);
    return;
  } else if (OpC == PPC::RESTORE_CRBIT) {
    lowerCRBitRestore(II, FrameIndex);
    return;
  } else if (OpC == PPC::SPILL_VRSAVE) {
    lowerVRSAVESpilling(II, FrameIndex);
    return;
  } else if (OpC == PPC::RESTORE_VRSAVE) {
    lowerVRSAVERestore(II, FrameIndex);
    return;
  }

  // Replace the FrameIndex with the base register: GPR1 (SP) or GPR31 (FP).
  MI.getOperand(FIOperandNum).ChangeToRegister(
      FrameIndex < 0 ? getBaseRegister(MF) : getFrameRegister(MF), false);

  // If the instruction is not present in ImmToIdxMap, then it has no immediate
  // form (and must be r+r).
  bool noImmForm = !MI.isInlineAsm() && OpC != TargetOpcode::STACKMAP &&
                   OpC != TargetOpcode::PATCHPOINT && !ImmToIdxMap.count(OpC);

  // Now add the frame object offset to the offset from r1.
  int Offset = MFI.getObjectOffset(FrameIndex);
  Offset += MI.getOperand(OffsetOperandNo).getImm();

  // If we're not using a Frame Pointer that has been set to the value of the
  // SP before having the stack size subtracted from it, then add the stack size
  // to Offset to get the correct offset.
  // Naked functions have stack size 0, although getStackSize may not reflect
  // that because we didn't call all the pieces that compute it for naked
  // functions.
  if (!MF.getFunction().hasFnAttribute(Attribute::Naked)) {
    if (!(hasBasePointer(MF) && FrameIndex < 0))
      Offset += MFI.getStackSize();
  }
  // If we can, encode the offset directly into the instruction. If this is a
  // normal PPC "ri" instruction, any 16-bit value can be safely encoded. If
  // this is a PPC64 "ix" instruction, only a 16-bit value with the low two bits
  // clear can be encoded. This is extremely uncommon, because normally you
  // only "std" to a stack slot that is at least 4-byte aligned, but it can
  // happen in invalid code.
  assert(OpC != PPC::DBG_VALUE &&
         "This should be handled in a target-independent way");
  bool OffsetFitsMnemonic = (OpC == PPC::EVSTDD || OpC == PPC::EVLDD) ?
                            isUInt<8>(Offset) :
                            isInt<16>(Offset);
  if (!noImmForm && ((OffsetFitsMnemonic &&
                      ((Offset % offsetMinAlign(MI)) == 0)) ||
                     OpC == TargetOpcode::STACKMAP ||
                     OpC == TargetOpcode::PATCHPOINT)) {
    MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
    return;
  }

  // The offset doesn't fit into the instruction's immediate field, so
  // materialize it in a register instead.

  bool is64Bit = TM.isPPC64();
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
  const TargetRegisterClass *RC = is64Bit ? G8RC : GPRC;
  Register SRegHi = MF.getRegInfo().createVirtualRegister(RC),
           SReg = MF.getRegInfo().createVirtualRegister(RC);

  // Insert a set of rA with the full offset value before the ld, st, or add.
  if (isInt<16>(Offset))
    BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LI8 : PPC::LI), SReg)
        .addImm(Offset);
  else {
    BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LIS8 : PPC::LIS), SRegHi)
        .addImm(Offset >> 16);
    BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::ORI8 : PPC::ORI), SReg)
        .addReg(SRegHi, RegState::Kill)
        .addImm(Offset);
  }

  // Convert into indexed form of the instruction:
  //
  //   sth 0:rA, 1:imm 2:(rB) ==> sthx 0:rA, 2:rB, 1:r0
  //   addi 0:rA, 1:rB, 2:imm ==> add 0:rA, 1:rB, 2:r0
  unsigned OperandBase;

  if (noImmForm)
    OperandBase = 1;
  else if (OpC != TargetOpcode::INLINEASM &&
           OpC != TargetOpcode::INLINEASM_BR) {
    assert(ImmToIdxMap.count(OpC) &&
           "No indexed form of load or store available!");
    unsigned NewOpcode = ImmToIdxMap.find(OpC)->second;
    MI.setDesc(TII.get(NewOpcode));
    OperandBase = 1;
  } else {
    OperandBase = OffsetOperandNo;
  }

  Register StackReg = MI.getOperand(FIOperandNum).getReg();
  MI.getOperand(OperandBase).ChangeToRegister(StackReg, false);
  MI.getOperand(OperandBase + 1).ChangeToRegister(SReg, false, false, true);
}

Register PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const PPCFrameLowering *TFI = getFrameLowering(MF);

  if (!TM.isPPC64())
    return TFI->hasFP(MF) ? PPC::R31 : PPC::R1;
  else
    return TFI->hasFP(MF) ? PPC::X31 : PPC::X1;
}
Register PPCRegisterInfo::getBaseRegister(const MachineFunction &MF) const {
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  if (!hasBasePointer(MF))
    return getFrameRegister(MF);

  if (TM.isPPC64())
    return PPC::X30;

  if (Subtarget.isSVR4ABI() && TM.isPositionIndependent())
    return PPC::R29;

  return PPC::R30;
}

bool PPCRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  if (!EnableBasePointer)
    return false;
  if (AlwaysBasePointer)
    return true;

  // If we need to realign the stack, then the stack pointer can no longer
  // serve as an offset into the caller's stack space. As a result, we need a
  // base pointer.
  return needsStackRealignment(MF);
}

/// Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool PPCRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  assert(Offset < 0 && "Local offset must be negative");

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores that have
  // an r+i form. Return false for everything else.
  unsigned OpC = MI->getOpcode();
  if (!ImmToIdxMap.count(OpC))
    return false;

  // Don't generate a new virtual base register just to add zero to it.
  if ((OpC == PPC::ADDI || OpC == PPC::ADDI8) &&
      MI->getOperand(2).getImm() == 0)
    return false;

  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCFrameLowering *TFI = getFrameLowering(MF);
  unsigned StackEst = TFI->determineFrameLayout(MF, true);

  // If we likely don't need a stack frame, then we probably don't need a
  // virtual base register either.
  if (!StackEst)
    return false;

  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += StackEst;

  // The frame pointer will point to the end of the stack, so estimate the
  // offset as the difference between the object offset and the FP location.
  return !isFrameOffsetLegal(MI, getBaseRegister(MF), Offset);
}
/// Insert defining instruction(s) for BaseReg to
/// be a pointer to FrameIdx at the beginning of the basic block.
void PPCRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                   Register BaseReg,
                                                   int FrameIdx,
                                                   int64_t Offset) const {
  unsigned ADDriOpc = TM.isPPC64() ? PPC::ADDI8 : PPC::ADDI;

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL;                  // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));

  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
      .addFrameIndex(FrameIdx).addImm(Offset);
}

void PPCRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                        int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {
    ++FIOperandNum;
    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");
  }

  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
  unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum);
  Offset += MI.getOperand(OffsetOperandNo).getImm();
  MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  const MCInstrDesc &MCID = MI.getDesc();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MRI.constrainRegClass(BaseReg,
                        TII.getRegClass(MCID, FIOperandNum, this, MF));
}

bool PPCRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                         Register BaseReg,
                                         int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {
    ++FIOperandNum;
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");
  }

  unsigned OffsetOperandNo = getOffsetONFromFION(*MI, FIOperandNum);
  Offset += MI->getOperand(OffsetOperandNo).getImm();

  return MI->getOpcode() == PPC::DBG_VALUE || // DBG_VALUE is always Reg+Imm
         MI->getOpcode() == TargetOpcode::STACKMAP ||
         MI->getOpcode() == TargetOpcode::PATCHPOINT ||
         (isInt<16>(Offset) && (Offset % offsetMinAlign(*MI)) == 0);
}