//===-- PPCFrameLowering.cpp - PPC Frame Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the PPC implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/PPCPredicates.h"
#include "PPCFrameLowering.h"
#include "PPCInstrBuilder.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define DEBUG_TYPE "framelowering"
STATISTIC(NumPESpillVSR, "Number of spills to vector in prologue");
STATISTIC(NumPEReloadVSR, "Number of reloads from vector in epilogue");
STATISTIC(NumPrologProbed, "Number of prologues probed");

// When enabled, callee-saved GPRs may be spilled to volatile vector registers
// in the prologue instead of to stack slots (see the NumPESpillVSR /
// NumPEReloadVSR statistics above). Off by default.
static cl::opt<bool>
EnablePEVectorSpills("ppc-enable-pe-vector-spills",
                     cl::desc("Enable spills in prologue to vector registers."),
                     cl::init(false), cl::Hidden);

/// Byte offset, relative to the stack pointer, of the slot in the *caller's*
/// frame where the return address (LR) is saved. The offset is positive
/// because the slot is above the callee's SP, in the caller-owned linkage
/// area.
static unsigned computeReturnSaveOffset(const PPCSubtarget &STI) {
  if (STI.isAIXABI())
    return STI.isPPC64() ? 16 : 8;
  // SVR4 ABI:
  return STI.isPPC64() ? 16 : 4;
}

/// Byte offset of the TOC-pointer save slot within the linkage area.
/// ELFv2 places it at 24 (its linkage area is smaller); ELFv1 and 64-bit AIX
/// place it at 40.
static unsigned computeTOCSaveOffset(const PPCSubtarget &STI) {
  if (STI.isAIXABI())
    return STI.isPPC64() ? 40 : 20;
  return STI.isELFv2ABI() ? 24 : 40;
}

/// Byte offset (negative, i.e. below the incoming SP) where the old frame
/// pointer is saved when a frame pointer is in use.
static unsigned computeFramePointerSaveOffset(const PPCSubtarget &STI) {
  // First slot in the general register save area.
  return STI.isPPC64() ? -8U : -4U;
}

/// Size in bytes of the ABI-mandated linkage area at the top of every stack
/// frame (back chain, CR save, LR save, and — depending on the ABI — TOC and
/// reserved words).
static unsigned computeLinkageSize(const PPCSubtarget &STI) {
  // AIX and 64-bit ELF: 4 doublewords for ELFv2, 6 words/doublewords
  // otherwise, scaled by the pointer size.
  if (STI.isAIXABI() || STI.isPPC64())
    return (STI.isELFv2ABI() ? 4 : 6) * (STI.isPPC64() ? 8 : 4);

  // 32-bit SVR4 ABI:
  return 8;
}

/// Byte offset (negative) of the base-pointer save slot, used when the stack
/// must be realigned.
static unsigned computeBasePointerSaveOffset(const PPCSubtarget &STI) {
  // Third slot in the general purpose register save area.
  // 32-bit PIC ELF additionally reserves the second slot for the PIC base
  // pointer (r30), pushing the BP save down one slot.
  if (STI.is32BitELFABI() && STI.getTargetMachine().isPositionIndependent())
    return -12U;

  // Second slot in the general purpose register save area.
  return STI.isPPC64() ? -16U : -8U;
}

/// Byte offset within the linkage area where CR is saved. 32-bit AIX saves CR
/// at offset 4; all other ABIs here use offset 8.
static unsigned computeCRSaveOffset(const PPCSubtarget &STI) {
  return (STI.isAIXABI() && !STI.isPPC64()) ? 4 : 8;
}

// The constructor precomputes all ABI-dependent offsets/sizes once from the
// subtarget so the queries above are simple member reads afterwards.
PPCFrameLowering::PPCFrameLowering(const PPCSubtarget &STI)
    : TargetFrameLowering(TargetFrameLowering::StackGrowsDown,
                          STI.getPlatformStackAlignment(), 0),
      Subtarget(STI), ReturnSaveOffset(computeReturnSaveOffset(Subtarget)),
      TOCSaveOffset(computeTOCSaveOffset(Subtarget)),
      FramePointerSaveOffset(computeFramePointerSaveOffset(Subtarget)),
      LinkageSize(computeLinkageSize(Subtarget)),
      BasePointerSaveOffset(computeBasePointerSaveOffset(Subtarget)),
      CRSaveOffset(computeCRSaveOffset(Subtarget)) {}

// With the SVR4 ABI, callee-saved registers have fixed offsets on the stack.
// Returns the ABI-appropriate table of (register, offset) pairs and reports
// its length through \p NumEntries.
const PPCFrameLowering::SpillSlot *PPCFrameLowering::getCalleeSavedSpillSlots(
    unsigned &NumEntries) const {

  // Floating-point register save area offsets.
// Offsets below are in bytes, negative, measured from the end (highest
// address) of the corresponding save area.
#define CALLEE_SAVED_FPRS \
      {PPC::F31, -8},     \
      {PPC::F30, -16},    \
      {PPC::F29, -24},    \
      {PPC::F28, -32},    \
      {PPC::F27, -40},    \
      {PPC::F26, -48},    \
      {PPC::F25, -56},    \
      {PPC::F24, -64},    \
      {PPC::F23, -72},    \
      {PPC::F22, -80},    \
      {PPC::F21, -88},    \
      {PPC::F20, -96},    \
      {PPC::F19, -104},   \
      {PPC::F18, -112},   \
      {PPC::F17, -120},   \
      {PPC::F16, -128},   \
      {PPC::F15, -136},   \
      {PPC::F14, -144}

// 32-bit general purpose register save area offsets shared by ELF and
// AIX. AIX has an extra CSR with r13.
#define CALLEE_SAVED_GPRS32 \
      {PPC::R31, -4},       \
      {PPC::R30, -8},       \
      {PPC::R29, -12},      \
      {PPC::R28, -16},      \
      {PPC::R27, -20},      \
      {PPC::R26, -24},      \
      {PPC::R25, -28},      \
      {PPC::R24, -32},      \
      {PPC::R23, -36},      \
      {PPC::R22, -40},      \
      {PPC::R21, -44},      \
      {PPC::R20, -48},      \
      {PPC::R19, -52},      \
      {PPC::R18, -56},      \
      {PPC::R17, -60},      \
      {PPC::R16, -64},      \
      {PPC::R15, -68},      \
      {PPC::R14, -72}

// 64-bit general purpose register save area offsets.
#define CALLEE_SAVED_GPRS64 \
      {PPC::X31, -8},       \
      {PPC::X30, -16},      \
      {PPC::X29, -24},      \
      {PPC::X28, -32},      \
      {PPC::X27, -40},      \
      {PPC::X26, -48},      \
      {PPC::X25, -56},      \
      {PPC::X24, -64},      \
      {PPC::X23, -72},      \
      {PPC::X22, -80},      \
      {PPC::X21, -88},      \
      {PPC::X20, -96},      \
      {PPC::X19, -104},     \
      {PPC::X18, -112},     \
      {PPC::X17, -120},     \
      {PPC::X16, -128},     \
      {PPC::X15, -136},     \
      {PPC::X14, -144}

// Vector register save area offsets (16 bytes per vector register).
#define CALLEE_SAVED_VRS \
      {PPC::V31, -16},   \
      {PPC::V30, -32},   \
      {PPC::V29, -48},   \
      {PPC::V28, -64},   \
      {PPC::V27, -80},   \
      {PPC::V26, -96},   \
      {PPC::V25, -112},  \
      {PPC::V24, -128},  \
      {PPC::V23, -144},  \
      {PPC::V22, -160},  \
      {PPC::V21, -176},  \
      {PPC::V20, -192}

  // Note that the offsets here overlap, but this is fixed up in
  // processFunctionBeforeFrameFinalized.

  static const SpillSlot ELFOffsets32[] = {
      CALLEE_SAVED_FPRS,
      CALLEE_SAVED_GPRS32,

      // CR save area offset.  We map each of the nonvolatile CR fields
      // to the slot for CR2, which is the first of the nonvolatile CR
      // fields to be assigned, so that we only allocate one save slot.
      // See PPCRegisterInfo::hasReservedSpillSlot() for more information.
      {PPC::CR2, -4},

      // VRSAVE save area offset.
      {PPC::VRSAVE, -4},

      CALLEE_SAVED_VRS,

      // SPE register save area (overlaps Vector save area).
      {PPC::S31, -8},
      {PPC::S30, -16},
      {PPC::S29, -24},
      {PPC::S28, -32},
      {PPC::S27, -40},
      {PPC::S26, -48},
      {PPC::S25, -56},
      {PPC::S24, -64},
      {PPC::S23, -72},
      {PPC::S22, -80},
      {PPC::S21, -88},
      {PPC::S20, -96},
      {PPC::S19, -104},
      {PPC::S18, -112},
      {PPC::S17, -120},
      {PPC::S16, -128},
      {PPC::S15, -136},
      {PPC::S14, -144}};

  static const SpillSlot ELFOffsets64[] = {
      CALLEE_SAVED_FPRS,
      CALLEE_SAVED_GPRS64,

      // VRSAVE save area offset.
      {PPC::VRSAVE, -4},
      CALLEE_SAVED_VRS
  };

  static const SpillSlot AIXOffsets32[] = {CALLEE_SAVED_FPRS,
                                           CALLEE_SAVED_GPRS32,
                                           // Add AIX's extra CSR.
                                           {PPC::R13, -76},
                                           CALLEE_SAVED_VRS};

  static const SpillSlot AIXOffsets64[] = {
      CALLEE_SAVED_FPRS, CALLEE_SAVED_GPRS64, CALLEE_SAVED_VRS};

  // Select the table matching the target ABI/word size.
  if (Subtarget.is64BitELFABI()) {
    NumEntries = array_lengthof(ELFOffsets64);
    return ELFOffsets64;
  }

  if (Subtarget.is32BitELFABI()) {
    NumEntries = array_lengthof(ELFOffsets32);
    return ELFOffsets32;
  }

  assert(Subtarget.isAIXABI() && "Unexpected ABI.");

  if (Subtarget.isPPC64()) {
    NumEntries = array_lengthof(AIXOffsets64);
    return AIXOffsets64;
  }

  NumEntries = array_lengthof(AIXOffsets32);
  return AIXOffsets32;
}

/// True if this function spills any condition-register field.
static bool spillsCR(const MachineFunction &MF) {
  const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  return FuncInfo->isCRSpilled();
}

/// True if this function spills anything at all (per PPCFunctionInfo).
static bool hasSpills(const MachineFunction &MF) {
  const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  return FuncInfo->hasSpills();
}

/// True if this function has spills that cannot use register+immediate
/// (r+i) addressing.
static bool hasNonRISpills(const MachineFunction &MF) {
  const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  return FuncInfo->hasNonRISpills();
}

/// MustSaveLR - Return true if this function requires that we save the LR
/// register onto the stack in the prolog and restore it in the epilog of the
/// function.
static bool MustSaveLR(const MachineFunction &MF, unsigned LR) {
  const PPCFunctionInfo *MFI = MF.getInfo<PPCFunctionInfo>();

  // We need a save/restore of LR if there is any def of LR (which is
  // defined by calls, including the PIC setup sequence), or if there is
  // some use of the LR stack slot (e.g. for builtin_return_address).
  // (LR comes in 32 and 64 bit versions.)
  // Any def of LR in the MRI def-list means a call (or equivalent) clobbers
  // LR; isLRStoreRequired() catches explicit uses of the LR save slot.
  MachineRegisterInfo::def_iterator RI = MF.getRegInfo().def_begin(LR);
  return RI != MF.getRegInfo().def_end() || MFI->isLRStoreRequired();
}

/// determineFrameLayoutAndUpdate - Determine the size of the frame and maximum
/// call frame size. Update the MachineFunction object with the stack size.
uint64_t
PPCFrameLowering::determineFrameLayoutAndUpdate(MachineFunction &MF,
                                                bool UseEstimate) const {
  unsigned NewMaxCallFrameSize = 0;
  uint64_t FrameSize = determineFrameLayout(MF, UseEstimate,
                                            &NewMaxCallFrameSize);
  // Record both results on the MachineFrameInfo for later passes.
  MF.getFrameInfo().setStackSize(FrameSize);
  MF.getFrameInfo().setMaxCallFrameSize(NewMaxCallFrameSize);
  return FrameSize;
}

/// determineFrameLayout - Determine the size of the frame and maximum call
/// frame size. Returns 0 when the whole frame fits in the red zone and no
/// stack-pointer adjustment is needed. \p NewMaxCallFrameSize, if non-null,
/// receives the (linkage-area-padded, possibly realigned) max call frame size.
uint64_t
PPCFrameLowering::determineFrameLayout(const MachineFunction &MF,
                                       bool UseEstimate,
                                       unsigned *NewMaxCallFrameSize) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();

  // Get the number of bytes to allocate from the FrameInfo.
  // UseEstimate is for callers that run before frame finalization.
  uint64_t FrameSize =
      UseEstimate ? MFI.estimateStackSize(MF) : MFI.getStackSize();

  // Get stack alignments. The frame must be aligned to the greatest of these:
  Align TargetAlign = getStackAlign(); // alignment required per the ABI
  Align MaxAlign = MFI.getMaxAlign();  // algmt required by data in frame
  Align Alignment = std::max(TargetAlign, MaxAlign);

  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();

  unsigned LR = RegInfo->getRARegister();
  bool DisableRedZone = MF.getFunction().hasFnAttribute(Attribute::NoRedZone);
  bool CanUseRedZone = !MFI.hasVarSizedObjects() &&  // No dynamic alloca.
                       !MFI.adjustsStack() &&        // No calls.
                       !MustSaveLR(MF, LR) &&        // No need to save LR.
                       !FI->mustSaveTOC() &&         // No need to save TOC.
                       !RegInfo->hasBasePointer(MF); // No special alignment.

  // Note: for PPC32 SVR4ABI, we can still generate stackless
  // code if all local vars are reg-allocated.
  bool FitsInRedZone = FrameSize <= Subtarget.getRedZoneSize();

  // Check whether we can skip adjusting the stack pointer (by using red zone)
  if (!DisableRedZone && CanUseRedZone && FitsInRedZone) {
    // No need for frame
    return 0;
  }

  // Get the maximum call frame size of all the calls.
  unsigned maxCallFrameSize = MFI.getMaxCallFrameSize();

  // Maximum call frame needs to be at least big enough for linkage area.
  unsigned minCallFrameSize = getLinkageSize();
  maxCallFrameSize = std::max(maxCallFrameSize, minCallFrameSize);

  // If we have dynamic alloca then maxCallFrameSize needs to be aligned so
  // that allocations will be aligned.
  if (MFI.hasVarSizedObjects())
    maxCallFrameSize = alignTo(maxCallFrameSize, Alignment);

  // Update the new max call frame size if the caller passes in a valid pointer.
  if (NewMaxCallFrameSize)
    *NewMaxCallFrameSize = maxCallFrameSize;

  // Include call frame size in total.
  FrameSize += maxCallFrameSize;

  // Make sure the frame is aligned.
  FrameSize = alignTo(FrameSize, Alignment);

  return FrameSize;
}

// hasFP - Return true if the specified function actually has a dedicated frame
// pointer register.
bool PPCFrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  // FIXME: This is pretty much broken by design: hasFP() might be called really
  // early, before the stack layout was calculated and thus hasFP() might return
  // true or false here depending on the time of call.
  return (MFI.getStackSize()) && needsFP(MF);
}

// needsFP - Return true if the specified function should have a dedicated frame
// pointer register.
// This is true if the function has variable sized allocas or if frame pointer
// elimination is disabled.
bool PPCFrameLowering::needsFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Naked functions have no stack frame pushed, so we don't have a frame
  // pointer.
  if (MF.getFunction().hasFnAttribute(Attribute::Naked))
    return false;

  // A frame pointer is needed when FP elimination is disabled, the frame is
  // dynamic (allocas / stackmaps / patchpoints), setjmp-like calls are
  // exposed, or a guaranteed tail call uses the fast calling convention.
  return MF.getTarget().Options.DisableFramePointerElim(MF) ||
         MFI.hasVarSizedObjects() || MFI.hasStackMap() || MFI.hasPatchPoint() ||
         MF.exposesReturnsTwice() ||
         (MF.getTarget().Options.GuaranteedTailCallOpt &&
          MF.getInfo<PPCFunctionInfo>()->hasFastCall());
}

// Rewrite every use of the placeholder registers PPC::FP/FP8 and PPC::BP/BP8
// to the concrete physical registers chosen for this function: r31/x31 when a
// frame pointer is needed (otherwise the stack pointer r1/x1), and the base
// register (or the FP choice when no base pointer is in use).
void PPCFrameLowering::replaceFPWithRealFP(MachineFunction &MF) const {
  bool is31 = needsFP(MF);
  unsigned FPReg = is31 ? PPC::R31 : PPC::R1;
  unsigned FP8Reg = is31 ? PPC::X31 : PPC::X1;

  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  bool HasBP = RegInfo->hasBasePointer(MF);
  unsigned BPReg = HasBP ? (unsigned) RegInfo->getBaseRegister(MF) : FPReg;
  unsigned BP8Reg = HasBP ? (unsigned) PPC::X30 : FP8Reg;

  // Walk every register operand of every instruction and patch placeholders.
  for (MachineBasicBlock &MBB : MF)
    for (MachineBasicBlock::iterator MBBI = MBB.end(); MBBI != MBB.begin();) {
      --MBBI;
      for (unsigned I = 0, E = MBBI->getNumOperands(); I != E; ++I) {
        MachineOperand &MO = MBBI->getOperand(I);
        if (!MO.isReg())
          continue;

        switch (MO.getReg()) {
        case PPC::FP:
          MO.setReg(FPReg);
          break;
        case PPC::FP8:
          MO.setReg(FP8Reg);
          break;
        case PPC::BP:
          MO.setReg(BPReg);
          break;
        case PPC::BP8:
          MO.setReg(BP8Reg);
          break;

        }
      }
    }
}

/* This function will do the following:
   - If MBB is an entry or exit block, set SR1 and SR2 to R0 and R12
     respectively (defaults recommended by the ABI) and return true
   - If MBB is not an entry block, initialize the register scavenger and look
     for available registers.
   - If the defaults (R0/R12) are available, return true
   - If TwoUniqueRegsRequired is set to true, it looks for two unique
     registers. Otherwise, look for a single available register.
   - If the required registers are found, set SR1 and SR2 and return true.
   - If the required registers are not found, set SR2 or both SR1 and SR2 to
     PPC::NoRegister and return false.

   Note that if both SR1 and SR2 are valid parameters and TwoUniqueRegsRequired
   is not set, this function will attempt to find two different registers, but
   still return true if only one register is available (and set SR1 == SR2).
*/
bool
PPCFrameLowering::findScratchRegister(MachineBasicBlock *MBB,
                                      bool UseAtEnd,
                                      bool TwoUniqueRegsRequired,
                                      Register *SR1,
                                      Register *SR2) const {
  RegScavenger RS;
  // ABI-recommended defaults: r0/x0 and r12/x12 (both volatile).
  Register R0 = Subtarget.isPPC64() ? PPC::X0 : PPC::R0;
  Register R12 = Subtarget.isPPC64() ? PPC::X12 : PPC::R12;

  // Set the defaults for the two scratch registers.
  if (SR1)
    *SR1 = R0;

  if (SR2) {
    assert (SR1 && "Asking for the second scratch register but not the first?");
    *SR2 = R12;
  }

  // If MBB is an entry or exit block, use R0 and R12 as the scratch registers.
  if ((UseAtEnd && MBB->isReturnBlock()) ||
      (!UseAtEnd && (&MBB->getParent()->front() == MBB)))
    return true;

  RS.enterBasicBlock(*MBB);

  if (UseAtEnd && !MBB->empty()) {
    // The scratch register will be used at the end of the block, so must
    // consider all registers used within the block

    MachineBasicBlock::iterator MBBI = MBB->getFirstTerminator();
    // If no terminator, back iterator up to previous instruction.
    if (MBBI == MBB->end())
      MBBI = std::prev(MBBI);

    // Advance the scavenger's liveness tracking up to MBBI.
    if (MBBI != MBB->begin())
      RS.forward(MBBI);
  }

  // If the two registers are available, we're all good.
  // Note that we only return here if both R0 and R12 are available because
  // although the function may not require two unique registers, it may benefit
  // from having two so we should try to provide them.
  if (!RS.isRegUsed(R0) && !RS.isRegUsed(R12))
    return true;

  // Get the list of callee-saved registers for the target.
  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(MBB->getParent());

  // Get all the available registers in the block.
  BitVector BV = RS.getRegsAvailable(Subtarget.isPPC64() ? &PPC::G8RCRegClass :
                                     &PPC::GPRCRegClass);

  // We shouldn't use callee-saved registers as scratch registers as they may be
  // available when looking for a candidate block for shrink wrapping but not
  // available when the actual prologue/epilogue is being emitted because they
  // were added as live-in to the prologue block by PrologueEpilogueInserter.
  for (int i = 0; CSRegs[i]; ++i)
    BV.reset(CSRegs[i]);

  // Set the first scratch register to the first available one.
  if (SR1) {
    int FirstScratchReg = BV.find_first();
    *SR1 = FirstScratchReg == -1 ? (unsigned)PPC::NoRegister : FirstScratchReg;
  }

  // If there is another one available, set the second scratch register to that.
  // Otherwise, set it to either PPC::NoRegister if this function requires two
  // or to whatever SR1 is set to if this function doesn't require two.
  if (SR2) {
    int SecondScratchReg = BV.find_next(*SR1);
    if (SecondScratchReg != -1)
      *SR2 = SecondScratchReg;
    else
      *SR2 = TwoUniqueRegsRequired ? Register() : *SR1;
  }

  // Now that we've done our best to provide both registers, double check
  // whether we were unable to provide enough.
  if (BV.count() < (TwoUniqueRegsRequired ? 2U : 1U))
    return false;

  return true;
}

// We need a scratch register for spilling LR and for spilling CR.
// By default, we use two scratch registers to hide latency. However, if only
// one scratch register is available, we can adjust for that by not overlapping
// the spill code. However, if we need to realign the stack (i.e. have a base
// pointer) and the stack frame is large, we need two scratch registers.
// Also, stack probe requires two scratch registers, one for old sp, one for
// large frame and large probe size.
bool
PPCFrameLowering::twoUniqueScratchRegsRequired(MachineBasicBlock *MBB) const {
  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  MachineFunction &MF = *(MBB->getParent());
  bool HasBP = RegInfo->hasBasePointer(MF);
  // NOTE(review): determineFrameLayout returns uint64_t; the narrowing to
  // 'unsigned' here mirrors the large-frame check in the prologue — confirm
  // frames never exceed 32 bits on this path.
  unsigned FrameSize = determineFrameLayout(MF);
  int NegFrameSize = -FrameSize;
  bool IsLargeFrame = !isInt<16>(NegFrameSize);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  Align MaxAlign = MFI.getMaxAlign();
  // 32-bit SVR4 is the only ABI here without a red zone.
  bool HasRedZone = Subtarget.isPPC64() || !Subtarget.isSVR4ABI();
  const PPCTargetLowering &TLI = *Subtarget.getTargetLowering();

  return ((IsLargeFrame || !HasRedZone) && HasBP && MaxAlign > 1) ||
         TLI.hasInlineStackProbe(MF);
}

// A block can host the prologue only if enough scratch registers can be found
// in it (shrink wrapping may pick a non-entry block).
bool PPCFrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
  MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);

  return findScratchRegister(TmpMBB, false,
                             twoUniqueScratchRegsRequired(TmpMBB));
}

// Likewise for the epilogue; only one scratch register is ever needed there.
bool PPCFrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
  MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);

  return findScratchRegister(TmpMBB, true);
}

// Decide whether the stack-pointer update (stdu) may be sunk past the
// callee-saved spills in the prologue (see emitPrologue). Sinking it lets the
// spill stores proceed without waiting on the store-with-update.
bool PPCFrameLowering::stackUpdateCanBeMoved(MachineFunction &MF) const {
  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();

  // Abort if there is no register info or function info.
  if (!RegInfo || !FI)
    return false;

  // Only move the stack update on ELFv2 ABI and PPC64.
  if (!Subtarget.isELFv2ABI() || !Subtarget.isPPC64())
    return false;

  // Check the frame size first and return false if it does not fit the
  // requirements.
  // We need a non-zero frame size as well as a frame that will fit in the red
  // zone. This is because by moving the stack pointer update we are now storing
  // to the red zone until the stack pointer is updated. If we get an interrupt
  // inside the prologue but before the stack update we now have a number of
  // stores to the red zone and those stores must all fit.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned FrameSize = MFI.getStackSize();
  if (!FrameSize || FrameSize > Subtarget.getRedZoneSize())
    return false;

  // Frame pointers and base pointers complicate matters so don't do anything
  // if we have them. For example having a frame pointer will sometimes require
  // a copy of r1 into r31 and that makes keeping track of updates to r1 more
  // difficult. Similar situation exists with setjmp.
  if (hasFP(MF) || RegInfo->hasBasePointer(MF) || MF.exposesReturnsTwice())
    return false;

  // Calls to fast_cc functions use different rules for passing parameters on
  // the stack from the ABI and using PIC base in the function imposes
  // similar restrictions to using the base pointer. It is not generally safe
  // to move the stack pointer update in these situations.
  if (FI->hasFastCall() || FI->usesPICBase())
    return false;

  // Finally we can move the stack update if we do not require register
  // scavenging. Register scavenging can introduce more spills and so
  // may make the frame size larger than we have computed.
601 return !RegInfo->requiresFrameIndexScavenging(MF); 602 } 603 604 void PPCFrameLowering::emitPrologue(MachineFunction &MF, 605 MachineBasicBlock &MBB) const { 606 MachineBasicBlock::iterator MBBI = MBB.begin(); 607 MachineFrameInfo &MFI = MF.getFrameInfo(); 608 const PPCInstrInfo &TII = *Subtarget.getInstrInfo(); 609 const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo(); 610 const PPCTargetLowering &TLI = *Subtarget.getTargetLowering(); 611 612 MachineModuleInfo &MMI = MF.getMMI(); 613 const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo(); 614 DebugLoc dl; 615 // AIX assembler does not support cfi directives. 616 const bool needsCFI = MF.needsFrameMoves() && !Subtarget.isAIXABI(); 617 618 // Get processor type. 619 bool isPPC64 = Subtarget.isPPC64(); 620 // Get the ABI. 621 bool isSVR4ABI = Subtarget.isSVR4ABI(); 622 bool isELFv2ABI = Subtarget.isELFv2ABI(); 623 assert((isSVR4ABI || Subtarget.isAIXABI()) && "Unsupported PPC ABI."); 624 625 // Work out frame sizes. 626 uint64_t FrameSize = determineFrameLayoutAndUpdate(MF); 627 int64_t NegFrameSize = -FrameSize; 628 if (!isInt<32>(FrameSize) || !isInt<32>(NegFrameSize)) 629 llvm_unreachable("Unhandled stack size!"); 630 631 if (MFI.isFrameAddressTaken()) 632 replaceFPWithRealFP(MF); 633 634 // Check if the link register (LR) must be saved. 635 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 636 bool MustSaveLR = FI->mustSaveLR(); 637 bool MustSaveTOC = FI->mustSaveTOC(); 638 const SmallVectorImpl<Register> &MustSaveCRs = FI->getMustSaveCRs(); 639 bool MustSaveCR = !MustSaveCRs.empty(); 640 // Do we have a frame pointer and/or base pointer for this function? 641 bool HasFP = hasFP(MF); 642 bool HasBP = RegInfo->hasBasePointer(MF); 643 bool HasRedZone = isPPC64 || !isSVR4ABI; 644 bool HasROPProtect = Subtarget.hasROPProtect(); 645 bool HasPrivileged = Subtarget.hasPrivileged(); 646 647 Register SPReg = isPPC64 ? 
PPC::X1 : PPC::R1; 648 Register BPReg = RegInfo->getBaseRegister(MF); 649 Register FPReg = isPPC64 ? PPC::X31 : PPC::R31; 650 Register LRReg = isPPC64 ? PPC::LR8 : PPC::LR; 651 Register TOCReg = isPPC64 ? PPC::X2 : PPC::R2; 652 Register ScratchReg; 653 Register TempReg = isPPC64 ? PPC::X12 : PPC::R12; // another scratch reg 654 // ...(R12/X12 is volatile in both Darwin & SVR4, & can't be a function arg.) 655 const MCInstrDesc& MFLRInst = TII.get(isPPC64 ? PPC::MFLR8 656 : PPC::MFLR ); 657 const MCInstrDesc& StoreInst = TII.get(isPPC64 ? PPC::STD 658 : PPC::STW ); 659 const MCInstrDesc& StoreUpdtInst = TII.get(isPPC64 ? PPC::STDU 660 : PPC::STWU ); 661 const MCInstrDesc& StoreUpdtIdxInst = TII.get(isPPC64 ? PPC::STDUX 662 : PPC::STWUX); 663 const MCInstrDesc& LoadImmShiftedInst = TII.get(isPPC64 ? PPC::LIS8 664 : PPC::LIS ); 665 const MCInstrDesc& OrImmInst = TII.get(isPPC64 ? PPC::ORI8 666 : PPC::ORI ); 667 const MCInstrDesc& OrInst = TII.get(isPPC64 ? PPC::OR8 668 : PPC::OR ); 669 const MCInstrDesc& SubtractCarryingInst = TII.get(isPPC64 ? PPC::SUBFC8 670 : PPC::SUBFC); 671 const MCInstrDesc& SubtractImmCarryingInst = TII.get(isPPC64 ? PPC::SUBFIC8 672 : PPC::SUBFIC); 673 const MCInstrDesc &MoveFromCondRegInst = TII.get(isPPC64 ? PPC::MFCR8 674 : PPC::MFCR); 675 const MCInstrDesc &StoreWordInst = TII.get(isPPC64 ? PPC::STW8 : PPC::STW); 676 const MCInstrDesc &HashST = 677 TII.get(isPPC64 ? (HasPrivileged ? PPC::HASHSTP8 : PPC::HASHST8) 678 : (HasPrivileged ? PPC::HASHSTP : PPC::HASHST)); 679 680 // Regarding this assert: Even though LR is saved in the caller's frame (i.e., 681 // LROffset is positive), that slot is callee-owned. Because PPC32 SVR4 has no 682 // Red Zone, an asynchronous event (a form of "callee") could claim a frame & 683 // overwrite it, so PPC32 SVR4 must claim at least a minimal frame to save LR. 
684 assert((isPPC64 || !isSVR4ABI || !(!FrameSize && (MustSaveLR || HasFP))) && 685 "FrameSize must be >0 to save/restore the FP or LR for 32-bit SVR4."); 686 687 // Using the same bool variable as below to suppress compiler warnings. 688 bool SingleScratchReg = findScratchRegister( 689 &MBB, false, twoUniqueScratchRegsRequired(&MBB), &ScratchReg, &TempReg); 690 assert(SingleScratchReg && 691 "Required number of registers not available in this block"); 692 693 SingleScratchReg = ScratchReg == TempReg; 694 695 int64_t LROffset = getReturnSaveOffset(); 696 697 int64_t FPOffset = 0; 698 if (HasFP) { 699 MachineFrameInfo &MFI = MF.getFrameInfo(); 700 int FPIndex = FI->getFramePointerSaveIndex(); 701 assert(FPIndex && "No Frame Pointer Save Slot!"); 702 FPOffset = MFI.getObjectOffset(FPIndex); 703 } 704 705 int64_t BPOffset = 0; 706 if (HasBP) { 707 MachineFrameInfo &MFI = MF.getFrameInfo(); 708 int BPIndex = FI->getBasePointerSaveIndex(); 709 assert(BPIndex && "No Base Pointer Save Slot!"); 710 BPOffset = MFI.getObjectOffset(BPIndex); 711 } 712 713 int64_t PBPOffset = 0; 714 if (FI->usesPICBase()) { 715 MachineFrameInfo &MFI = MF.getFrameInfo(); 716 int PBPIndex = FI->getPICBasePointerSaveIndex(); 717 assert(PBPIndex && "No PIC Base Pointer Save Slot!"); 718 PBPOffset = MFI.getObjectOffset(PBPIndex); 719 } 720 721 // Get stack alignments. 722 Align MaxAlign = MFI.getMaxAlign(); 723 if (HasBP && MaxAlign > 1) 724 assert(Log2(MaxAlign) < 16 && "Invalid alignment!"); 725 726 // Frames of 32KB & larger require special handling because they cannot be 727 // indexed into with a simple STDU/STWU/STD/STW immediate offset operand. 728 bool isLargeFrame = !isInt<16>(NegFrameSize); 729 730 // Check if we can move the stack update instruction (stdu) down the prologue 731 // past the callee saves. Hopefully this will avoid the situation where the 732 // saves are waiting for the update on the store with update to complete. 
733 MachineBasicBlock::iterator StackUpdateLoc = MBBI; 734 bool MovingStackUpdateDown = false; 735 736 // Check if we can move the stack update. 737 if (stackUpdateCanBeMoved(MF)) { 738 const std::vector<CalleeSavedInfo> &Info = MFI.getCalleeSavedInfo(); 739 for (CalleeSavedInfo CSI : Info) { 740 // If the callee saved register is spilled to a register instead of the 741 // stack then the spill no longer uses the stack pointer. 742 // This can lead to two consequences: 743 // 1) We no longer need to update the stack because the function does not 744 // spill any callee saved registers to stack. 745 // 2) We have a situation where we still have to update the stack pointer 746 // even though some registers are spilled to other registers. In 747 // this case the current code moves the stack update to an incorrect 748 // position. 749 // In either case we should abort moving the stack update operation. 750 if (CSI.isSpilledToReg()) { 751 StackUpdateLoc = MBBI; 752 MovingStackUpdateDown = false; 753 break; 754 } 755 756 int FrIdx = CSI.getFrameIdx(); 757 // If the frame index is not negative the callee saved info belongs to a 758 // stack object that is not a fixed stack object. We ignore non-fixed 759 // stack objects because we won't move the stack update pointer past them. 760 if (FrIdx >= 0) 761 continue; 762 763 if (MFI.isFixedObjectIndex(FrIdx) && MFI.getObjectOffset(FrIdx) < 0) { 764 StackUpdateLoc++; 765 MovingStackUpdateDown = true; 766 } else { 767 // We need all of the Frame Indices to meet these conditions. 768 // If they do not, abort the whole operation. 769 StackUpdateLoc = MBBI; 770 MovingStackUpdateDown = false; 771 break; 772 } 773 } 774 775 // If the operation was not aborted then update the object offset. 
776 if (MovingStackUpdateDown) { 777 for (CalleeSavedInfo CSI : Info) { 778 int FrIdx = CSI.getFrameIdx(); 779 if (FrIdx < 0) 780 MFI.setObjectOffset(FrIdx, MFI.getObjectOffset(FrIdx) + NegFrameSize); 781 } 782 } 783 } 784 785 // Where in the prologue we move the CR fields depends on how many scratch 786 // registers we have, and if we need to save the link register or not. This 787 // lambda is to avoid duplicating the logic in 2 places. 788 auto BuildMoveFromCR = [&]() { 789 if (isELFv2ABI && MustSaveCRs.size() == 1) { 790 // In the ELFv2 ABI, we are not required to save all CR fields. 791 // If only one CR field is clobbered, it is more efficient to use 792 // mfocrf to selectively save just that field, because mfocrf has short 793 // latency compares to mfcr. 794 assert(isPPC64 && "V2 ABI is 64-bit only."); 795 MachineInstrBuilder MIB = 796 BuildMI(MBB, MBBI, dl, TII.get(PPC::MFOCRF8), TempReg); 797 MIB.addReg(MustSaveCRs[0], RegState::Kill); 798 } else { 799 MachineInstrBuilder MIB = 800 BuildMI(MBB, MBBI, dl, MoveFromCondRegInst, TempReg); 801 for (unsigned CRfield : MustSaveCRs) 802 MIB.addReg(CRfield, RegState::ImplicitKill); 803 } 804 }; 805 806 // If we need to spill the CR and the LR but we don't have two separate 807 // registers available, we must spill them one at a time 808 if (MustSaveCR && SingleScratchReg && MustSaveLR) { 809 BuildMoveFromCR(); 810 BuildMI(MBB, MBBI, dl, StoreWordInst) 811 .addReg(TempReg, getKillRegState(true)) 812 .addImm(CRSaveOffset) 813 .addReg(SPReg); 814 } 815 816 if (MustSaveLR) 817 BuildMI(MBB, MBBI, dl, MFLRInst, ScratchReg); 818 819 if (MustSaveCR && !(SingleScratchReg && MustSaveLR)) 820 BuildMoveFromCR(); 821 822 if (HasRedZone) { 823 if (HasFP) 824 BuildMI(MBB, MBBI, dl, StoreInst) 825 .addReg(FPReg) 826 .addImm(FPOffset) 827 .addReg(SPReg); 828 if (FI->usesPICBase()) 829 BuildMI(MBB, MBBI, dl, StoreInst) 830 .addReg(PPC::R30) 831 .addImm(PBPOffset) 832 .addReg(SPReg); 833 if (HasBP) 834 BuildMI(MBB, MBBI, dl, 
StoreInst) 835 .addReg(BPReg) 836 .addImm(BPOffset) 837 .addReg(SPReg); 838 } 839 840 // Generate the instruction to store the LR. In the case where ROP protection 841 // is required the register holding the LR should not be killed as it will be 842 // used by the hash store instruction. 843 if (MustSaveLR) { 844 BuildMI(MBB, StackUpdateLoc, dl, StoreInst) 845 .addReg(ScratchReg, getKillRegState(!HasROPProtect)) 846 .addImm(LROffset) 847 .addReg(SPReg); 848 849 // Add the ROP protection Hash Store instruction. 850 // NOTE: This is technically a violation of the ABI. The hash can be saved 851 // up to 512 bytes into the Protected Zone. This can be outside of the 852 // initial 288 byte volatile program storage region in the Protected Zone. 853 // However, this restriction will be removed in an upcoming revision of the 854 // ABI. 855 if (HasROPProtect) { 856 const int SaveIndex = FI->getROPProtectionHashSaveIndex(); 857 const int64_t ImmOffset = MFI.getObjectOffset(SaveIndex); 858 assert((ImmOffset <= -8 && ImmOffset >= -512) && 859 "ROP hash save offset out of range."); 860 assert(((ImmOffset & 0x7) == 0) && 861 "ROP hash save offset must be 8 byte aligned."); 862 BuildMI(MBB, StackUpdateLoc, dl, HashST) 863 .addReg(ScratchReg, getKillRegState(true)) 864 .addImm(ImmOffset) 865 .addReg(SPReg); 866 } 867 } 868 869 if (MustSaveCR && 870 !(SingleScratchReg && MustSaveLR)) { 871 assert(HasRedZone && "A red zone is always available on PPC64"); 872 BuildMI(MBB, MBBI, dl, StoreWordInst) 873 .addReg(TempReg, getKillRegState(true)) 874 .addImm(CRSaveOffset) 875 .addReg(SPReg); 876 } 877 878 // Skip the rest if this is a leaf function & all spills fit in the Red Zone. 879 if (!FrameSize) 880 return; 881 882 // Adjust stack pointer: r1 += NegFrameSize. 883 // If there is a preferred stack alignment, align R1 now 884 885 if (HasBP && HasRedZone) { 886 // Save a copy of r1 as the base pointer. 
887 BuildMI(MBB, MBBI, dl, OrInst, BPReg) 888 .addReg(SPReg) 889 .addReg(SPReg); 890 } 891 892 // Have we generated a STUX instruction to claim stack frame? If so, 893 // the negated frame size will be placed in ScratchReg. 894 bool HasSTUX = false; 895 896 // If FrameSize <= TLI.getStackProbeSize(MF), as POWER ABI requires backchain 897 // pointer is always stored at SP, we will get a free probe due to an essential 898 // STU(X) instruction. 899 if (TLI.hasInlineStackProbe(MF) && FrameSize > TLI.getStackProbeSize(MF)) { 900 // To be consistent with other targets, a pseudo instruction is emitted and 901 // will be later expanded in `inlineStackProbe`. 902 BuildMI(MBB, MBBI, dl, 903 TII.get(isPPC64 ? PPC::PROBED_STACKALLOC_64 904 : PPC::PROBED_STACKALLOC_32)) 905 .addDef(TempReg) 906 .addDef(ScratchReg) // ScratchReg stores the old sp. 907 .addImm(NegFrameSize); 908 // FIXME: HasSTUX is only read if HasRedZone is not set, in such case, we 909 // update the ScratchReg to meet the assumption that ScratchReg contains 910 // the NegFrameSize. This solution is rather tricky. 911 if (!HasRedZone) { 912 BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBF), ScratchReg) 913 .addReg(ScratchReg) 914 .addReg(SPReg); 915 HasSTUX = true; 916 } 917 } else { 918 // This condition must be kept in sync with canUseAsPrologue. 919 if (HasBP && MaxAlign > 1) { 920 if (isPPC64) 921 BuildMI(MBB, MBBI, dl, TII.get(PPC::RLDICL), ScratchReg) 922 .addReg(SPReg) 923 .addImm(0) 924 .addImm(64 - Log2(MaxAlign)); 925 else // PPC32... 
926 BuildMI(MBB, MBBI, dl, TII.get(PPC::RLWINM), ScratchReg) 927 .addReg(SPReg) 928 .addImm(0) 929 .addImm(32 - Log2(MaxAlign)) 930 .addImm(31); 931 if (!isLargeFrame) { 932 BuildMI(MBB, MBBI, dl, SubtractImmCarryingInst, ScratchReg) 933 .addReg(ScratchReg, RegState::Kill) 934 .addImm(NegFrameSize); 935 } else { 936 assert(!SingleScratchReg && "Only a single scratch reg available"); 937 BuildMI(MBB, MBBI, dl, LoadImmShiftedInst, TempReg) 938 .addImm(NegFrameSize >> 16); 939 BuildMI(MBB, MBBI, dl, OrImmInst, TempReg) 940 .addReg(TempReg, RegState::Kill) 941 .addImm(NegFrameSize & 0xFFFF); 942 BuildMI(MBB, MBBI, dl, SubtractCarryingInst, ScratchReg) 943 .addReg(ScratchReg, RegState::Kill) 944 .addReg(TempReg, RegState::Kill); 945 } 946 947 BuildMI(MBB, MBBI, dl, StoreUpdtIdxInst, SPReg) 948 .addReg(SPReg, RegState::Kill) 949 .addReg(SPReg) 950 .addReg(ScratchReg); 951 HasSTUX = true; 952 953 } else if (!isLargeFrame) { 954 BuildMI(MBB, StackUpdateLoc, dl, StoreUpdtInst, SPReg) 955 .addReg(SPReg) 956 .addImm(NegFrameSize) 957 .addReg(SPReg); 958 959 } else { 960 BuildMI(MBB, MBBI, dl, LoadImmShiftedInst, ScratchReg) 961 .addImm(NegFrameSize >> 16); 962 BuildMI(MBB, MBBI, dl, OrImmInst, ScratchReg) 963 .addReg(ScratchReg, RegState::Kill) 964 .addImm(NegFrameSize & 0xFFFF); 965 BuildMI(MBB, MBBI, dl, StoreUpdtIdxInst, SPReg) 966 .addReg(SPReg, RegState::Kill) 967 .addReg(SPReg) 968 .addReg(ScratchReg); 969 HasSTUX = true; 970 } 971 } 972 973 // Save the TOC register after the stack pointer update if a prologue TOC 974 // save is required for the function. 
975 if (MustSaveTOC) { 976 assert(isELFv2ABI && "TOC saves in the prologue only supported on ELFv2"); 977 BuildMI(MBB, StackUpdateLoc, dl, TII.get(PPC::STD)) 978 .addReg(TOCReg, getKillRegState(true)) 979 .addImm(TOCSaveOffset) 980 .addReg(SPReg); 981 } 982 983 if (!HasRedZone) { 984 assert(!isPPC64 && "A red zone is always available on PPC64"); 985 if (HasSTUX) { 986 // The negated frame size is in ScratchReg, and the SPReg has been 987 // decremented by the frame size: SPReg = old SPReg + ScratchReg. 988 // Since FPOffset, PBPOffset, etc. are relative to the beginning of 989 // the stack frame (i.e. the old SP), ideally, we would put the old 990 // SP into a register and use it as the base for the stores. The 991 // problem is that the only available register may be ScratchReg, 992 // which could be R0, and R0 cannot be used as a base address. 993 994 // First, set ScratchReg to the old SP. This may need to be modified 995 // later. 996 BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBF), ScratchReg) 997 .addReg(ScratchReg, RegState::Kill) 998 .addReg(SPReg); 999 1000 if (ScratchReg == PPC::R0) { 1001 // R0 cannot be used as a base register, but it can be used as an 1002 // index in a store-indexed. 1003 int LastOffset = 0; 1004 if (HasFP) { 1005 // R0 += (FPOffset-LastOffset). 1006 // Need addic, since addi treats R0 as 0. 1007 BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDIC), ScratchReg) 1008 .addReg(ScratchReg) 1009 .addImm(FPOffset-LastOffset); 1010 LastOffset = FPOffset; 1011 // Store FP into *R0. 1012 BuildMI(MBB, MBBI, dl, TII.get(PPC::STWX)) 1013 .addReg(FPReg, RegState::Kill) // Save FP. 1014 .addReg(PPC::ZERO) 1015 .addReg(ScratchReg); // This will be the index (R0 is ok here). 1016 } 1017 if (FI->usesPICBase()) { 1018 // R0 += (PBPOffset-LastOffset). 
1019 BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDIC), ScratchReg) 1020 .addReg(ScratchReg) 1021 .addImm(PBPOffset-LastOffset); 1022 LastOffset = PBPOffset; 1023 BuildMI(MBB, MBBI, dl, TII.get(PPC::STWX)) 1024 .addReg(PPC::R30, RegState::Kill) // Save PIC base pointer. 1025 .addReg(PPC::ZERO) 1026 .addReg(ScratchReg); // This will be the index (R0 is ok here). 1027 } 1028 if (HasBP) { 1029 // R0 += (BPOffset-LastOffset). 1030 BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDIC), ScratchReg) 1031 .addReg(ScratchReg) 1032 .addImm(BPOffset-LastOffset); 1033 LastOffset = BPOffset; 1034 BuildMI(MBB, MBBI, dl, TII.get(PPC::STWX)) 1035 .addReg(BPReg, RegState::Kill) // Save BP. 1036 .addReg(PPC::ZERO) 1037 .addReg(ScratchReg); // This will be the index (R0 is ok here). 1038 // BP = R0-LastOffset 1039 BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDIC), BPReg) 1040 .addReg(ScratchReg, RegState::Kill) 1041 .addImm(-LastOffset); 1042 } 1043 } else { 1044 // ScratchReg is not R0, so use it as the base register. It is 1045 // already set to the old SP, so we can use the offsets directly. 1046 1047 // Now that the stack frame has been allocated, save all the necessary 1048 // registers using ScratchReg as the base address. 1049 if (HasFP) 1050 BuildMI(MBB, MBBI, dl, StoreInst) 1051 .addReg(FPReg) 1052 .addImm(FPOffset) 1053 .addReg(ScratchReg); 1054 if (FI->usesPICBase()) 1055 BuildMI(MBB, MBBI, dl, StoreInst) 1056 .addReg(PPC::R30) 1057 .addImm(PBPOffset) 1058 .addReg(ScratchReg); 1059 if (HasBP) { 1060 BuildMI(MBB, MBBI, dl, StoreInst) 1061 .addReg(BPReg) 1062 .addImm(BPOffset) 1063 .addReg(ScratchReg); 1064 BuildMI(MBB, MBBI, dl, OrInst, BPReg) 1065 .addReg(ScratchReg, RegState::Kill) 1066 .addReg(ScratchReg); 1067 } 1068 } 1069 } else { 1070 // The frame size is a known 16-bit constant (fitting in the immediate 1071 // field of STWU). To be here we have to be compiling for PPC32. 1072 // Since the SPReg has been decreased by FrameSize, add it back to each 1073 // offset. 
1074 if (HasFP) 1075 BuildMI(MBB, MBBI, dl, StoreInst) 1076 .addReg(FPReg) 1077 .addImm(FrameSize + FPOffset) 1078 .addReg(SPReg); 1079 if (FI->usesPICBase()) 1080 BuildMI(MBB, MBBI, dl, StoreInst) 1081 .addReg(PPC::R30) 1082 .addImm(FrameSize + PBPOffset) 1083 .addReg(SPReg); 1084 if (HasBP) { 1085 BuildMI(MBB, MBBI, dl, StoreInst) 1086 .addReg(BPReg) 1087 .addImm(FrameSize + BPOffset) 1088 .addReg(SPReg); 1089 BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI), BPReg) 1090 .addReg(SPReg) 1091 .addImm(FrameSize); 1092 } 1093 } 1094 } 1095 1096 // Add Call Frame Information for the instructions we generated above. 1097 if (needsCFI) { 1098 unsigned CFIIndex; 1099 1100 if (HasBP) { 1101 // Define CFA in terms of BP. Do this in preference to using FP/SP, 1102 // because if the stack needed aligning then CFA won't be at a fixed 1103 // offset from FP/SP. 1104 unsigned Reg = MRI->getDwarfRegNum(BPReg, true); 1105 CFIIndex = MF.addFrameInst( 1106 MCCFIInstruction::createDefCfaRegister(nullptr, Reg)); 1107 } else { 1108 // Adjust the definition of CFA to account for the change in SP. 1109 assert(NegFrameSize); 1110 CFIIndex = MF.addFrameInst( 1111 MCCFIInstruction::cfiDefCfaOffset(nullptr, -NegFrameSize)); 1112 } 1113 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) 1114 .addCFIIndex(CFIIndex); 1115 1116 if (HasFP) { 1117 // Describe where FP was saved, at a fixed offset from CFA. 1118 unsigned Reg = MRI->getDwarfRegNum(FPReg, true); 1119 CFIIndex = MF.addFrameInst( 1120 MCCFIInstruction::createOffset(nullptr, Reg, FPOffset)); 1121 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) 1122 .addCFIIndex(CFIIndex); 1123 } 1124 1125 if (FI->usesPICBase()) { 1126 // Describe where FP was saved, at a fixed offset from CFA. 
1127 unsigned Reg = MRI->getDwarfRegNum(PPC::R30, true); 1128 CFIIndex = MF.addFrameInst( 1129 MCCFIInstruction::createOffset(nullptr, Reg, PBPOffset)); 1130 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) 1131 .addCFIIndex(CFIIndex); 1132 } 1133 1134 if (HasBP) { 1135 // Describe where BP was saved, at a fixed offset from CFA. 1136 unsigned Reg = MRI->getDwarfRegNum(BPReg, true); 1137 CFIIndex = MF.addFrameInst( 1138 MCCFIInstruction::createOffset(nullptr, Reg, BPOffset)); 1139 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) 1140 .addCFIIndex(CFIIndex); 1141 } 1142 1143 if (MustSaveLR) { 1144 // Describe where LR was saved, at a fixed offset from CFA. 1145 unsigned Reg = MRI->getDwarfRegNum(LRReg, true); 1146 CFIIndex = MF.addFrameInst( 1147 MCCFIInstruction::createOffset(nullptr, Reg, LROffset)); 1148 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) 1149 .addCFIIndex(CFIIndex); 1150 } 1151 } 1152 1153 // If there is a frame pointer, copy R1 into R31 1154 if (HasFP) { 1155 BuildMI(MBB, MBBI, dl, OrInst, FPReg) 1156 .addReg(SPReg) 1157 .addReg(SPReg); 1158 1159 if (!HasBP && needsCFI) { 1160 // Change the definition of CFA from SP+offset to FP+offset, because SP 1161 // will change at every alloca. 1162 unsigned Reg = MRI->getDwarfRegNum(FPReg, true); 1163 unsigned CFIIndex = MF.addFrameInst( 1164 MCCFIInstruction::createDefCfaRegister(nullptr, Reg)); 1165 1166 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) 1167 .addCFIIndex(CFIIndex); 1168 } 1169 } 1170 1171 if (needsCFI) { 1172 // Describe where callee saved registers were saved, at fixed offsets from 1173 // CFA. 1174 const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo(); 1175 for (const CalleeSavedInfo &I : CSI) { 1176 Register Reg = I.getReg(); 1177 if (Reg == PPC::LR || Reg == PPC::LR8 || Reg == PPC::RM) continue; 1178 1179 // This is a bit of a hack: CR2LT, CR2GT, CR2EQ and CR2UN are just 1180 // subregisters of CR2. 
We just need to emit a move of CR2. 1181 if (PPC::CRBITRCRegClass.contains(Reg)) 1182 continue; 1183 1184 if ((Reg == PPC::X2 || Reg == PPC::R2) && MustSaveTOC) 1185 continue; 1186 1187 // For SVR4, don't emit a move for the CR spill slot if we haven't 1188 // spilled CRs. 1189 if (isSVR4ABI && (PPC::CR2 <= Reg && Reg <= PPC::CR4) 1190 && !MustSaveCR) 1191 continue; 1192 1193 // For 64-bit SVR4 when we have spilled CRs, the spill location 1194 // is SP+8, not a frame-relative slot. 1195 if (isSVR4ABI && isPPC64 && (PPC::CR2 <= Reg && Reg <= PPC::CR4)) { 1196 // In the ELFv1 ABI, only CR2 is noted in CFI and stands in for 1197 // the whole CR word. In the ELFv2 ABI, every CR that was 1198 // actually saved gets its own CFI record. 1199 Register CRReg = isELFv2ABI? Reg : PPC::CR2; 1200 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset( 1201 nullptr, MRI->getDwarfRegNum(CRReg, true), CRSaveOffset)); 1202 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) 1203 .addCFIIndex(CFIIndex); 1204 continue; 1205 } 1206 1207 if (I.isSpilledToReg()) { 1208 unsigned SpilledReg = I.getDstReg(); 1209 unsigned CFIRegister = MF.addFrameInst(MCCFIInstruction::createRegister( 1210 nullptr, MRI->getDwarfRegNum(Reg, true), 1211 MRI->getDwarfRegNum(SpilledReg, true))); 1212 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) 1213 .addCFIIndex(CFIRegister); 1214 } else { 1215 int64_t Offset = MFI.getObjectOffset(I.getFrameIdx()); 1216 // We have changed the object offset above but we do not want to change 1217 // the actual offsets in the CFI instruction so we have to undo the 1218 // offset change here. 
1219 if (MovingStackUpdateDown) 1220 Offset -= NegFrameSize; 1221 1222 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset( 1223 nullptr, MRI->getDwarfRegNum(Reg, true), Offset)); 1224 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) 1225 .addCFIIndex(CFIIndex); 1226 } 1227 } 1228 } 1229 } 1230 1231 void PPCFrameLowering::inlineStackProbe(MachineFunction &MF, 1232 MachineBasicBlock &PrologMBB) const { 1233 bool isPPC64 = Subtarget.isPPC64(); 1234 const PPCTargetLowering &TLI = *Subtarget.getTargetLowering(); 1235 const PPCInstrInfo &TII = *Subtarget.getInstrInfo(); 1236 MachineFrameInfo &MFI = MF.getFrameInfo(); 1237 MachineModuleInfo &MMI = MF.getMMI(); 1238 const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo(); 1239 // AIX assembler does not support cfi directives. 1240 const bool needsCFI = MF.needsFrameMoves() && !Subtarget.isAIXABI(); 1241 auto StackAllocMIPos = llvm::find_if(PrologMBB, [](MachineInstr &MI) { 1242 int Opc = MI.getOpcode(); 1243 return Opc == PPC::PROBED_STACKALLOC_64 || Opc == PPC::PROBED_STACKALLOC_32; 1244 }); 1245 if (StackAllocMIPos == PrologMBB.end()) 1246 return; 1247 const BasicBlock *ProbedBB = PrologMBB.getBasicBlock(); 1248 MachineBasicBlock *CurrentMBB = &PrologMBB; 1249 DebugLoc DL = PrologMBB.findDebugLoc(StackAllocMIPos); 1250 MachineInstr &MI = *StackAllocMIPos; 1251 int64_t NegFrameSize = MI.getOperand(2).getImm(); 1252 unsigned ProbeSize = TLI.getStackProbeSize(MF); 1253 int64_t NegProbeSize = -(int64_t)ProbeSize; 1254 assert(isInt<32>(NegProbeSize) && "Unhandled probe size"); 1255 int64_t NumBlocks = NegFrameSize / NegProbeSize; 1256 int64_t NegResidualSize = NegFrameSize % NegProbeSize; 1257 Register SPReg = isPPC64 ? 
// Interior of PPCFrameLowering::inlineStackProbe: expands the
// PROBED_STACKALLOC_32/64 pseudo found in the prologue (located via the
// find_if above) into explicit stack-probing code. NOTE(review): the original
// source line numbers are fused into the text below by extraction; the code
// tokens themselves are unchanged.
PPC::X1 : PPC::R1; 1258 Register ScratchReg = MI.getOperand(0).getReg(); 1259 Register FPReg = MI.getOperand(1).getReg(); 1260 const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo(); 1261 bool HasBP = RegInfo->hasBasePointer(MF); 1262 Register BPReg = RegInfo->getBaseRegister(MF); 1263 Align MaxAlign = MFI.getMaxAlign(); 1264 bool HasRedZone = Subtarget.isPPC64() || !Subtarget.isSVR4ABI(); 1265 const MCInstrDesc &CopyInst = TII.get(isPPC64 ? PPC::OR8 : PPC::OR); 1266 // Subroutines to generate .cfi_* directives. 1267 auto buildDefCFAReg = [&](MachineBasicBlock &MBB, 1268 MachineBasicBlock::iterator MBBI, Register Reg) { 1269 unsigned RegNum = MRI->getDwarfRegNum(Reg, true); 1270 unsigned CFIIndex = MF.addFrameInst( 1271 MCCFIInstruction::createDefCfaRegister(nullptr, RegNum)); 1272 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) 1273 .addCFIIndex(CFIIndex); 1274 }; 1275 auto buildDefCFA = [&](MachineBasicBlock &MBB, 1276 MachineBasicBlock::iterator MBBI, Register Reg, 1277 int Offset) { 1278 unsigned RegNum = MRI->getDwarfRegNum(Reg, true); 1279 unsigned CFIIndex = MBB.getParent()->addFrameInst( 1280 MCCFIInstruction::cfiDefCfa(nullptr, RegNum, Offset)); 1281 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) 1282 .addCFIIndex(CFIIndex); 1283 }; 1284 // Subroutine to determine if we can use the Imm as part of d-form. 1285 auto CanUseDForm = [](int64_t Imm) { return isInt<16>(Imm) && Imm % 4 == 0; }; 1286 // Subroutine to materialize the Imm into TempReg. 1287 auto MaterializeImm = [&](MachineBasicBlock &MBB, 1288 MachineBasicBlock::iterator MBBI, int64_t Imm, 1289 Register &TempReg) { 1290 assert(isInt<32>(Imm) && "Unhandled imm"); 1291 if (isInt<16>(Imm)) 1292 BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ? PPC::LI8 : PPC::LI), TempReg) 1293 .addImm(Imm); 1294 else { 1295 BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg) 1296 .addImm(Imm >> 16); 1297 BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ?
// The helpers above: buildDefCFAReg / buildDefCFA wrap the boilerplate of
// registering a frame instruction and emitting the CFI_INSTRUCTION pseudo;
// CanUseDForm checks the D-form displacement constraints (16-bit, multiple of
// 4); MaterializeImm loads a 32-bit immediate with a single LI, or — continued
// on the next line — with LIS followed by ORI when it does not fit in 16 bits.
PPC::ORI8 : PPC::ORI), TempReg) 1298 .addReg(TempReg) 1299 .addImm(Imm & 0xFFFF); 1300 } 1301 }; 1302 // Subroutine to store frame pointer and decrease stack pointer by probe size. 1303 auto allocateAndProbe = [&](MachineBasicBlock &MBB, 1304 MachineBasicBlock::iterator MBBI, int64_t NegSize, 1305 Register NegSizeReg, bool UseDForm, 1306 Register StoreReg) { 1307 if (UseDForm) 1308 BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ? PPC::STDU : PPC::STWU), SPReg) 1309 .addReg(StoreReg) 1310 .addImm(NegSize) 1311 .addReg(SPReg); 1312 else 1313 BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg) 1314 .addReg(StoreReg) 1315 .addReg(SPReg) 1316 .addReg(NegSizeReg); 1317 }; 1318 // Used to probe stack when realignment is required. 1319 // Note that, according to ABI's requirement, *sp must always equal the 1320 // value of back-chain pointer, only st(w|d)u(x) can be used to update sp. 1321 // Following is pseudo code: 1322 // final_sp = (sp & align) + negframesize; 1323 // neg_gap = final_sp - sp; 1324 // while (neg_gap < negprobesize) { 1325 // stdu fp, negprobesize(sp); 1326 // neg_gap -= negprobesize; 1327 // } 1328 // stdux fp, sp, neg_gap 1329 // 1330 // When HasBP & HasRedzone, back-chain pointer is already saved in BPReg 1331 // before probe code, we don't need to save it, so we get one additional reg 1332 // that can be used to materialize the probe size if needed to use xform. 1333 // Otherwise, we can NOT materialize the probe size, so we can only use Dform for 1334 // now. 1335 // 1336 // The allocations are: 1337 // if (HasBP && HasRedzone) { 1338 // r0: materialize the probesize if needed so that we can use xform. 1339 // r12: `neg_gap` 1340 // } else { 1341 // r0: back-chain pointer 1342 // r12: `neg_gap`.
// allocateAndProbe (above) is the probe primitive: a single store-with-update
// (STDU/STWU for immediate offsets, STDUX/STWUX for register offsets) that
// both decrements SP and stores the back-chain word in one instruction, as
// the ABI back-chain requirement demands.
1343 // } 1344 auto probeRealignedStack = [&](MachineBasicBlock &MBB, 1345 MachineBasicBlock::iterator MBBI, 1346 Register ScratchReg, Register TempReg) { 1347 assert(HasBP && "The function is supposed to have base pointer when its " 1348 "stack is realigned."); 1349 assert(isPowerOf2_64(ProbeSize) && "Probe size should be power of 2"); 1350 1351 // FIXME: We can eliminate this limitation if we get more information about 1352 // which part of redzone are already used. Used redzone can be treated 1353 // probed. But there might be `holes' in redzone probed, this could 1354 // complicate the implementation. 1355 assert(ProbeSize >= Subtarget.getRedZoneSize() && 1356 "Probe size should be larger or equal to the size of red-zone so " 1357 "that red-zone is not clobbered by probing."); 1358 1359 Register &FinalStackPtr = TempReg; 1360 // FIXME: We only support NegProbeSize materializable by DForm currently. 1361 // When HasBP && HasRedzone, we can use xform if we have an additional idle 1362 // register. 1363 NegProbeSize = std::max(NegProbeSize, -((int64_t)1 << 15)); 1364 assert(isInt<16>(NegProbeSize) && 1365 "NegProbeSize should be materializable by DForm"); 1366 Register CRReg = PPC::CR0; 1367 // Layout of output assembly kinda like: 1368 // bb.0: 1369 // ... 1370 // sub $scratchreg, $finalsp, r1 1371 // cmpdi $scratchreg, <negprobesize> 1372 // bge bb.2 1373 // bb.1: 1374 // stdu <backchain>, <negprobesize>(r1) 1375 // sub $scratchreg, $scratchreg, negprobesize 1376 // cmpdi $scratchreg, <negprobesize> 1377 // blt bb.1 1378 // bb.2: 1379 // stdux <backchain>, r1, $scratchreg 1380 MachineFunction::iterator MBBInsertPoint = std::next(MBB.getIterator()); 1381 MachineBasicBlock *ProbeLoopBodyMBB = MF.CreateMachineBasicBlock(ProbedBB); 1382 MF.insert(MBBInsertPoint, ProbeLoopBodyMBB); 1383 MachineBasicBlock *ProbeExitMBB = MF.CreateMachineBasicBlock(ProbedBB); 1384 MF.insert(MBBInsertPoint, ProbeExitMBB); 1385 // bb.2 1386 { 1387 Register BackChainPointer = HasRedZone ?
// probeRealignedStack builds a compare-and-branch probing loop out of two new
// basic blocks (ProbeLoopBodyMBB / ProbeExitMBB) inserted after MBB; the exit
// block takes over everything after MBBI via splice(), and the final
// STDUX/STWUX in it allocates the residual gap (continued below). With a red
// zone the back-chain value lives in BPReg, otherwise in TempReg.
BPReg : TempReg; 1388 allocateAndProbe(*ProbeExitMBB, ProbeExitMBB->end(), 0, ScratchReg, false, 1389 BackChainPointer); 1390 if (HasRedZone) 1391 // PROBED_STACKALLOC_64 assumes Operand(1) stores the old sp, copy BPReg 1392 // to TempReg to satisfy it. 1393 BuildMI(*ProbeExitMBB, ProbeExitMBB->end(), DL, CopyInst, TempReg) 1394 .addReg(BPReg) 1395 .addReg(BPReg); 1396 ProbeExitMBB->splice(ProbeExitMBB->end(), &MBB, MBBI, MBB.end()); 1397 ProbeExitMBB->transferSuccessorsAndUpdatePHIs(&MBB); 1398 } 1399 // bb.0 1400 { 1401 BuildMI(&MBB, DL, TII.get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), ScratchReg) 1402 .addReg(SPReg) 1403 .addReg(FinalStackPtr); 1404 if (!HasRedZone) 1405 BuildMI(&MBB, DL, CopyInst, TempReg).addReg(SPReg).addReg(SPReg); 1406 BuildMI(&MBB, DL, TII.get(isPPC64 ? PPC::CMPDI : PPC::CMPWI), CRReg) 1407 .addReg(ScratchReg) 1408 .addImm(NegProbeSize); 1409 BuildMI(&MBB, DL, TII.get(PPC::BCC)) 1410 .addImm(PPC::PRED_GE) 1411 .addReg(CRReg) 1412 .addMBB(ProbeExitMBB); 1413 MBB.addSuccessor(ProbeLoopBodyMBB); 1414 MBB.addSuccessor(ProbeExitMBB); 1415 } 1416 // bb.1 1417 { 1418 Register BackChainPointer = HasRedZone ? BPReg : TempReg; 1419 allocateAndProbe(*ProbeLoopBodyMBB, ProbeLoopBodyMBB->end(), NegProbeSize, 1420 0, true /*UseDForm*/, BackChainPointer); 1421 BuildMI(ProbeLoopBodyMBB, DL, TII.get(isPPC64 ? PPC::ADDI8 : PPC::ADDI), 1422 ScratchReg) 1423 .addReg(ScratchReg) 1424 .addImm(-NegProbeSize); 1425 BuildMI(ProbeLoopBodyMBB, DL, TII.get(isPPC64 ? PPC::CMPDI : PPC::CMPWI), 1426 CRReg) 1427 .addReg(ScratchReg) 1428 .addImm(NegProbeSize); 1429 BuildMI(ProbeLoopBodyMBB, DL, TII.get(PPC::BCC)) 1430 .addImm(PPC::PRED_LT) 1431 .addReg(CRReg) 1432 .addMBB(ProbeLoopBodyMBB); 1433 ProbeLoopBodyMBB->addSuccessor(ProbeExitMBB); 1434 ProbeLoopBodyMBB->addSuccessor(ProbeLoopBodyMBB); 1435 } 1436 // Update liveins.
// After wiring up bb.0 -> {bb.1, bb.2} and the bb.1 self-loop above, the
// lambda recomputes live-ins for the two new blocks and returns the exit
// block so the caller can continue emitting there.
1437 recomputeLiveIns(*ProbeLoopBodyMBB); 1438 recomputeLiveIns(*ProbeExitMBB); 1439 return ProbeExitMBB; 1440 }; 1441 // For case HasBP && MaxAlign > 1, we have to realign the SP by performing 1442 // SP = SP - SP % MaxAlign, thus make the probe more like dynamic probe since 1443 // the offset subtracted from SP is determined by SP's runtime value. 1444 if (HasBP && MaxAlign > 1) { 1445 // Calculate final stack pointer. 1446 if (isPPC64) 1447 BuildMI(*CurrentMBB, {MI}, DL, TII.get(PPC::RLDICL), ScratchReg) 1448 .addReg(SPReg) 1449 .addImm(0) 1450 .addImm(64 - Log2(MaxAlign)) ; 1451 else 1452 BuildMI(*CurrentMBB, {MI}, DL, TII.get(PPC::RLWINM), ScratchReg) 1453 .addReg(SPReg) 1454 .addImm(0) 1455 .addImm(32 - Log2(MaxAlign)) 1456 .addImm(31); 1457 BuildMI(*CurrentMBB, {MI}, DL, TII.get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), 1458 FPReg) 1459 .addReg(ScratchReg) 1460 .addReg(SPReg); 1461 MaterializeImm(*CurrentMBB, {MI}, NegFrameSize, ScratchReg); 1462 BuildMI(*CurrentMBB, {MI}, DL, TII.get(isPPC64 ? PPC::ADD8 : PPC::ADD4), 1463 FPReg) 1464 .addReg(ScratchReg) 1465 .addReg(FPReg); 1466 CurrentMBB = probeRealignedStack(*CurrentMBB, {MI}, ScratchReg, FPReg); 1467 if (needsCFI) 1468 buildDefCFAReg(*CurrentMBB, {MI}, FPReg); 1469 } else { 1470 // Initialize current frame pointer. 1471 BuildMI(*CurrentMBB, {MI}, DL, CopyInst, FPReg).addReg(SPReg).addReg(SPReg); 1472 // Use FPReg to calculate CFA. 1473 if (needsCFI) 1474 buildDefCFA(*CurrentMBB, {MI}, FPReg, 0); 1475 // Probe residual part. 1476 if (NegResidualSize) { 1477 bool ResidualUseDForm = CanUseDForm(NegResidualSize); 1478 if (!ResidualUseDForm) 1479 MaterializeImm(*CurrentMBB, {MI}, NegResidualSize, ScratchReg); 1480 allocateAndProbe(*CurrentMBB, {MI}, NegResidualSize, ScratchReg, 1481 ResidualUseDForm, FPReg); 1482 } 1483 bool UseDForm = CanUseDForm(NegProbeSize); 1484 // If number of blocks is small, just probe them directly.
// Non-realigned path (else branch above): FPReg holds a copy of the old SP,
// the residual part (NegFrameSize % NegProbeSize) is probed first, then
// NumBlocks full-size probes follow — emitted inline when NumBlocks < 3,
// otherwise via a CTR-based BDNZ loop synthesized below.
1485 if (NumBlocks < 3) { 1486 if (!UseDForm) 1487 MaterializeImm(*CurrentMBB, {MI}, NegProbeSize, ScratchReg); 1488 for (int i = 0; i < NumBlocks; ++i) 1489 allocateAndProbe(*CurrentMBB, {MI}, NegProbeSize, ScratchReg, UseDForm, 1490 FPReg); 1491 if (needsCFI) { 1492 // Restore using SPReg to calculate CFA. 1493 buildDefCFAReg(*CurrentMBB, {MI}, SPReg); 1494 } 1495 } else { 1496 // Since CTR is a volatile register and current shrinkwrap implementation 1497 // won't choose an MBB in a loop as the PrologMBB, it's safe to synthesize a 1498 // CTR loop to probe. 1499 // Calculate the trip count and store it in CTR. 1500 MaterializeImm(*CurrentMBB, {MI}, NumBlocks, ScratchReg); 1501 BuildMI(*CurrentMBB, {MI}, DL, TII.get(isPPC64 ? PPC::MTCTR8 : PPC::MTCTR)) 1502 .addReg(ScratchReg, RegState::Kill); 1503 if (!UseDForm) 1504 MaterializeImm(*CurrentMBB, {MI}, NegProbeSize, ScratchReg); 1505 // Create MBBs of the loop. 1506 MachineFunction::iterator MBBInsertPoint = 1507 std::next(CurrentMBB->getIterator()); 1508 MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(ProbedBB); 1509 MF.insert(MBBInsertPoint, LoopMBB); 1510 MachineBasicBlock *ExitMBB = MF.CreateMachineBasicBlock(ProbedBB); 1511 MF.insert(MBBInsertPoint, ExitMBB); 1512 // Synthesize the loop body. 1513 allocateAndProbe(*LoopMBB, LoopMBB->end(), NegProbeSize, ScratchReg, 1514 UseDForm, FPReg); 1515 BuildMI(LoopMBB, DL, TII.get(isPPC64 ? PPC::BDNZ8 : PPC::BDNZ)) 1516 .addMBB(LoopMBB); 1517 LoopMBB->addSuccessor(ExitMBB); 1518 LoopMBB->addSuccessor(LoopMBB); 1519 // Synthesize the exit MBB. 1520 ExitMBB->splice(ExitMBB->end(), CurrentMBB, 1521 std::next(MachineBasicBlock::iterator(MI)), 1522 CurrentMBB->end()); 1523 ExitMBB->transferSuccessorsAndUpdatePHIs(CurrentMBB); 1524 CurrentMBB->addSuccessor(LoopMBB); 1525 if (needsCFI) { 1526 // Restore using SPReg to calculate CFA. 1527 buildDefCFAReg(*ExitMBB, ExitMBB->begin(), SPReg); 1528 } 1529 // Update liveins.
1530 recomputeLiveIns(*LoopMBB); 1531 recomputeLiveIns(*ExitMBB); 1532 } 1533 } 1534 ++NumPrologProbed; 1535 MI.eraseFromParent(); 1536 } 1537 1538 void PPCFrameLowering::emitEpilogue(MachineFunction &MF, 1539 MachineBasicBlock &MBB) const { 1540 MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator(); 1541 DebugLoc dl; 1542 1543 if (MBBI != MBB.end()) 1544 dl = MBBI->getDebugLoc(); 1545 1546 const PPCInstrInfo &TII = *Subtarget.getInstrInfo(); 1547 const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo(); 1548 1549 // Get alignment info so we know how to restore the SP. 1550 const MachineFrameInfo &MFI = MF.getFrameInfo(); 1551 1552 // Get the number of bytes allocated from the FrameInfo. 1553 int64_t FrameSize = MFI.getStackSize(); 1554 1555 // Get processor type. 1556 bool isPPC64 = Subtarget.isPPC64(); 1557 1558 // Check if the link register (LR) has been saved. 1559 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 1560 bool MustSaveLR = FI->mustSaveLR(); 1561 const SmallVectorImpl<Register> &MustSaveCRs = FI->getMustSaveCRs(); 1562 bool MustSaveCR = !MustSaveCRs.empty(); 1563 // Do we have a frame pointer and/or base pointer for this function? 1564 bool HasFP = hasFP(MF); 1565 bool HasBP = RegInfo->hasBasePointer(MF); 1566 bool HasRedZone = Subtarget.isPPC64() || !Subtarget.isSVR4ABI(); 1567 bool HasROPProtect = Subtarget.hasROPProtect(); 1568 bool HasPrivileged = Subtarget.hasPrivileged(); 1569 1570 Register SPReg = isPPC64 ? PPC::X1 : PPC::R1; 1571 Register BPReg = RegInfo->getBaseRegister(MF); 1572 Register FPReg = isPPC64 ? PPC::X31 : PPC::R31; 1573 Register ScratchReg; 1574 Register TempReg = isPPC64 ? PPC::X12 : PPC::R12; // another scratch reg 1575 const MCInstrDesc& MTLRInst = TII.get( isPPC64 ? PPC::MTLR8 1576 : PPC::MTLR ); 1577 const MCInstrDesc& LoadInst = TII.get( isPPC64 ? PPC::LD 1578 : PPC::LWZ ); 1579 const MCInstrDesc& LoadImmShiftedInst = TII.get( isPPC64 ? 
PPC::LIS8 1580 : PPC::LIS ); 1581 const MCInstrDesc& OrInst = TII.get(isPPC64 ? PPC::OR8 1582 : PPC::OR ); 1583 const MCInstrDesc& OrImmInst = TII.get( isPPC64 ? PPC::ORI8 1584 : PPC::ORI ); 1585 const MCInstrDesc& AddImmInst = TII.get( isPPC64 ? PPC::ADDI8 1586 : PPC::ADDI ); 1587 const MCInstrDesc& AddInst = TII.get( isPPC64 ? PPC::ADD8 1588 : PPC::ADD4 ); 1589 const MCInstrDesc& LoadWordInst = TII.get( isPPC64 ? PPC::LWZ8 1590 : PPC::LWZ); 1591 const MCInstrDesc& MoveToCRInst = TII.get( isPPC64 ? PPC::MTOCRF8 1592 : PPC::MTOCRF); 1593 const MCInstrDesc &HashChk = 1594 TII.get(isPPC64 ? (HasPrivileged ? PPC::HASHCHKP8 : PPC::HASHCHK8) 1595 : (HasPrivileged ? PPC::HASHCHKP : PPC::HASHCHK)); 1596 int64_t LROffset = getReturnSaveOffset(); 1597 1598 int64_t FPOffset = 0; 1599 1600 // Using the same bool variable as below to suppress compiler warnings. 1601 bool SingleScratchReg = findScratchRegister(&MBB, true, false, &ScratchReg, 1602 &TempReg); 1603 assert(SingleScratchReg && 1604 "Could not find an available scratch register"); 1605 1606 SingleScratchReg = ScratchReg == TempReg; 1607 1608 if (HasFP) { 1609 int FPIndex = FI->getFramePointerSaveIndex(); 1610 assert(FPIndex && "No Frame Pointer Save Slot!"); 1611 FPOffset = MFI.getObjectOffset(FPIndex); 1612 } 1613 1614 int64_t BPOffset = 0; 1615 if (HasBP) { 1616 int BPIndex = FI->getBasePointerSaveIndex(); 1617 assert(BPIndex && "No Base Pointer Save Slot!"); 1618 BPOffset = MFI.getObjectOffset(BPIndex); 1619 } 1620 1621 int64_t PBPOffset = 0; 1622 if (FI->usesPICBase()) { 1623 int PBPIndex = FI->getPICBasePointerSaveIndex(); 1624 assert(PBPIndex && "No PIC Base Pointer Save Slot!"); 1625 PBPOffset = MFI.getObjectOffset(PBPIndex); 1626 } 1627 1628 bool IsReturnBlock = (MBBI != MBB.end() && MBBI->isReturn()); 1629 1630 if (IsReturnBlock) { 1631 unsigned RetOpcode = MBBI->getOpcode(); 1632 bool UsesTCRet = RetOpcode == PPC::TCRETURNri || 1633 RetOpcode == PPC::TCRETURNdi || 1634 RetOpcode == PPC::TCRETURNai || 1635 
RetOpcode == PPC::TCRETURNri8 || 1636 RetOpcode == PPC::TCRETURNdi8 || 1637 RetOpcode == PPC::TCRETURNai8; 1638 1639 if (UsesTCRet) { 1640 int MaxTCRetDelta = FI->getTailCallSPDelta(); 1641 MachineOperand &StackAdjust = MBBI->getOperand(1); 1642 assert(StackAdjust.isImm() && "Expecting immediate value."); 1643 // Adjust stack pointer. 1644 int StackAdj = StackAdjust.getImm(); 1645 int Delta = StackAdj - MaxTCRetDelta; 1646 assert((Delta >= 0) && "Delta must be positive"); 1647 if (MaxTCRetDelta>0) 1648 FrameSize += (StackAdj +Delta); 1649 else 1650 FrameSize += StackAdj; 1651 } 1652 } 1653 1654 // Frames of 32KB & larger require special handling because they cannot be 1655 // indexed into with a simple LD/LWZ immediate offset operand. 1656 bool isLargeFrame = !isInt<16>(FrameSize); 1657 1658 // On targets without red zone, the SP needs to be restored last, so that 1659 // all live contents of the stack frame are upwards of the SP. This means 1660 // that we cannot restore SP just now, since there may be more registers 1661 // to restore from the stack frame (e.g. R31). If the frame size is not 1662 // a simple immediate value, we will need a spare register to hold the 1663 // restored SP. If the frame size is known and small, we can simply adjust 1664 // the offsets of the registers to be restored, and still use SP to restore 1665 // them. In such case, the final update of SP will be to add the frame 1666 // size to it. 1667 // To simplify the code, set RBReg to the base register used to restore 1668 // values from the stack, and set SPAdd to the value that needs to be added 1669 // to the SP at the end. The default values are as if red zone was present. 1670 unsigned RBReg = SPReg; 1671 unsigned SPAdd = 0; 1672 1673 // Check if we can move the stack update instruction up the epilogue 1674 // past the callee saves. 
This will allow the move to LR instruction 1675 // to be executed before the restores of the callee saves which means 1676 // that the callee saves can hide the latency from the MTLR instrcution. 1677 MachineBasicBlock::iterator StackUpdateLoc = MBBI; 1678 if (stackUpdateCanBeMoved(MF)) { 1679 const std::vector<CalleeSavedInfo> & Info = MFI.getCalleeSavedInfo(); 1680 for (CalleeSavedInfo CSI : Info) { 1681 // If the callee saved register is spilled to another register abort the 1682 // stack update movement. 1683 if (CSI.isSpilledToReg()) { 1684 StackUpdateLoc = MBBI; 1685 break; 1686 } 1687 int FrIdx = CSI.getFrameIdx(); 1688 // If the frame index is not negative the callee saved info belongs to a 1689 // stack object that is not a fixed stack object. We ignore non-fixed 1690 // stack objects because we won't move the update of the stack pointer 1691 // past them. 1692 if (FrIdx >= 0) 1693 continue; 1694 1695 if (MFI.isFixedObjectIndex(FrIdx) && MFI.getObjectOffset(FrIdx) < 0) 1696 StackUpdateLoc--; 1697 else { 1698 // Abort the operation as we can't update all CSR restores. 1699 StackUpdateLoc = MBBI; 1700 break; 1701 } 1702 } 1703 } 1704 1705 if (FrameSize) { 1706 // In the prologue, the loaded (or persistent) stack pointer value is 1707 // offset by the STDU/STDUX/STWU/STWUX instruction. For targets with red 1708 // zone add this offset back now. 1709 1710 // If the function has a base pointer, the stack pointer has been copied 1711 // to it so we can restore it by copying in the other direction. 1712 if (HasRedZone && HasBP) { 1713 BuildMI(MBB, MBBI, dl, OrInst, RBReg). 1714 addReg(BPReg). 1715 addReg(BPReg); 1716 } 1717 // If this function contained a fastcc call and GuaranteedTailCallOpt is 1718 // enabled (=> hasFastCall()==true) the fastcc call might contain a tail 1719 // call which invalidates the stack pointer value in SP(0). So we use the 1720 // value of R31 in this case. Similar situation exists with setjmp. 
1721 else if (FI->hasFastCall() || MF.exposesReturnsTwice()) { 1722 assert(HasFP && "Expecting a valid frame pointer."); 1723 if (!HasRedZone) 1724 RBReg = FPReg; 1725 if (!isLargeFrame) { 1726 BuildMI(MBB, MBBI, dl, AddImmInst, RBReg) 1727 .addReg(FPReg).addImm(FrameSize); 1728 } else { 1729 BuildMI(MBB, MBBI, dl, LoadImmShiftedInst, ScratchReg) 1730 .addImm(FrameSize >> 16); 1731 BuildMI(MBB, MBBI, dl, OrImmInst, ScratchReg) 1732 .addReg(ScratchReg, RegState::Kill) 1733 .addImm(FrameSize & 0xFFFF); 1734 BuildMI(MBB, MBBI, dl, AddInst) 1735 .addReg(RBReg) 1736 .addReg(FPReg) 1737 .addReg(ScratchReg); 1738 } 1739 } else if (!isLargeFrame && !HasBP && !MFI.hasVarSizedObjects()) { 1740 if (HasRedZone) { 1741 BuildMI(MBB, StackUpdateLoc, dl, AddImmInst, SPReg) 1742 .addReg(SPReg) 1743 .addImm(FrameSize); 1744 } else { 1745 // Make sure that adding FrameSize will not overflow the max offset 1746 // size. 1747 assert(FPOffset <= 0 && BPOffset <= 0 && PBPOffset <= 0 && 1748 "Local offsets should be negative"); 1749 SPAdd = FrameSize; 1750 FPOffset += FrameSize; 1751 BPOffset += FrameSize; 1752 PBPOffset += FrameSize; 1753 } 1754 } else { 1755 // We don't want to use ScratchReg as a base register, because it 1756 // could happen to be R0. Use FP instead, but make sure to preserve it. 1757 if (!HasRedZone) { 1758 // If FP is not saved, copy it to ScratchReg. 1759 if (!HasFP) 1760 BuildMI(MBB, MBBI, dl, OrInst, ScratchReg) 1761 .addReg(FPReg) 1762 .addReg(FPReg); 1763 RBReg = FPReg; 1764 } 1765 BuildMI(MBB, StackUpdateLoc, dl, LoadInst, RBReg) 1766 .addImm(0) 1767 .addReg(SPReg); 1768 } 1769 } 1770 assert(RBReg != ScratchReg && "Should have avoided ScratchReg"); 1771 // If there is no red zone, ScratchReg may be needed for holding a useful 1772 // value (although not the base register). Make sure it is not overwritten 1773 // too early. 
1774 1775 // If we need to restore both the LR and the CR and we only have one 1776 // available scratch register, we must do them one at a time. 1777 if (MustSaveCR && SingleScratchReg && MustSaveLR) { 1778 // Here TempReg == ScratchReg, and in the absence of red zone ScratchReg 1779 // is live here. 1780 assert(HasRedZone && "Expecting red zone"); 1781 BuildMI(MBB, MBBI, dl, LoadWordInst, TempReg) 1782 .addImm(CRSaveOffset) 1783 .addReg(SPReg); 1784 for (unsigned i = 0, e = MustSaveCRs.size(); i != e; ++i) 1785 BuildMI(MBB, MBBI, dl, MoveToCRInst, MustSaveCRs[i]) 1786 .addReg(TempReg, getKillRegState(i == e-1)); 1787 } 1788 1789 // Delay restoring of the LR if ScratchReg is needed. This is ok, since 1790 // LR is stored in the caller's stack frame. ScratchReg will be needed 1791 // if RBReg is anything other than SP. We shouldn't use ScratchReg as 1792 // a base register anyway, because it may happen to be R0. 1793 bool LoadedLR = false; 1794 if (MustSaveLR && RBReg == SPReg && isInt<16>(LROffset+SPAdd)) { 1795 BuildMI(MBB, StackUpdateLoc, dl, LoadInst, ScratchReg) 1796 .addImm(LROffset+SPAdd) 1797 .addReg(RBReg); 1798 LoadedLR = true; 1799 } 1800 1801 if (MustSaveCR && !(SingleScratchReg && MustSaveLR)) { 1802 assert(RBReg == SPReg && "Should be using SP as a base register"); 1803 BuildMI(MBB, MBBI, dl, LoadWordInst, TempReg) 1804 .addImm(CRSaveOffset) 1805 .addReg(RBReg); 1806 } 1807 1808 if (HasFP) { 1809 // If there is red zone, restore FP directly, since SP has already been 1810 // restored. Otherwise, restore the value of FP into ScratchReg. 
1811 if (HasRedZone || RBReg == SPReg) 1812 BuildMI(MBB, MBBI, dl, LoadInst, FPReg) 1813 .addImm(FPOffset) 1814 .addReg(SPReg); 1815 else 1816 BuildMI(MBB, MBBI, dl, LoadInst, ScratchReg) 1817 .addImm(FPOffset) 1818 .addReg(RBReg); 1819 } 1820 1821 if (FI->usesPICBase()) 1822 BuildMI(MBB, MBBI, dl, LoadInst, PPC::R30) 1823 .addImm(PBPOffset) 1824 .addReg(RBReg); 1825 1826 if (HasBP) 1827 BuildMI(MBB, MBBI, dl, LoadInst, BPReg) 1828 .addImm(BPOffset) 1829 .addReg(RBReg); 1830 1831 // There is nothing more to be loaded from the stack, so now we can 1832 // restore SP: SP = RBReg + SPAdd. 1833 if (RBReg != SPReg || SPAdd != 0) { 1834 assert(!HasRedZone && "This should not happen with red zone"); 1835 // If SPAdd is 0, generate a copy. 1836 if (SPAdd == 0) 1837 BuildMI(MBB, MBBI, dl, OrInst, SPReg) 1838 .addReg(RBReg) 1839 .addReg(RBReg); 1840 else 1841 BuildMI(MBB, MBBI, dl, AddImmInst, SPReg) 1842 .addReg(RBReg) 1843 .addImm(SPAdd); 1844 1845 assert(RBReg != ScratchReg && "Should be using FP or SP as base register"); 1846 if (RBReg == FPReg) 1847 BuildMI(MBB, MBBI, dl, OrInst, FPReg) 1848 .addReg(ScratchReg) 1849 .addReg(ScratchReg); 1850 1851 // Now load the LR from the caller's stack frame. 1852 if (MustSaveLR && !LoadedLR) 1853 BuildMI(MBB, MBBI, dl, LoadInst, ScratchReg) 1854 .addImm(LROffset) 1855 .addReg(SPReg); 1856 } 1857 1858 if (MustSaveCR && 1859 !(SingleScratchReg && MustSaveLR)) 1860 for (unsigned i = 0, e = MustSaveCRs.size(); i != e; ++i) 1861 BuildMI(MBB, MBBI, dl, MoveToCRInst, MustSaveCRs[i]) 1862 .addReg(TempReg, getKillRegState(i == e-1)); 1863 1864 if (MustSaveLR) { 1865 // If ROP protection is required, an extra instruction is added to compute a 1866 // hash and then compare it to the hash stored in the prologue. 
    if (HasROPProtect) {
      const int SaveIndex = FI->getROPProtectionHashSaveIndex();
      const int64_t ImmOffset = MFI.getObjectOffset(SaveIndex);
      // The hashchk form used here only encodes offsets in [-512, -8] that
      // are 8-byte aligned; the prologue placed the hash accordingly.
      assert((ImmOffset <= -8 && ImmOffset >= -512) &&
             "ROP hash check location offset out of range.");
      assert(((ImmOffset & 0x7) == 0) &&
             "ROP hash check location offset must be 8 byte aligned.");
      BuildMI(MBB, StackUpdateLoc, dl, HashChk)
          .addReg(ScratchReg)
          .addImm(ImmOffset)
          .addReg(SPReg);
    }
    BuildMI(MBB, StackUpdateLoc, dl, MTLRInst).addReg(ScratchReg);
  }

  // Callee pop calling convention. Pop parameter/linkage area. Used for tail
  // call optimization
  if (IsReturnBlock) {
    unsigned RetOpcode = MBBI->getOpcode();
    if (MF.getTarget().Options.GuaranteedTailCallOpt &&
        (RetOpcode == PPC::BLR || RetOpcode == PPC::BLR8) &&
        MF.getFunction().getCallingConv() == CallingConv::Fast) {
      PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
      unsigned CallerAllocatedAmt = FI->getMinReservedArea();

      if (CallerAllocatedAmt && isInt<16>(CallerAllocatedAmt)) {
        // Amount fits in a 16-bit immediate: single add.
        BuildMI(MBB, MBBI, dl, AddImmInst, SPReg)
          .addReg(SPReg).addImm(CallerAllocatedAmt);
      } else {
        // Otherwise materialize the 32-bit amount in ScratchReg (lis/ori)
        // and add it to SP.
        BuildMI(MBB, MBBI, dl, LoadImmShiftedInst, ScratchReg)
          .addImm(CallerAllocatedAmt >> 16);
        BuildMI(MBB, MBBI, dl, OrImmInst, ScratchReg)
          .addReg(ScratchReg, RegState::Kill)
          .addImm(CallerAllocatedAmt & 0xFFFF);
        BuildMI(MBB, MBBI, dl, AddInst)
          .addReg(SPReg)
          .addReg(FPReg)
          .addReg(ScratchReg);
      }
    } else {
      createTailCallBranchInstr(MBB);
    }
  }
}

// Replace the pseudo tail-call return instruction terminating MBB with the
// real branch it stands for (TAILB/TAILBCTR/TAILBA and their 64-bit forms).
void PPCFrameLowering::createTailCallBranchInstr(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();

  // If we got this far a first terminator should exist.
  assert(MBBI != MBB.end() && "Failed to find the first terminator.");

  DebugLoc dl = MBBI->getDebugLoc();
  const PPCInstrInfo &TII = *Subtarget.getInstrInfo();

  // Create branch instruction for pseudo tail call return instruction.
  // The TCRETURNdi variants are direct calls. Valid targets for those are
  // MO_GlobalAddress operands as well as MO_ExternalSymbol with PC-Rel
  // since we can tail call external functions with PC-Rel (i.e. we don't need
  // to worry about different TOC pointers). Some of the external functions will
  // be MO_GlobalAddress while others like memcpy for example, are going to
  // be MO_ExternalSymbol.
  unsigned RetOpcode = MBBI->getOpcode();
  if (RetOpcode == PPC::TCRETURNdi) {
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    if (JumpTarget.isGlobal())
      BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
    else if (JumpTarget.isSymbol())
      BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB)).
        addExternalSymbol(JumpTarget.getSymbolName());
    else
      llvm_unreachable("Expecting Global or External Symbol");
  } else if (RetOpcode == PPC::TCRETURNri) {
    MBBI = MBB.getLastNonDebugInstr();
    assert(MBBI->getOperand(0).isReg() && "Expecting register operand.");
    BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBCTR));
  } else if (RetOpcode == PPC::TCRETURNai) {
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBA)).addImm(JumpTarget.getImm());
  } else if (RetOpcode == PPC::TCRETURNdi8) {
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    if (JumpTarget.isGlobal())
      BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB8)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());

// ...continues in the next span (TCRETURNdi8 symbol case onwards).

// Determine which callee-saved registers need saving, and set up the fixed
// stack objects (FP/BP/PIC-base/CR save slots) that the prologue/epilogue
// code relies on. LR and FP/BP/R30 are removed from SavedRegs because their
// saves are emitted explicitly by the prologue, not by generic spill code.
void PPCFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                            BitVector &SavedRegs,
                                            RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);

  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();

  // Save and clear the LR state.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  unsigned LR = RegInfo->getRARegister();
  FI->setMustSaveLR(MustSaveLR(MF, LR));
  SavedRegs.reset(LR);

  // Save R31 if necessary
  int FPSI = FI->getFramePointerSaveIndex();
  const bool isPPC64 = Subtarget.isPPC64();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // If the frame pointer save index hasn't been defined yet.
  if (!FPSI && needsFP(MF)) {
    // Find out the fixed offset of the frame pointer save area.
    int FPOffset = getFramePointerSaveOffset();
    // Allocate the frame index for frame pointer save area.
    FPSI = MFI.CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
    // Save the result.
    FI->setFramePointerSaveIndex(FPSI);
  }

  // Same for the base pointer: create its fixed save slot if needed.
  int BPSI = FI->getBasePointerSaveIndex();
  if (!BPSI && RegInfo->hasBasePointer(MF)) {
    int BPOffset = getBasePointerSaveOffset();
    // Allocate the frame index for the base pointer save area.
    BPSI = MFI.CreateFixedObject(isPPC64? 8 : 4, BPOffset, true);
    // Save the result.
    FI->setBasePointerSaveIndex(BPSI);
  }

  // Reserve stack space for the PIC Base register (R30).
  // Only used in SVR4 32-bit.
  if (FI->usesPICBase()) {
    int PBPSI = MFI.CreateFixedObject(4, -8, true);
    FI->setPICBasePointerSaveIndex(PBPSI);
  }

  // Make sure we don't explicitly spill r31, because, for example, we have
  // some inline asm which explicitly clobbers it, when we otherwise have a
  // frame pointer and are using r31's spill slot for the prologue/epilogue
  // code. Same goes for the base pointer and the PIC base register.
  if (needsFP(MF))
    SavedRegs.reset(isPPC64 ? PPC::X31 : PPC::R31);
  if (RegInfo->hasBasePointer(MF))
    SavedRegs.reset(RegInfo->getBaseRegister(MF));
  if (FI->usesPICBase())
    SavedRegs.reset(PPC::R30);

  // Reserve stack space to move the linkage area to in case of a tail call.
  int TCSPDelta = 0;
  if (MF.getTarget().Options.GuaranteedTailCallOpt &&
      (TCSPDelta = FI->getTailCallSPDelta()) < 0) {
    MFI.CreateFixedObject(-1 * TCSPDelta, TCSPDelta, true);
  }

  // Allocate the nonvolatile CR spill slot iff the function uses CR 2, 3, or 4.
  // For 64-bit SVR4, and all flavors of AIX we create a FixedStack
  // object at the offset of the CR-save slot in the linkage area. The actual
  // save and restore of the condition register will be created as part of the
  // prologue and epilogue insertion, but the FixedStack object is needed to
  // keep the CalleeSavedInfo valid.
  if ((SavedRegs.test(PPC::CR2) || SavedRegs.test(PPC::CR3) ||
       SavedRegs.test(PPC::CR4))) {
    const uint64_t SpillSize = 4; // Condition register is always 4 bytes.
    const int64_t SpillOffset =
        Subtarget.isPPC64() ? 8 : Subtarget.isAIXABI() ? 4 : -4;
    int FrameIdx =
        MFI.CreateFixedObject(SpillSize, SpillOffset,
                              /* IsImmutable */ true, /* IsAliased */ false);
    FI->setCRSpillFrameIndex(FrameIdx);
  }
}

// Assign final stack offsets to all callee-saved spill slots (FPR, GPR/G8R,
// CR, VR/SPE save areas), laid out downward from the incoming stack pointer,
// and add the register-scavenger emergency slot(s) if needed.
void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF,
                                                           RegScavenger *RS) const {
  // Get callee saved register information.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  // If the function is shrink-wrapped, and if the function has a tail call, the
  // tail call might not be in the new RestoreBlock, so real branch instruction
  // won't be generated by emitEpilogue(), because shrink-wrap has chosen new
  // RestoreBlock. So we handle this case here.
  if (MFI.getSavePoint() && MFI.hasTailCall()) {
    MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
    for (MachineBasicBlock &MBB : MF) {
      if (MBB.isReturnBlock() && (&MBB) != RestoreBlock)
        createTailCallBranchInstr(MBB);
    }
  }

  // Early exit if no callee saved registers are modified!
  if (CSI.empty() && !needsFP(MF)) {
    addScavengingSpillSlot(MF, RS);
    return;
  }

  // Track the lowest-numbered register saved in each class; the save areas
  // are sized from that register up to 31.
  unsigned MinGPR = PPC::R31;
  unsigned MinG8R = PPC::X31;
  unsigned MinFPR = PPC::F31;
  unsigned MinVR = Subtarget.hasSPE() ?
      PPC::S31 : PPC::V31;

  bool HasGPSaveArea = false;
  bool HasG8SaveArea = false;
  bool HasFPSaveArea = false;
  bool HasVRSaveArea = false;

  SmallVector<CalleeSavedInfo, 18> GPRegs;
  SmallVector<CalleeSavedInfo, 18> G8Regs;
  SmallVector<CalleeSavedInfo, 18> FPRegs;
  SmallVector<CalleeSavedInfo, 18> VRegs;

  // Bucket the callee-saved registers by register class and remember the
  // minimum register number seen in each class.
  for (const CalleeSavedInfo &I : CSI) {
    Register Reg = I.getReg();
    assert((!MF.getInfo<PPCFunctionInfo>()->mustSaveTOC() ||
            (Reg != PPC::X2 && Reg != PPC::R2)) &&
           "Not expecting to try to spill R2 in a function that must save TOC");
    if (PPC::GPRCRegClass.contains(Reg)) {
      HasGPSaveArea = true;

      GPRegs.push_back(I);

      if (Reg < MinGPR) {
        MinGPR = Reg;
      }
    } else if (PPC::G8RCRegClass.contains(Reg)) {
      HasG8SaveArea = true;

      G8Regs.push_back(I);

      if (Reg < MinG8R) {
        MinG8R = Reg;
      }
    } else if (PPC::F8RCRegClass.contains(Reg)) {
      HasFPSaveArea = true;

      FPRegs.push_back(I);

      if (Reg < MinFPR) {
        MinFPR = Reg;
      }
    } else if (PPC::CRBITRCRegClass.contains(Reg) ||
               PPC::CRRCRegClass.contains(Reg)) {
      ; // do nothing, as we already know whether CRs are spilled
    } else if (PPC::VRRCRegClass.contains(Reg) ||
               PPC::SPERCRegClass.contains(Reg)) {
      // Altivec and SPE are mutually exclusive, but have the same stack
      // alignment requirements, so overload the save area for both cases.
      HasVRSaveArea = true;

      VRegs.push_back(I);

      if (Reg < MinVR) {
        MinVR = Reg;
      }
    } else {
      llvm_unreachable("Unknown RegisterClass!");
    }
  }

  PPCFunctionInfo *PFI = MF.getInfo<PPCFunctionInfo>();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

  // LowerBound walks downward as each save area is placed; offsets are
  // negative relative to the incoming SP.
  int64_t LowerBound = 0;

  // Take into account stack space reserved for tail calls.
  int TCSPDelta = 0;
  if (MF.getTarget().Options.GuaranteedTailCallOpt &&
      (TCSPDelta = PFI->getTailCallSPDelta()) < 0) {
    LowerBound = TCSPDelta;
  }

  // The Floating-point register save area is right below the back chain word
  // of the previous stack frame.
  if (HasFPSaveArea) {
    for (unsigned i = 0, e = FPRegs.size(); i != e; ++i) {
      int FI = FPRegs[i].getFrameIdx();

      MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
    }

    LowerBound -= (31 - TRI->getEncodingValue(MinFPR) + 1) * 8;
  }

  // Check whether the frame pointer register is allocated. If so, make sure it
  // is spilled to the correct offset.
  if (needsFP(MF)) {
    int FI = PFI->getFramePointerSaveIndex();
    assert(FI && "No Frame Pointer Save Slot!");
    MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
    // FP is R31/X31, so no need to update MinGPR/MinG8R.
    HasGPSaveArea = true;
  }

  if (PFI->usesPICBase()) {
    int FI = PFI->getPICBasePointerSaveIndex();
    assert(FI && "No PIC Base Pointer Save Slot!");
    MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));

    MinGPR = std::min<unsigned>(MinGPR, PPC::R30);
    HasGPSaveArea = true;
  }

  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  if (RegInfo->hasBasePointer(MF)) {
    int FI = PFI->getBasePointerSaveIndex();
    assert(FI && "No Base Pointer Save Slot!");
    MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));

    Register BP = RegInfo->getBaseRegister(MF);
    if (PPC::G8RCRegClass.contains(BP)) {
      MinG8R = std::min<unsigned>(MinG8R, BP);
      HasG8SaveArea = true;
    } else if (PPC::GPRCRegClass.contains(BP)) {
      MinGPR = std::min<unsigned>(MinGPR, BP);
      HasGPSaveArea = true;
    }
  }

  // General register save area starts right below the Floating-point
  // register save area.
  if (HasGPSaveArea || HasG8SaveArea) {
    // Move general register save area spill slots down, taking into account
    // the size of the Floating-point register save area.
    for (unsigned i = 0, e = GPRegs.size(); i != e; ++i) {
      if (!GPRegs[i].isSpilledToReg()) {
        int FI = GPRegs[i].getFrameIdx();
        MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
      }
    }

    // Move general register save area spill slots down, taking into account
    // the size of the Floating-point register save area.
    for (unsigned i = 0, e = G8Regs.size(); i != e; ++i) {
      if (!G8Regs[i].isSpilledToReg()) {
        int FI = G8Regs[i].getFrameIdx();
        MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
      }
    }

    unsigned MinReg =
      std::min<unsigned>(TRI->getEncodingValue(MinGPR),
                         TRI->getEncodingValue(MinG8R));

    const unsigned GPRegSize = Subtarget.isPPC64() ? 8 : 4;
    LowerBound -= (31 - MinReg + 1) * GPRegSize;
  }

  // For 32-bit only, the CR save area is below the general register
  // save area. For 64-bit SVR4, the CR save area is addressed relative
  // to the stack pointer and hence does not need an adjustment here.
  // Only CR2 (the first nonvolatile spilled) has an associated frame
  // index so that we have a single uniform save area.
  if (spillsCR(MF) && Subtarget.is32BitELFABI()) {
    // Adjust the frame index of the CR spill slot.
    for (const auto &CSInfo : CSI) {
      if (CSInfo.getReg() == PPC::CR2) {
        int FI = CSInfo.getFrameIdx();
        MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
        break;
      }
    }

    LowerBound -= 4; // The CR save area is always 4 bytes long.
  }

  // Both Altivec and SPE have the same alignment and padding requirements
  // within the stack frame.
  if (HasVRSaveArea) {
    // Insert alignment padding, we need 16-byte alignment. Note: for positive
    // number the alignment formula is : y = (x + (n-1)) & (~(n-1)). But since
    // we are using negative number here (the stack grows downward). We should
    // use formula : y = x & (~(n-1)). Where x is the size before aligning, n
    // is the alignment size ( n = 16 here) and y is the size after aligning.
    assert(LowerBound <= 0 && "Expect LowerBound have a non-positive value!");
    LowerBound &= ~(15);

    for (unsigned i = 0, e = VRegs.size(); i != e; ++i) {
      int FI = VRegs[i].getFrameIdx();

      MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
    }
  }

  addScavengingSpillSlot(MF, RS);
}

void
PPCFrameLowering::addScavengingSpillSlot(MachineFunction &MF,
                                         RegScavenger *RS) const {
  // Reserve a slot closest to SP or frame pointer if we have a dynalloc or
  // a large stack, which will require scavenging a register to materialize a
  // large offset.

  // We need to have a scavenger spill slot for spills if the frame size is
  // large. In case there is no free register for large-offset addressing,
  // this slot is used for the necessary emergency spill. Also, we need the
  // slot for dynamic stack allocations.

  // The scavenger might be invoked if the frame offset does not fit into
  // the 16-bit immediate. We don't know the complete frame size here
  // because we've not yet computed callee-saved register spills or the
  // needed alignment padding.
  unsigned StackSize = determineFrameLayout(MF, true);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MFI.hasVarSizedObjects() || spillsCR(MF) || hasNonRISpills(MF) ||
      (hasSpills(MF) && !isInt<16>(StackSize))) {
    const TargetRegisterClass &GPRC = PPC::GPRCRegClass;
    const TargetRegisterClass &G8RC = PPC::G8RCRegClass;
    const TargetRegisterClass &RC = Subtarget.isPPC64() ?
        G8RC : GPRC;
    const TargetRegisterInfo &TRI = *Subtarget.getRegisterInfo();
    unsigned Size = TRI.getSpillSize(RC);
    Align Alignment = TRI.getSpillAlign(RC);
    RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Alignment, false));

    // Might we have over-aligned allocas?
    bool HasAlVars =
        MFI.hasVarSizedObjects() && MFI.getMaxAlign() > getStackAlign();

    // These kinds of spills might need two registers.
    if (spillsCR(MF) || HasAlVars)
      RS->addScavengingFrameIndex(
          MFI.CreateStackObject(Size, Alignment, false));
  }
}

// This function checks if a callee saved gpr can be spilled to a volatile
// vector register. This occurs for leaf functions when the option
// ppc-enable-pe-vector-spills is enabled. If there are any remaining registers
// which were not spilled to vectors, return false so the target independent
// code can handle them by assigning a FrameIdx to a stack slot.
bool PPCFrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI) const {

  if (CSI.empty())
    return true; // Early exit if no callee saved registers are modified!

  // Early exit if we cannot spill GPRs to volatile vector registers.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  if (!EnablePEVectorSpills || MFI.hasCalls() || !Subtarget.hasP9Vector())
    return false;

  // Build a BitVector of VSRs that can be used for spilling GPRs.
  BitVector BVAllocatable = TRI->getAllocatableSet(MF);
  BitVector BVCalleeSaved(TRI->getNumRegs());
  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
  for (unsigned i = 0; CSRegs[i]; ++i)
    BVCalleeSaved.set(CSRegs[i]);

  for (unsigned Reg : BVAllocatable.set_bits()) {
    // Set to 0 if the register is not a volatile VSX register, or if it is
    // used in the function.
    if (BVCalleeSaved[Reg] || !PPC::VSRCRegClass.contains(Reg) ||
        MF.getRegInfo().isPhysRegUsed(Reg))
      BVAllocatable.reset(Reg);
  }

  bool AllSpilledToReg = true;
  unsigned LastVSRUsedForSpill = 0;
  for (auto &CS : CSI) {
    if (BVAllocatable.none())
      return false;

    Register Reg = CS.getReg();

    // Only 64-bit GPRs are spilled to vector registers here; anything else
    // falls back to a stack slot.
    if (!PPC::G8RCRegClass.contains(Reg)) {
      AllSpilledToReg = false;
      continue;
    }

    // For P9, we can reuse LastVSRUsedForSpill to spill two GPRs
    // into one VSR using the mtvsrdd instruction.
    if (LastVSRUsedForSpill != 0) {
      CS.setDstReg(LastVSRUsedForSpill);
      BVAllocatable.reset(LastVSRUsedForSpill);
      LastVSRUsedForSpill = 0;
      continue;
    }

    unsigned VolatileVFReg = BVAllocatable.find_first();
    if (VolatileVFReg < BVAllocatable.size()) {
      CS.setDstReg(VolatileVFReg);
      LastVSRUsedForSpill = VolatileVFReg;
    } else {
      AllSpilledToReg = false;
    }
  }
  return AllSpilledToReg;
}

// Emit the callee-saved register spills at the given insertion point. CR
// fields are funneled through R12 (32-bit ELF) or deferred to the prologue;
// GPRs marked isSpilledToReg() go into volatile VSRs via mtvsrd/mtvsrdd.
bool PPCFrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {

  MachineFunction *MF = MBB.getParent();
  const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
  PPCFunctionInfo *FI = MF->getInfo<PPCFunctionInfo>();
  bool MustSaveTOC = FI->mustSaveTOC();
  DebugLoc DL;
  bool CRSpilled = false;
  MachineInstrBuilder CRMIB;
  BitVector Spilled(TRI->getNumRegs());

  VSRContainingGPRs.clear();

  // Map each VSR to the GPRs to be spilled into it. A single VSR can contain
  // one or two GPRs, so we need a table to record information for later
  // save/restore.
  llvm::for_each(CSI, [&](const CalleeSavedInfo &Info) {
    if (Info.isSpilledToReg()) {
      auto &SpilledVSR =
          VSRContainingGPRs.FindAndConstruct(Info.getDstReg()).second;
      assert(SpilledVSR.second == 0 &&
             "Can't spill more than two GPRs into VSR!");
      if (SpilledVSR.first == 0)
        SpilledVSR.first = Info.getReg();
      else
        SpilledVSR.second = Info.getReg();
    }
  });

  for (const CalleeSavedInfo &I : CSI) {
    Register Reg = I.getReg();

    // CR2 through CR4 are the nonvolatile CR fields.
    bool IsCRField = PPC::CR2 <= Reg && Reg <= PPC::CR4;

    // Add the callee-saved register as live-in; it's killed at the spill.
    // Do not do this for callee-saved registers that are live-in to the
    // function because they will already be marked live-in and this will be
    // adding it for a second time. It is an error to add the same register
    // to the set more than once.
    const MachineRegisterInfo &MRI = MF->getRegInfo();
    bool IsLiveIn = MRI.isLiveIn(Reg);
    if (!IsLiveIn)
      MBB.addLiveIn(Reg);

    // Subsequent nonvolatile CR fields piggyback on the MFCR already built
    // for the first one; just mark them as implicitly killed by it.
    if (CRSpilled && IsCRField) {
      CRMIB.addReg(Reg, RegState::ImplicitKill);
      continue;
    }

    // The actual spill will happen in the prologue.
    if ((Reg == PPC::X2 || Reg == PPC::R2) && MustSaveTOC)
      continue;

    // Insert the spill to the stack frame.
    if (IsCRField) {
      PPCFunctionInfo *FuncInfo = MF->getInfo<PPCFunctionInfo>();
      if (!Subtarget.is32BitELFABI()) {
        // The actual spill will happen at the start of the prologue.
        FuncInfo->addMustSaveCR(Reg);
      } else {
        CRSpilled = true;
        FuncInfo->setSpillsCR();

        // 32-bit: FP-relative. Note that we made sure CR2-CR4 all have
        // the same frame index in PPCRegisterInfo::hasReservedSpillSlot.
        CRMIB = BuildMI(*MF, DL, TII.get(PPC::MFCR), PPC::R12)
                  .addReg(Reg, RegState::ImplicitKill);

        MBB.insert(MI, CRMIB);
        MBB.insert(MI, addFrameReference(BuildMI(*MF, DL, TII.get(PPC::STW))
                                         .addReg(PPC::R12,
                                                 getKillRegState(true)),
                                         I.getFrameIdx()));
      }
    } else {
      if (I.isSpilledToReg()) {
        unsigned Dst = I.getDstReg();

        // A VSR may receive two GPRs; emit its move instruction only once.
        if (Spilled[Dst])
          continue;

        if (VSRContainingGPRs[Dst].second != 0) {
          assert(Subtarget.hasP9Vector() &&
                 "mtvsrdd is unavailable on pre-P9 targets.");

          NumPESpillVSR += 2;
          BuildMI(MBB, MI, DL, TII.get(PPC::MTVSRDD), Dst)
              .addReg(VSRContainingGPRs[Dst].first, getKillRegState(true))
              .addReg(VSRContainingGPRs[Dst].second, getKillRegState(true));
        } else if (VSRContainingGPRs[Dst].second == 0) {
          assert(Subtarget.hasP8Vector() &&
                 "Can't move GPR to VSR on pre-P8 targets.");

          ++NumPESpillVSR;
          BuildMI(MBB, MI, DL, TII.get(PPC::MTVSRD),
                  TRI->getSubReg(Dst, PPC::sub_64))
              .addReg(VSRContainingGPRs[Dst].first, getKillRegState(true));
        } else {
          llvm_unreachable("More than two GPRs spilled to a VSR!");
        }
        Spilled.set(Dst);
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        // Use !IsLiveIn for the kill flag.
        // We do not want to kill registers that are live in this function
        // before their use because they will become undefined registers.
        // Functions without NoUnwind need to preserve the order of elements in
        // saved vector registers.
        if (Subtarget.needsSwapsForVSXMemOps() &&
            !MF->getFunction().hasFnAttribute(Attribute::NoUnwind))
          TII.storeRegToStackSlotNoUpd(MBB, MI, Reg, !IsLiveIn,
                                       I.getFrameIdx(), RC, TRI);
        else
          TII.storeRegToStackSlot(MBB, MI, Reg, !IsLiveIn, I.getFrameIdx(),
                                  RC, TRI);
      }
    }
  }
  return true;
}

// Reload the spilled nonvolatile CR fields (CR2-CR4) through R12 on 32-bit
// ELF. All spilled fields share the single frame index at CSI[CSIIndex].
// NOTE(review): the 'is31' parameter is currently unused in this body; it is
// kept for interface stability with the call site.
static void restoreCRs(bool is31, bool CR2Spilled, bool CR3Spilled,
                       bool CR4Spilled, MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator MI,
                       ArrayRef<CalleeSavedInfo> CSI, unsigned CSIIndex) {

  MachineFunction *MF = MBB.getParent();
  const PPCInstrInfo &TII = *MF->getSubtarget<PPCSubtarget>().getInstrInfo();
  DebugLoc DL;
  unsigned MoveReg = PPC::R12;

  // 32-bit: FP-relative
  MBB.insert(MI,
             addFrameReference(BuildMI(*MF, DL, TII.get(PPC::LWZ), MoveReg),
                               CSI[CSIIndex].getFrameIdx()));

  // R12 is killed by whichever mtocrf is the last one emitted.
  unsigned RestoreOp = PPC::MTOCRF;
  if (CR2Spilled)
    MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR2)
               .addReg(MoveReg, getKillRegState(!CR3Spilled && !CR4Spilled)));

  if (CR3Spilled)
    MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR3)
               .addReg(MoveReg, getKillRegState(!CR4Spilled)));

  if (CR4Spilled)
    MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR4)
               .addReg(MoveReg, getKillRegState(true)));
}

// Lower the ADJCALLSTACKDOWN/ADJCALLSTACKUP pseudos. With guaranteed tail
// call optimization, re-apply the stack adjustment the callee popped; the
// pseudos themselves are simply erased.
MachineBasicBlock::iterator PPCFrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  if (MF.getTarget().Options.GuaranteedTailCallOpt &&
      I->getOpcode() == PPC::ADJCALLSTACKUP) {
    // Add (actually subtract) back the amount the callee popped on return.
    if (int CalleeAmt = I->getOperand(1).getImm()) {
      bool is64Bit = Subtarget.isPPC64();
      CalleeAmt *= -1;
      unsigned StackReg = is64Bit ?
          PPC::X1 : PPC::R1;
      unsigned TmpReg = is64Bit ? PPC::X0 : PPC::R0;
      unsigned ADDIInstr = is64Bit ? PPC::ADDI8 : PPC::ADDI;
      unsigned ADDInstr = is64Bit ? PPC::ADD8 : PPC::ADD4;
      unsigned LISInstr = is64Bit ? PPC::LIS8 : PPC::LIS;
      unsigned ORIInstr = is64Bit ? PPC::ORI8 : PPC::ORI;
      const DebugLoc &dl = I->getDebugLoc();

      if (isInt<16>(CalleeAmt)) {
        // Amount fits in a 16-bit immediate: single addi.
        BuildMI(MBB, I, dl, TII.get(ADDIInstr), StackReg)
            .addReg(StackReg, RegState::Kill)
            .addImm(CalleeAmt);
      } else {
        // Materialize the 32-bit amount in TmpReg (lis/ori), then add.
        MachineBasicBlock::iterator MBBI = I;
        BuildMI(MBB, MBBI, dl, TII.get(LISInstr), TmpReg)
            .addImm(CalleeAmt >> 16);
        BuildMI(MBB, MBBI, dl, TII.get(ORIInstr), TmpReg)
            .addReg(TmpReg, RegState::Kill)
            .addImm(CalleeAmt & 0xFFFF);
        BuildMI(MBB, MBBI, dl, TII.get(ADDInstr), StackReg)
            .addReg(StackReg, RegState::Kill)
            .addReg(TmpReg);
      }
    }
  }
  // Simply discard ADJCALLSTACKDOWN, ADJCALLSTACKUP instructions.
  return MBB.erase(I);
}

// True for the nonvolatile condition register fields CR2-CR4.
static bool isCalleeSavedCR(unsigned Reg) {
  return PPC::CR2 == Reg || Reg == PPC::CR3 || Reg == PPC::CR4;
}

bool PPCFrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
  PPCFunctionInfo *FI = MF->getInfo<PPCFunctionInfo>();
  bool MustSaveTOC = FI->mustSaveTOC();
  bool CR2Spilled = false;
  bool CR3Spilled = false;
  bool CR4Spilled = false;
  unsigned CSIIndex = 0;
  BitVector Restored(TRI->getNumRegs());

  // Initialize insertion-point logic; we will be restoring in reverse
  // order of spill.
2579 MachineBasicBlock::iterator I = MI, BeforeI = I; 2580 bool AtStart = I == MBB.begin(); 2581 2582 if (!AtStart) 2583 --BeforeI; 2584 2585 for (unsigned i = 0, e = CSI.size(); i != e; ++i) { 2586 Register Reg = CSI[i].getReg(); 2587 2588 if ((Reg == PPC::X2 || Reg == PPC::R2) && MustSaveTOC) 2589 continue; 2590 2591 // Restore of callee saved condition register field is handled during 2592 // epilogue insertion. 2593 if (isCalleeSavedCR(Reg) && !Subtarget.is32BitELFABI()) 2594 continue; 2595 2596 if (Reg == PPC::CR2) { 2597 CR2Spilled = true; 2598 // The spill slot is associated only with CR2, which is the 2599 // first nonvolatile spilled. Save it here. 2600 CSIIndex = i; 2601 continue; 2602 } else if (Reg == PPC::CR3) { 2603 CR3Spilled = true; 2604 continue; 2605 } else if (Reg == PPC::CR4) { 2606 CR4Spilled = true; 2607 continue; 2608 } else { 2609 // On 32-bit ELF when we first encounter a non-CR register after seeing at 2610 // least one CR register, restore all spilled CRs together. 
2611 if (CR2Spilled || CR3Spilled || CR4Spilled) { 2612 bool is31 = needsFP(*MF); 2613 restoreCRs(is31, CR2Spilled, CR3Spilled, CR4Spilled, MBB, I, CSI, 2614 CSIIndex); 2615 CR2Spilled = CR3Spilled = CR4Spilled = false; 2616 } 2617 2618 if (CSI[i].isSpilledToReg()) { 2619 DebugLoc DL; 2620 unsigned Dst = CSI[i].getDstReg(); 2621 2622 if (Restored[Dst]) 2623 continue; 2624 2625 if (VSRContainingGPRs[Dst].second != 0) { 2626 assert(Subtarget.hasP9Vector()); 2627 NumPEReloadVSR += 2; 2628 BuildMI(MBB, I, DL, TII.get(PPC::MFVSRLD), 2629 VSRContainingGPRs[Dst].second) 2630 .addReg(Dst); 2631 BuildMI(MBB, I, DL, TII.get(PPC::MFVSRD), 2632 VSRContainingGPRs[Dst].first) 2633 .addReg(TRI->getSubReg(Dst, PPC::sub_64), getKillRegState(true)); 2634 } else if (VSRContainingGPRs[Dst].second == 0) { 2635 assert(Subtarget.hasP8Vector()); 2636 ++NumPEReloadVSR; 2637 BuildMI(MBB, I, DL, TII.get(PPC::MFVSRD), 2638 VSRContainingGPRs[Dst].first) 2639 .addReg(TRI->getSubReg(Dst, PPC::sub_64), getKillRegState(true)); 2640 } else { 2641 llvm_unreachable("More than two GPRs spilled to a VSR!"); 2642 } 2643 2644 Restored.set(Dst); 2645 2646 } else { 2647 // Default behavior for non-CR saves. 2648 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg); 2649 2650 // Functions without NoUnwind need to preserve the order of elements in 2651 // saved vector registers. 2652 if (Subtarget.needsSwapsForVSXMemOps() && 2653 !MF->getFunction().hasFnAttribute(Attribute::NoUnwind)) 2654 TII.loadRegFromStackSlotNoUpd(MBB, I, Reg, CSI[i].getFrameIdx(), RC, 2655 TRI); 2656 else 2657 TII.loadRegFromStackSlot(MBB, I, Reg, CSI[i].getFrameIdx(), RC, TRI); 2658 2659 assert(I != MBB.begin() && 2660 "loadRegFromStackSlot didn't insert any code!"); 2661 } 2662 } 2663 2664 // Insert in reverse order. 2665 if (AtStart) 2666 I = MBB.begin(); 2667 else { 2668 I = BeforeI; 2669 ++I; 2670 } 2671 } 2672 2673 // If we haven't yet spilled the CRs, do so now. 
2674 if (CR2Spilled || CR3Spilled || CR4Spilled) { 2675 assert(Subtarget.is32BitELFABI() && 2676 "Only set CR[2|3|4]Spilled on 32-bit SVR4."); 2677 bool is31 = needsFP(*MF); 2678 restoreCRs(is31, CR2Spilled, CR3Spilled, CR4Spilled, MBB, I, CSI, CSIIndex); 2679 } 2680 2681 return true; 2682 } 2683 2684 uint64_t PPCFrameLowering::getTOCSaveOffset() const { 2685 return TOCSaveOffset; 2686 } 2687 2688 uint64_t PPCFrameLowering::getFramePointerSaveOffset() const { 2689 return FramePointerSaveOffset; 2690 } 2691 2692 uint64_t PPCFrameLowering::getBasePointerSaveOffset() const { 2693 return BasePointerSaveOffset; 2694 } 2695 2696 bool PPCFrameLowering::enableShrinkWrapping(const MachineFunction &MF) const { 2697 if (MF.getInfo<PPCFunctionInfo>()->shrinkWrapDisabled()) 2698 return false; 2699 return !MF.getSubtarget<PPCSubtarget>().is32BitELFABI(); 2700 } 2701