//===- AArch64FrameLowering.cpp - AArch64 Frame Lowering -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of TargetFrameLowering class.
//
// On AArch64, stack frames are structured as follows:
//
// The stack grows downward.
//
// All of the individual frame areas on the frame below are optional, i.e. it's
// possible to create a function so that the particular area isn't present
// in the frame.
//
// At function entry, the "frame" looks as follows:
//
// |                                   | Higher address
// |-----------------------------------|
// |                                   |
// | arguments passed on the stack     |
// |                                   |
// |-----------------------------------| <- sp
// |                                   | Lower address
//
//
// After the prologue has run, the frame has the following general structure.
// Note that this doesn't depict the case where a red-zone is used. Also,
// technically the last frame area (VLAs) doesn't get created until the main
// function body runs, after the prologue. However, it's depicted here for
// completeness.
//
// |                                   | Higher address
// |-----------------------------------|
// |                                   |
// | arguments passed on the stack     |
// |                                   |
// |-----------------------------------|
// |                                   |
// | (Win64 only) varargs from reg     |
// |                                   |
// |-----------------------------------|
// |                                   |
// | callee-saved gpr registers        | <--.
// |                                   |    | On Darwin platforms these
// |- - - - - - - - - - - - - - - - - -|    | callee saves are swapped,
// | prev_lr                           |    |      (frame record first)
// | prev_fp                           | <--'
// | async context if needed           |
// | (a.k.a. "frame record")           |
// |-----------------------------------| <- fp(=x29)
// |                                   |
// | callee-saved fp/simd/SVE regs     |
// |                                   |
// |-----------------------------------|
// |                                   |
// |        SVE stack objects          |
// |                                   |
// |-----------------------------------|
// |.empty.space.to.make.part.below....|
// |.aligned.in.case.it.needs.more.than| (size of this area is unknown at
// |.the.standard.16-byte.alignment....|  compile time; if present)
// |-----------------------------------|
// |                                   |
// | local variables of fixed size     |
// | including spill slots             |
// |-----------------------------------| <- bp(not defined by ABI,
// |.variable-sized.local.variables....|       LLVM chooses X19)
// |.(VLAs)............................| (size of this area is unknown at
// |...................................|  compile time)
// |-----------------------------------| <- sp
// |                                   | Lower address
//
//
// To access the data in a frame, a constant offset from one of the pointers
// (fp, bp, sp) must be computable at compile time. The size of the areas with
// a dotted background cannot be computed at compile time if they are present,
// so all three of fp, bp and sp must be set up to be able to access all
// contents in the frame areas, assuming all of the frame areas are non-empty.
//
// For most functions, some of the frame areas are empty. For those functions,
// it may not be necessary to set up fp or bp:
// * A base pointer is definitely needed when there are both VLAs and local
//   variables with more-than-default alignment requirements.
// * A frame pointer is definitely needed when there are local variables with
//   more-than-default alignment requirements.
//
// For Darwin platforms the frame-record (fp, lr) is stored at the top of the
// callee-saved area, since the unwind encoding does not allow for encoding
// this dynamically and existing tools depend on this layout. For other
// platforms, the frame-record is stored at the bottom of the (gpr) callee-saved
// area to allow SVE stack objects (allocated directly below the callee-saves,
// if available) to be accessed directly from the frame pointer.
// The SVE spill/fill instructions have VL-scaled addressing modes such
// as:
//     ldr z8, [fp, #-7 mul vl]
// For SVE the size of the vector length (VL) is not known at compile-time, so
// '#-7 mul vl' is an offset that can only be evaluated at runtime. With this
// layout, we don't need to add an unscaled offset to the frame pointer before
// accessing the SVE object in the frame.
//
// In some cases when a base pointer is not strictly needed, it is generated
// anyway when offsets from the frame pointer to access local variables become
// so large that the offset can't be encoded in the immediate fields of loads
// or stores.
//
// Outgoing function arguments must be at the bottom of the stack frame when
// calling another function. If we do not have variable-sized stack objects, we
// can allocate a "reserved call frame" area at the bottom of the local
// variable area, large enough for all outgoing calls. If we do have VLAs, then
// the stack pointer must be decremented and incremented around each call to
// make space for the arguments below the VLAs.
//
// FIXME: also explain the redzone concept.
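//
// Briefly, on the red zone (see canUseRedZone() below): when enabled via
// -aarch64-redzone, a leaf function whose locals fit in the area immediately
// below sp (128 bytes by default) may use that area without adjusting sp at
// all, e.g. (illustrative):
//
//     str x0, [sp, #-8]   // spill into the red zone; sp never moves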
//
//===----------------------------------------------------------------------===//

#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "frame-info"

static cl::opt<bool> EnableRedZone("aarch64-redzone",
                                   cl::desc("enable use of redzone on AArch64"),
                                   cl::init(false), cl::Hidden);

static cl::opt<bool>
    ReverseCSRRestoreSeq("reverse-csr-restore-seq",
                         cl::desc("reverse the CSR restore sequence"),
                         cl::init(false), cl::Hidden);

static cl::opt<bool> StackTaggingMergeSetTag(
    "stack-tagging-merge-settag",
    cl::desc("merge settag instruction in function epilog"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> OrderFrameObjects("aarch64-order-frame-objects",
                                       cl::desc("sort stack allocations"),
                                       cl::init(true), cl::Hidden);

cl::opt<bool> EnableHomogeneousPrologEpilog(
    "homogeneous-prolog-epilog", cl::init(false), cl::ZeroOrMore, cl::Hidden,
    cl::desc("Emit homogeneous prologue and epilogue for the size "
             "optimization (default = off)"));

STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");

/// Returns how much of the incoming argument stack area (in bytes) we should
/// clean up in an epilogue. For the C calling convention this will be 0, for
/// guaranteed tail call conventions it can be positive (a normal return or a
/// tail call to a function that uses less stack space for arguments) or
/// negative (for a tail call to a function that needs more stack space than us
/// for arguments).
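/// For example (illustrative): with 32 bytes of incoming stack arguments under
/// a guaranteed tail call convention, a normal return restores 32 bytes, while
/// a tail call to a callee needing 48 bytes of argument stack yields -16.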
static int64_t getArgumentStackToRestore(MachineFunction &MF,
                                         MachineBasicBlock &MBB) {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  bool IsTailCallReturn = false;
  if (MBB.end() != MBBI) {
    unsigned RetOpcode = MBBI->getOpcode();
    IsTailCallReturn = RetOpcode == AArch64::TCRETURNdi ||
                       RetOpcode == AArch64::TCRETURNri ||
                       RetOpcode == AArch64::TCRETURNriBTI;
  }
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();

  int64_t ArgumentPopSize = 0;
  if (IsTailCallReturn) {
    MachineOperand &StackAdjust = MBBI->getOperand(1);

    // For a tail-call in a callee-pops-arguments environment, some or all of
    // the stack may actually be in use for the call's arguments; this is
    // calculated during LowerCall and consumed here...
    ArgumentPopSize = StackAdjust.getImm();
  } else {
    // ... otherwise the amount to pop is *all* of the argument space,
    // conveniently stored in the MachineFunctionInfo by
    // LowerFormalArguments. This will, of course, be zero for the C calling
    // convention.
    ArgumentPopSize = AFI->getArgumentStackToRestore();
  }

  return ArgumentPopSize;
}

static bool produceCompactUnwindFrame(MachineFunction &MF);
static bool needsWinCFI(const MachineFunction &MF);
static StackOffset getSVEStackSize(const MachineFunction &MF);

/// Returns true if homogeneous prolog or epilog code can be emitted for the
/// size optimization. If possible, a frame helper call is injected. When an
/// Exit block is given, this check is for the epilog.
bool AArch64FrameLowering::homogeneousPrologEpilog(
    MachineFunction &MF, MachineBasicBlock *Exit) const {
  if (!MF.getFunction().hasMinSize())
    return false;
  if (!EnableHomogeneousPrologEpilog)
    return false;
  if (ReverseCSRRestoreSeq)
    return false;
  if (EnableRedZone)
    return false;

  // TODO: Windows is not supported yet.
  if (needsWinCFI(MF))
    return false;
  // TODO: SVE is not supported yet.
  if (getSVEStackSize(MF))
    return false;

  // Bail on stack adjustment needed on return for simplicity.
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  if (MFI.hasVarSizedObjects() || RegInfo->hasStackRealignment(MF))
    return false;
  if (Exit && getArgumentStackToRestore(MF, *Exit))
    return false;

  return true;
}

/// Returns true if CSRs should be paired.
bool AArch64FrameLowering::producePairRegisters(MachineFunction &MF) const {
  return produceCompactUnwindFrame(MF) || homogeneousPrologEpilog(MF);
}

/// This is the biggest offset to the stack pointer we can encode in aarch64
/// instructions (without using a separate calculation and a temp register).
/// Note that the exceptions here are vector stores/loads, which cannot encode
/// any displacements (see estimateRSStackSizeLimit(),
/// isAArch64FrameOffsetLegal()).
static const unsigned DefaultSafeSPDisplacement = 255;

/// Look at each instruction that references stack frames and return the stack
/// size limit beyond which some of these instructions will require a scratch
/// register during their expansion later.
static unsigned estimateRSStackSizeLimit(MachineFunction &MF) {
  // FIXME: For now, just conservatively guesstimate based on unscaled indexing
  // range (a signed 9-bit immediate, i.e. [-256, 255]). We'll often end up
  // allocating an unnecessary spill slot, but realistically that's not a big
  // deal at this stage of the game.
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (MI.isDebugInstr() || MI.isPseudo() ||
          MI.getOpcode() == AArch64::ADDXri ||
          MI.getOpcode() == AArch64::ADDSXri)
        continue;

      for (const MachineOperand &MO : MI.operands()) {
        if (!MO.isFI())
          continue;

        StackOffset Offset;
        if (isAArch64FrameOffsetLegal(MI, Offset, nullptr, nullptr, nullptr) ==
            AArch64FrameOffsetCannotUpdate)
          return 0;
      }
    }
  }
  return DefaultSafeSPDisplacement;
}

TargetStackID::Value
AArch64FrameLowering::getStackIDForScalableVectors() const {
  return TargetStackID::ScalableVector;
}

/// Returns the size of the fixed object area (allocated next to sp on entry).
/// On Win64 this may include a var args area and an UnwindHelp object for EH.
static unsigned getFixedObjectSize(const MachineFunction &MF,
                                   const AArch64FunctionInfo *AFI, bool IsWin64,
                                   bool IsFunclet) {
  if (!IsWin64 || IsFunclet) {
    return AFI->getTailCallReservedStack();
  } else {
    if (AFI->getTailCallReservedStack() != 0)
      report_fatal_error("cannot generate ABI-changing tail call for Win64");
    // Var args are stored here in the primary function.
    const unsigned VarArgsArea = AFI->getVarArgsGPRSize();
    // To support EH funclets we allocate an UnwindHelp object.
    const unsigned UnwindHelpObject = (MF.hasEHFunclets() ? 8 : 0);
    return alignTo(VarArgsArea + UnwindHelpObject, 16);
  }
}

/// Returns the size of the entire SVE stack frame (callee-saves + spills).
static StackOffset getSVEStackSize(const MachineFunction &MF) {
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  return StackOffset::getScalable((int64_t)AFI->getStackSizeSVE());
}

bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
  if (!EnableRedZone)
    return false;

  // Don't use the red zone if the function explicitly asks us not to.
  // This is typically used for kernel code.
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const unsigned RedZoneSize =
      Subtarget.getTargetLowering()->getRedZoneSize(MF.getFunction());
  if (!RedZoneSize)
    return false;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  uint64_t NumBytes = AFI->getLocalStackSize();

  return !(MFI.hasCalls() || hasFP(MF) || NumBytes > RedZoneSize ||
           getSVEStackSize(MF));
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register.
bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  // Win64 EH requires a frame pointer if funclets are present, as the locals
  // are accessed off the frame pointer in both the parent function and the
  // funclets.
  if (MF.hasEHFunclets())
    return true;
  // Retain behavior of always omitting the FP for leaf functions when
  // possible.
  if (MF.getTarget().Options.DisableFramePointerElim(MF))
    return true;
  if (MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
      MFI.hasStackMap() || MFI.hasPatchPoint() ||
      RegInfo->hasStackRealignment(MF))
    return true;
  // With large call frames around we may need to use FP to access the
  // scavenging emergency spill slot.
  //
  // Unfortunately some calls to hasFP() like machine verifier ->
  // getReservedReg() -> hasFP in the middle of global isel are too early
  // to know the max call frame size. Hopefully conservatively returning "true"
  // in those cases is fine.
  // DefaultSafeSPDisplacement is fine as we only emergency spill GP regs.
  if (!MFI.isMaxCallFrameSizeComputed() ||
      MFI.getMaxCallFrameSize() > DefaultSafeSPDisplacement)
    return true;

  return false;
}

/// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
/// not required, we reserve argument space for call sites in the function
/// immediately on entry to the current function. This eliminates the need for
/// add/sub sp brackets around call sites. Returns true if the call frame is
/// included as part of the stack frame.
bool
AArch64FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo().hasVarSizedObjects();
}

MachineBasicBlock::iterator AArch64FrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  const AArch64InstrInfo *TII =
      static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
  DebugLoc DL = I->getDebugLoc();
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  if (!hasReservedCallFrame(MF)) {
    int64_t Amount = I->getOperand(0).getImm();
    Amount = alignTo(Amount, getStackAlign());
    if (!IsDestroy)
      Amount = -Amount;

    // N.b. if CalleePopAmount is valid but zero (i.e. callee would pop, but it
    // doesn't have to pop anything), then the first operand will be zero too,
    // so this adjustment is a no-op.
    if (CalleePopAmount == 0) {
      // FIXME: in-function stack adjustment for calls is limited to 24 bits
      // because there's no guaranteed temporary register available.
      //
      // ADD/SUB (immediate) has only LSL #0 and LSL #12 available.
      // 1) For offset <= 12-bit, we use LSL #0.
      // 2) For 12-bit <= offset <= 24-bit, we use two instructions. One uses
      //    LSL #0, and the other uses LSL #12.
      //
      // Most call frames will be allocated at the start of a function so
      // this is OK, but it is a limitation that needs dealing with.
      assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large");
      emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP,
                      StackOffset::getFixed(Amount), TII);
    }
  } else if (CalleePopAmount != 0) {
    // If the calling convention demands that the callee pops arguments from
    // the stack, we want to add it back if we have a reserved call frame.
    assert(CalleePopAmount < 0xffffff && "call frame too large");
    emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(-(int64_t)CalleePopAmount), TII);
  }
  return MBB.erase(I);
}

// Convenience function to create a DWARF expression for
//   Expr + NumBytes + NumVGScaledBytes * AArch64::VG
static void appendVGScaledOffsetExpr(SmallVectorImpl<char> &Expr, int NumBytes,
                                     int NumVGScaledBytes, unsigned VG,
                                     llvm::raw_string_ostream &Comment) {
  uint8_t buffer[16];

  if (NumBytes) {
    Expr.push_back(dwarf::DW_OP_consts);
    Expr.append(buffer, buffer + encodeSLEB128(NumBytes, buffer));
    Expr.push_back((uint8_t)dwarf::DW_OP_plus);
    Comment << (NumBytes < 0 ? " - " : " + ") << std::abs(NumBytes);
  }

  if (NumVGScaledBytes) {
    Expr.push_back((uint8_t)dwarf::DW_OP_consts);
    Expr.append(buffer, buffer + encodeSLEB128(NumVGScaledBytes, buffer));

    Expr.push_back((uint8_t)dwarf::DW_OP_bregx);
    Expr.append(buffer, buffer + encodeULEB128(VG, buffer));
    Expr.push_back(0);

    Expr.push_back((uint8_t)dwarf::DW_OP_mul);
    Expr.push_back((uint8_t)dwarf::DW_OP_plus);

    Comment << (NumVGScaledBytes < 0 ? " - " : " + ")
            << std::abs(NumVGScaledBytes) << " * VG";
  }
}

// Creates an MCCFIInstruction:
//   { DW_CFA_def_cfa_expression, ULEB128 (sizeof expr), expr }
MCCFIInstruction AArch64FrameLowering::createDefCFAExpressionFromSP(
    const TargetRegisterInfo &TRI, const StackOffset &OffsetFromSP) const {
  int64_t NumBytes, NumVGScaledBytes;
  AArch64InstrInfo::decomposeStackOffsetForDwarfOffsets(OffsetFromSP, NumBytes,
                                                        NumVGScaledBytes);

  std::string CommentBuffer = "sp";
  llvm::raw_string_ostream Comment(CommentBuffer);

  // Build up the expression (SP + NumBytes + NumVGScaledBytes * AArch64::VG).
  SmallString<64> Expr;
  Expr.push_back((uint8_t)(dwarf::DW_OP_breg0 + /*SP*/ 31));
  Expr.push_back(0);
  appendVGScaledOffsetExpr(Expr, NumBytes, NumVGScaledBytes,
                           TRI.getDwarfRegNum(AArch64::VG, true), Comment);

  // Wrap this into DW_CFA_def_cfa_expression.
  SmallString<64> DefCfaExpr;
  DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
  uint8_t buffer[16];
  DefCfaExpr.append(buffer, buffer + encodeULEB128(Expr.size(), buffer));
  DefCfaExpr.append(Expr.str());
  return MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str(),
                                        Comment.str());
}

MCCFIInstruction AArch64FrameLowering::createCfaOffset(
    const TargetRegisterInfo &TRI, unsigned Reg,
    const StackOffset &OffsetFromDefCFA) const {
  int64_t NumBytes, NumVGScaledBytes;
  AArch64InstrInfo::decomposeStackOffsetForDwarfOffsets(
      OffsetFromDefCFA, NumBytes, NumVGScaledBytes);

  unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true);

  // Non-scalable offsets can use DW_CFA_offset directly.
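  // Scalable offsets, by contrast, need a DW_CFA_expression: e.g. an entry
  // annotated "z8 @ cfa - 8 * VG" is built below from DW_OP_consts,
  // DW_OP_bregx (of the VG pseudo-register), DW_OP_mul and DW_OP_plus.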
  if (!NumVGScaledBytes)
    return MCCFIInstruction::createOffset(nullptr, DwarfReg, NumBytes);

  std::string CommentBuffer;
  llvm::raw_string_ostream Comment(CommentBuffer);
  Comment << printReg(Reg, &TRI) << " @ cfa";

  // Build up expression (NumBytes + NumVGScaledBytes * AArch64::VG).
  SmallString<64> OffsetExpr;
  appendVGScaledOffsetExpr(OffsetExpr, NumBytes, NumVGScaledBytes,
                           TRI.getDwarfRegNum(AArch64::VG, true), Comment);

  // Wrap this into DW_CFA_expression.
  SmallString<64> CfaExpr;
  CfaExpr.push_back(dwarf::DW_CFA_expression);
  uint8_t buffer[16];
  CfaExpr.append(buffer, buffer + encodeULEB128(DwarfReg, buffer));
  CfaExpr.append(buffer, buffer + encodeULEB128(OffsetExpr.size(), buffer));
  CfaExpr.append(OffsetExpr.str());

  return MCCFIInstruction::createEscape(nullptr, CfaExpr.str(), Comment.str());
}

void AArch64FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  if (CSI.empty())
    return;

  for (const auto &Info : CSI) {
    Register Reg = Info.getReg();

    // Not all unwinders may know about SVE registers, so assume the lowest
    // common denominator.
    unsigned NewReg;
    if (static_cast<const AArch64RegisterInfo *>(TRI)->regNeedsCFI(Reg, NewReg))
      Reg = NewReg;
    else
      continue;

    StackOffset Offset;
    if (MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector) {
      AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
      Offset =
          StackOffset::getScalable(MFI.getObjectOffset(Info.getFrameIdx())) -
          StackOffset::getFixed(AFI->getCalleeSavedStackSize(MFI));
    } else {
      Offset = StackOffset::getFixed(MFI.getObjectOffset(Info.getFrameIdx()) -
                                     getOffsetOfLocalArea());
    }
    unsigned CFIIndex = MF.addFrameInst(createCfaOffset(*TRI, Reg, Offset));
    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlags(MachineInstr::FrameSetup);
  }
}

// Find a scratch register that we can use at the start of the prologue to
// re-align the stack pointer. We avoid using callee-save registers since they
// may appear to be free when this is called from canUseAsPrologue (during
// shrink wrapping), but then no longer be free when this is called from
// emitPrologue.
//
// FIXME: This is a bit conservative, since in the above case we could use one
// of the callee-save registers as a scratch temp to re-align the stack
// pointer, but we would then have to make sure that we were in fact saving at
// least one callee-save register in the prologue, which is additional
// complexity that doesn't seem worth the benefit.
static unsigned findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB) {
  MachineFunction *MF = MBB->getParent();

  // If MBB is an entry block, use X9 as the scratch register.
  if (&MF->front() == MBB)
    return AArch64::X9;

  const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo &TRI = *Subtarget.getRegisterInfo();
  LivePhysRegs LiveRegs(TRI);
  LiveRegs.addLiveIns(*MBB);

  // Mark callee saved registers as used so we will not choose them.
  const MCPhysReg *CSRegs = MF->getRegInfo().getCalleeSavedRegs();
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);

  // Prefer X9 since it was historically used for the prologue scratch reg.
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  if (LiveRegs.available(MRI, AArch64::X9))
    return AArch64::X9;

  for (unsigned Reg : AArch64::GPR64RegClass) {
    if (LiveRegs.available(MRI, Reg))
      return Reg;
  }
  return AArch64::NoRegister;
}

bool AArch64FrameLowering::canUseAsPrologue(
    const MachineBasicBlock &MBB) const {
  const MachineFunction *MF = MBB.getParent();
  MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
  const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();

  // Don't need a scratch register if we're not going to re-align the stack.
  if (!RegInfo->hasStackRealignment(*MF))
    return true;
  // Otherwise, we can use any block as long as it has a scratch register
  // available.
  return findScratchNonCalleeSaveRegister(TmpMBB) != AArch64::NoRegister;
}

static bool windowsRequiresStackProbe(MachineFunction &MF,
                                      uint64_t StackSizeInBytes) {
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  if (!Subtarget.isTargetWindows())
    return false;
  const Function &F = MF.getFunction();
  // TODO: When implementing stack protectors, take that into account
  // for the probe threshold.
  unsigned StackProbeSize = 4096;
  if (F.hasFnAttribute("stack-probe-size"))
    F.getFnAttribute("stack-probe-size")
        .getValueAsString()
        .getAsInteger(0, StackProbeSize);
  return (StackSizeInBytes >= StackProbeSize) &&
         !F.hasFnAttribute("no-stack-arg-probe");
}

static bool needsWinCFI(const MachineFunction &MF) {
  const Function &F = MF.getFunction();
  return MF.getTarget().getMCAsmInfo()->usesWindowsCFI() &&
         F.needsUnwindTableEntry();
}

bool AArch64FrameLowering::shouldCombineCSRLocalStackBump(
    MachineFunction &MF, uint64_t StackBumpBytes) const {
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  if (homogeneousPrologEpilog(MF))
    return false;

  if (AFI->getLocalStackSize() == 0)
    return false;

  // For WinCFI, if optimizing for size, prefer to not combine the stack bump
  // (to force a stp with predecrement) to match the packed unwind format,
  // provided that there actually are any callee saved registers to merge the
  // decrement with.
  // This is potentially marginally slower, but allows using the packed
  // unwind format for functions that both have a local area and callee saved
  // registers. Using the packed unwind format notably reduces the size of
  // the unwind info.
  if (needsWinCFI(MF) && AFI->getCalleeSavedStackSize() > 0 &&
      MF.getFunction().hasOptSize())
    return false;

  // 512 is the maximum immediate for stp/ldp that will be used for
  // callee-save save/restores.
  if (StackBumpBytes >= 512 || windowsRequiresStackProbe(MF, StackBumpBytes))
    return false;

  if (MFI.hasVarSizedObjects())
    return false;

  if (RegInfo->hasStackRealignment(MF))
    return false;

  // This isn't strictly necessary, but it simplifies things a bit since the
  // current RedZone handling code assumes the SP is adjusted by the
  // callee-save save/restore code.
  if (canUseRedZone(MF))
    return false;

  // When there is an SVE area on the stack, always allocate the
  // callee-saves and spills/locals separately.
  if (getSVEStackSize(MF))
    return false;

  return true;
}

bool AArch64FrameLowering::shouldCombineCSRLocalStackBumpInEpilogue(
    MachineBasicBlock &MBB, unsigned StackBumpBytes) const {
  if (!shouldCombineCSRLocalStackBump(*MBB.getParent(), StackBumpBytes))
    return false;

  if (MBB.empty())
    return true;

  // Disable combined SP bump if the last instruction is an MTE tag store. It
  // is almost always better to merge SP adjustment into those instructions.
  MachineBasicBlock::iterator LastI = MBB.getFirstTerminator();
  MachineBasicBlock::iterator Begin = MBB.begin();
  while (LastI != Begin) {
    --LastI;
    if (LastI->isTransient())
      continue;
    if (!LastI->getFlag(MachineInstr::FrameDestroy))
      break;
  }
  switch (LastI->getOpcode()) {
  case AArch64::STGloop:
  case AArch64::STZGloop:
  case AArch64::STGOffset:
  case AArch64::STZGOffset:
  case AArch64::ST2GOffset:
  case AArch64::STZ2GOffset:
    return false;
  default:
    return true;
  }
  llvm_unreachable("unreachable");
}

// Given a load or a store instruction, generate an appropriate unwinding SEH
// code on Windows.
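// For example, a prologue store 'stp x29, x30, [sp, #-16]!' (an STPXpre of
// FP/LR) is described by a SEH_SaveFPLR_X pseudo, which the AsmPrinter later
// emits as a '.seh_save_fplr_x' directive.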
static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI,
                                             const TargetInstrInfo &TII,
                                             MachineInstr::MIFlag Flag) {
  unsigned Opc = MBBI->getOpcode();
  MachineBasicBlock *MBB = MBBI->getParent();
  MachineFunction &MF = *MBB->getParent();
  DebugLoc DL = MBBI->getDebugLoc();
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  int Imm = MBBI->getOperand(ImmIdx).getImm();
  MachineInstrBuilder MIB;
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();

  switch (Opc) {
  default:
    llvm_unreachable("No SEH Opcode for this instruction");
  case AArch64::LDPDpost:
    Imm = -Imm;
    LLVM_FALLTHROUGH;
  case AArch64::STPDpre: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP_X))
              .addImm(Reg0)
              .addImm(Reg1)
              .addImm(Imm * 8)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::LDPXpost:
    Imm = -Imm;
    LLVM_FALLTHROUGH;
  case AArch64::STPXpre: {
    Register Reg0 = MBBI->getOperand(1).getReg();
    Register Reg1 = MBBI->getOperand(2).getReg();
    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR_X))
                .addImm(Imm * 8)
                .setMIFlag(Flag);
    else
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP_X))
                .addImm(RegInfo->getSEHRegNum(Reg0))
                .addImm(RegInfo->getSEHRegNum(Reg1))
                .addImm(Imm * 8)
                .setMIFlag(Flag);
    break;
  }
  case AArch64::LDRDpost:
    Imm = -Imm;
    LLVM_FALLTHROUGH;
  case AArch64::STRDpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg_X))
              .addImm(Reg)
              .addImm(Imm)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::LDRXpost:
    Imm = -Imm;
    LLVM_FALLTHROUGH;
  case AArch64::STRXpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveReg_X))
              .addImm(Reg)
              .addImm(Imm)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::STPDi:
  case AArch64::LDPDi: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP))
              .addImm(Reg0)
              .addImm(Reg1)
              .addImm(Imm * 8)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::STPXi:
  case AArch64::LDPXi: {
    Register Reg0 = MBBI->getOperand(0).getReg();
    Register Reg1 = MBBI->getOperand(1).getReg();
    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR))
                .addImm(Imm * 8)
                .setMIFlag(Flag);
    else
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP))
                .addImm(RegInfo->getSEHRegNum(Reg0))
                .addImm(RegInfo->getSEHRegNum(Reg1))
                .addImm(Imm * 8)
                .setMIFlag(Flag);
    break;
  }
  case AArch64::STRXui:
  case AArch64::LDRXui: {
    int Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveReg))
              .addImm(Reg)
              .addImm(Imm * 8)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::STRDui:
  case AArch64::LDRDui: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg))
              .addImm(Reg)
              .addImm(Imm * 8)
              .setMIFlag(Flag);
    break;
  }
  }
  auto I = MBB->insertAfter(MBBI, MIB);
  return I;
}

// Fix up the SEH opcode associated with the save/restore instruction.
static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI,
                           unsigned LocalStackSize) {
  MachineOperand *ImmOpnd = nullptr;
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Fix the offset in the SEH instruction");
  case AArch64::SEH_SaveFPLR:
  case AArch64::SEH_SaveRegP:
  case AArch64::SEH_SaveReg:
  case AArch64::SEH_SaveFRegP:
  case AArch64::SEH_SaveFReg:
    ImmOpnd = &MBBI->getOperand(ImmIdx);
    break;
  }
  if (ImmOpnd)
    ImmOpnd->setImm(ImmOpnd->getImm() + LocalStackSize);
}

// Convert a callee-save register save/restore instruction into one that also
// decrements/increments the stack pointer to allocate/deallocate the
// callee-save stack area, by switching the store/load to its pre/post
// increment version.
static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc,
    bool NeedsWinCFI, bool *HasWinCFI, bool InProlog = true) {
  // Ignore instructions that do not operate on SP, i.e. shadow call stack
  // instructions and associated CFI instruction.
  while (MBBI->getOpcode() == AArch64::STRXpost ||
         MBBI->getOpcode() == AArch64::LDRXpre ||
         MBBI->getOpcode() == AArch64::CFI_INSTRUCTION) {
    if (MBBI->getOpcode() != AArch64::CFI_INSTRUCTION)
      assert(MBBI->getOperand(0).getReg() != AArch64::SP);
    ++MBBI;
  }
  unsigned NewOpc;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  case AArch64::STPXi:
    NewOpc = AArch64::STPXpre;
    break;
  case AArch64::STPDi:
    NewOpc = AArch64::STPDpre;
    break;
  case AArch64::STPQi:
    NewOpc = AArch64::STPQpre;
    break;
  case AArch64::STRXui:
    NewOpc = AArch64::STRXpre;
    break;
  case AArch64::STRDui:
    NewOpc = AArch64::STRDpre;
    break;
  case AArch64::STRQui:
    NewOpc = AArch64::STRQpre;
    break;
  case AArch64::LDPXi:
    NewOpc = AArch64::LDPXpost;
    break;
  case AArch64::LDPDi:
    NewOpc = AArch64::LDPDpost;
    break;
  case AArch64::LDPQi:
    NewOpc = AArch64::LDPQpost;
    break;
  case AArch64::LDRXui:
    NewOpc = AArch64::LDRXpost;
    break;
  case AArch64::LDRDui:
    NewOpc = AArch64::LDRDpost;
    break;
  case AArch64::LDRQui:
    NewOpc = AArch64::LDRQpost;
    break;
  }
  // Get rid of the SEH code associated with the old instruction.
  if (NeedsWinCFI) {
    auto SEH = std::next(MBBI);
    if (AArch64InstrInfo::isSEHInstruction(*SEH))
      SEH->eraseFromParent();
  }

  TypeSize Scale = TypeSize::Fixed(1);
  unsigned Width;
  int64_t MinOffset, MaxOffset;
  bool Success = static_cast<const AArch64InstrInfo *>(TII)->getMemOpInfo(
      NewOpc, Scale, Width, MinOffset, MaxOffset);
  (void)Success;
  assert(Success && "unknown load/store opcode");

  // If the first store isn't right where we want SP then we can't fold the
  // update in, so create a normal arithmetic instruction instead.
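  // (When the update can be folded, e.g. CSStackSizeInc == -16 turns
  // 'stp x29, x30, [sp]' into 'stp x29, x30, [sp, #-16]!', allocating the
  // callee-save area and storing in a single instruction.)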
  if (MBBI->getOperand(MBBI->getNumOperands() - 1).getImm() != 0 ||
      CSStackSizeInc < MinOffset || CSStackSizeInc > MaxOffset) {
    emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(CSStackSizeInc), TII,
                    InProlog ? MachineInstr::FrameSetup
                             : MachineInstr::FrameDestroy);
    return std::prev(MBBI);
  }

  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc));
  MIB.addReg(AArch64::SP, RegState::Define);

  // Copy all operands other than the immediate offset.
  unsigned OpndIdx = 0;
  for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
       ++OpndIdx)
    MIB.add(MBBI->getOperand(OpndIdx));

  assert(MBBI->getOperand(OpndIdx).getImm() == 0 &&
         "Unexpected immediate offset in first/last callee-save save/restore "
         "instruction!");
  assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  assert(CSStackSizeInc % Scale == 0);
  MIB.addImm(CSStackSizeInc / (int)Scale);

  MIB.setMIFlags(MBBI->getFlags());
  MIB.setMemRefs(MBBI->memoperands());

  // Generate a new SEH code that corresponds to the new instruction.
  if (NeedsWinCFI) {
    *HasWinCFI = true;
    InsertSEH(*MIB, *TII,
              InProlog ? MachineInstr::FrameSetup : MachineInstr::FrameDestroy);
  }

  return std::prev(MBB.erase(MBBI));
}

// Fix up callee-save register save/restore instructions to account for a
// combined SP bump by adding the local stack size to the stack offsets.
static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI,
                                              uint64_t LocalStackSize,
                                              bool NeedsWinCFI,
                                              bool *HasWinCFI) {
  if (AArch64InstrInfo::isSEHInstruction(MI))
    return;

  unsigned Opc = MI.getOpcode();

  // Ignore instructions that do not operate on SP, i.e. shadow call stack
  // instructions and associated CFI instruction.
  if (Opc == AArch64::STRXpost || Opc == AArch64::LDRXpre ||
      Opc == AArch64::CFI_INSTRUCTION) {
    if (Opc != AArch64::CFI_INSTRUCTION)
      assert(MI.getOperand(0).getReg() != AArch64::SP);
    return;
  }

  unsigned Scale;
  switch (Opc) {
  case AArch64::STPXi:
  case AArch64::STRXui:
  case AArch64::STPDi:
  case AArch64::STRDui:
  case AArch64::LDPXi:
  case AArch64::LDRXui:
  case AArch64::LDPDi:
  case AArch64::LDRDui:
    Scale = 8;
    break;
  case AArch64::STPQi:
  case AArch64::STRQui:
  case AArch64::LDPQi:
  case AArch64::LDRQui:
    Scale = 16;
    break;
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  }

  unsigned OffsetIdx = MI.getNumExplicitOperands() - 1;
  assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  // Last operand is immediate offset that needs fixing.
  MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx);
  // All generated opcodes have scaled offsets.
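  // For example, with LocalStackSize == 64 a save of 'stp x19, x20, [sp, #16]'
  // is rewritten to 'stp x19, x20, [sp, #80]'.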
  assert(LocalStackSize % Scale == 0);
  OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / Scale);

  if (NeedsWinCFI) {
    *HasWinCFI = true;
    auto MBBI = std::next(MachineBasicBlock::iterator(MI));
    assert(MBBI != MI.getParent()->end() && "Expecting a valid instruction");
    assert(AArch64InstrInfo::isSEHInstruction(*MBBI) &&
           "Expecting a SEH instruction");
    fixupSEHOpcode(MBBI, LocalStackSize);
  }
}

static void adaptForLdStOpt(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator FirstSPPopI,
                            MachineBasicBlock::iterator LastPopI) {
  // Sometimes (when we restore in the same order as we save), we can end up
  // with code like this:
  //
  //   ldp      x26, x25, [sp]
  //   ldp      x24, x23, [sp, #16]
  //   ldp      x22, x21, [sp, #32]
  //   ldp      x20, x19, [sp, #48]
  //   add      sp, sp, #64
  //
  // In this case, it is always better to put the first ldp at the end, so
  // that the load-store optimizer can run and merge the ldp and the add into
  // a post-index ldp.
  // If we managed to grab the first pop instruction, move it to the end.
  if (ReverseCSRRestoreSeq)
    MBB.splice(FirstSPPopI, &MBB, LastPopI);
  // We should end up with something like this now:
  //
  //   ldp      x24, x23, [sp, #16]
  //   ldp      x22, x21, [sp, #32]
  //   ldp      x20, x19, [sp, #48]
  //   ldp      x26, x25, [sp]
  //   add      sp, sp, #64
  //
  // and the load-store optimizer can merge the last two instructions into:
  //
  //   ldp      x26, x25, [sp], #64
  //
}

static bool isTargetWindows(const MachineFunction &MF) {
  return MF.getSubtarget<AArch64Subtarget>().isTargetWindows();
}

// Convenience function to determine whether I is an SVE callee save.
static bool IsSVECalleeSave(MachineBasicBlock::iterator I) {
  switch (I->getOpcode()) {
  default:
    return false;
  case AArch64::STR_ZXI:
  case AArch64::STR_PXI:
  case AArch64::LDR_ZXI:
  case AArch64::LDR_PXI:
    return I->getFlag(MachineInstr::FrameSetup) ||
           I->getFlag(MachineInstr::FrameDestroy);
  }
}

void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.begin();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const Function &F = MF.getFunction();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  bool needsFrameMoves =
      MF.needsFrameMoves() && !MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  bool HasFP = hasFP(MF);
  bool NeedsWinCFI = needsWinCFI(MF);
  bool HasWinCFI = false;
  auto Cleanup = make_scope_exit([&]() { MF.setHasWinCFI(HasWinCFI); });

  bool IsFunclet = MBB.isEHFuncletEntry();

  // At this point, we're going to decide whether or not the function uses a
  // redzone. In most cases, the function doesn't have a redzone so let's
  // assume that's false and set it to true in the case that there's a redzone.
  AFI->setHasRedZone(false);

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;

  const auto &MFnI = *MF.getInfo<AArch64FunctionInfo>();
  if (MFnI.shouldSignReturnAddress()) {

    unsigned PACI;
    if (MFnI.shouldSignWithBKey()) {
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::EMITBKEY))
          .setMIFlag(MachineInstr::FrameSetup);
      PACI = Subtarget.hasPAuth() ? AArch64::PACIB : AArch64::PACIBSP;
    } else {
      PACI = Subtarget.hasPAuth() ? AArch64::PACIA : AArch64::PACIASP;
    }

    auto MI = BuildMI(MBB, MBBI, DL, TII->get(PACI));
    if (Subtarget.hasPAuth())
      MI.addReg(AArch64::LR, RegState::Define)
          .addReg(AArch64::LR)
          .addReg(AArch64::SP, RegState::InternalRead);
    MI.setMIFlag(MachineInstr::FrameSetup);

    unsigned CFIIndex =
        MF.addFrameInst(MCCFIInstruction::createNegateRAState(nullptr));
    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlags(MachineInstr::FrameSetup);
  }

  // We signal the presence of a Swift extended frame to external tools by
  // storing FP with 0b0001 in bits 63:60. In normal userland operation a
  // simple ORR is sufficient; it is assumed a Swift kernel would initialize
  // the TBI bits so that is still true.
  if (HasFP && AFI->hasSwiftAsyncContext()) {
    switch (MF.getTarget().Options.SwiftAsyncFramePointer) {
    case SwiftAsyncFramePointerMode::DeploymentBased:
      if (Subtarget.swiftAsyncContextIsDynamicallySet()) {
        // The special symbol below is absolute and has a *value* that can be
        // combined with the frame pointer to signal an extended frame.
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::LOADgot), AArch64::X16)
            .addExternalSymbol("swift_async_extendedFramePointerFlags",
                               AArch64II::MO_GOT);
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::ORRXrs), AArch64::FP)
            .addUse(AArch64::FP)
            .addUse(AArch64::X16)
            .addImm(Subtarget.isTargetILP32() ? 32 : 0);
        break;
      }
      LLVM_FALLTHROUGH;

    case SwiftAsyncFramePointerMode::Always:
      // ORR x29, x29, #0x1000_0000_0000_0000
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::ORRXri), AArch64::FP)
          .addUse(AArch64::FP)
          .addImm(0x1100)
          .setMIFlag(MachineInstr::FrameSetup);
      break;

    case SwiftAsyncFramePointerMode::Never:
      break;
    }
  }

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // Set tagged base pointer to the requested stack slot.
  // Ideally it should match the SP value after the prologue.
  Optional<int> TBPI = AFI->getTaggedBasePointerIndex();
  if (TBPI)
    AFI->setTaggedBasePointerOffset(-MFI.getObjectOffset(*TBPI));
  else
    AFI->setTaggedBasePointerOffset(MFI.getStackSize());

  const StackOffset &SVEStackSize = getSVEStackSize(MF);

  // getStackSize() includes all the locals in its size calculation. We don't
  // include these locals when computing the stack size of a funclet, as they
  // are allocated in the parent's stack frame and accessed via the frame
  // pointer from the funclet. We only save the callee saved registers in the
  // funclet, which are really the callee saved registers of the parent
  // function, including the funclet.
  int64_t NumBytes =
      IsFunclet ? getWinEHFuncletFrameSize(MF) : MFI.getStackSize();
  if (!AFI->hasStackFrame() && !windowsRequiresStackProbe(MF, NumBytes)) {
    assert(!HasFP && "unexpected function without stack frame but with FP");
    assert(!SVEStackSize &&
           "unexpected function without stack frame but with SVE objects");
    // All of the stack allocation is for locals.
    AFI->setLocalStackSize(NumBytes);
    if (!NumBytes)
      return;
    // REDZONE: If the stack size is less than 128 bytes, we don't need
    // to actually allocate.
    if (canUseRedZone(MF)) {
      AFI->setHasRedZone(true);
      ++NumRedZoneFunctions;
    } else {
      emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                      StackOffset::getFixed(-NumBytes), TII,
                      MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI);
      if (needsFrameMoves) {
        // Label used to tie together the PROLOG_LABEL and the MachineMoves.
        MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();
        // Encode the stack size of the leaf function.
        unsigned CFIIndex = MF.addFrameInst(
            MCCFIInstruction::cfiDefCfaOffset(FrameLabel, NumBytes));
        BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex)
            .setMIFlags(MachineInstr::FrameSetup);
      }
    }

    if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd))
          .setMIFlag(MachineInstr::FrameSetup);
    }

    return;
  }

  bool IsWin64 =
      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
  unsigned FixedObject = getFixedObjectSize(MF, AFI, IsWin64, IsFunclet);

  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  // All of the remaining stack allocations are for locals.
  AFI->setLocalStackSize(NumBytes - PrologueSaveSize);
  bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
  bool HomPrologEpilog = homogeneousPrologEpilog(MF);
  if (CombineSPBump) {
    assert(!SVEStackSize && "Cannot combine SP bump with SVE");
    emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(-NumBytes), TII,
                    MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI);
    NumBytes = 0;
  } else if (HomPrologEpilog) {
    // Stack has already been adjusted.
    NumBytes -= PrologueSaveSize;
  } else if (PrologueSaveSize != 0) {
    MBBI = convertCalleeSaveRestoreToSPPrePostIncDec(
        MBB, MBBI, DL, TII, -PrologueSaveSize, NeedsWinCFI, &HasWinCFI);
    NumBytes -= PrologueSaveSize;
  }
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  // Move past the saves of the callee-saved registers, fixing up the offsets
  // and pre-inc if we decided to combine the callee-save and local stack
  // pointer bump above.
  MachineBasicBlock::iterator End = MBB.end();
  while (MBBI != End && MBBI->getFlag(MachineInstr::FrameSetup) &&
         !IsSVECalleeSave(MBBI)) {
    if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*MBBI, AFI->getLocalStackSize(),
                                        NeedsWinCFI, &HasWinCFI);
    ++MBBI;
  }

  // For funclets the FP belongs to the containing function.
  if (!IsFunclet && HasFP) {
    // Only set up FP if we actually need to.
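    // On non-Darwin targets the frame record lives at the base of the GPR
    // callee-save area (see the layout diagram at the top of this file), so
    // FP is re-established at a small fixed offset from the post-save SP.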
    int64_t FPOffset = AFI->getCalleeSaveBaseToFrameRecordOffset();

    if (CombineSPBump)
      FPOffset += AFI->getLocalStackSize();

    if (AFI->hasSwiftAsyncContext()) {
      // Before we update the live FP we have to ensure there's a valid (or
      // null) asynchronous context in its slot just before FP in the frame
      // record, so store it now.
      const auto &Attrs = MF.getFunction().getAttributes();
      bool HaveInitialContext = Attrs.hasAttrSomewhere(Attribute::SwiftAsync);
      if (HaveInitialContext)
        MBB.addLiveIn(AArch64::X22);
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::StoreSwiftAsyncContext))
          .addUse(HaveInitialContext ? AArch64::X22 : AArch64::XZR)
          .addUse(AArch64::SP)
          .addImm(FPOffset - 8)
          .setMIFlags(MachineInstr::FrameSetup);
    }

    if (HomPrologEpilog) {
      auto Prolog = MBBI;
      --Prolog;
      assert(Prolog->getOpcode() == AArch64::HOM_Prolog);
      Prolog->addOperand(MachineOperand::CreateImm(FPOffset));
    } else {
      // Issue    add fp, sp, FPOffset or
      //          mov fp, sp    when FPOffset is zero.
      // Note: All stores of callee-saved registers are marked as "FrameSetup".
      // This code marks the instruction(s) that set the FP also.
      emitFrameOffset(MBB, MBBI, DL, AArch64::FP, AArch64::SP,
                      StackOffset::getFixed(FPOffset), TII,
                      MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI);
    }
  }

  if (windowsRequiresStackProbe(MF, NumBytes)) {
    uint64_t NumWords = NumBytes >> 4;
    if (NeedsWinCFI) {
      HasWinCFI = true;
      // alloc_l can hold at most 256MB, so assume that NumBytes doesn't
      // exceed this amount. We need to move at most 2^24 - 1 into x15.
      // This is at most two instructions, MOVZ followed by MOVK.
      // TODO: Fix to use multiple stack alloc unwind codes for stacks
      // exceeding 256MB in size.
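      // To summarize the protocol used below: x15 carries the requested
      // allocation in 16-byte units (NumBytes >> 4), __chkstk probes the
      // pages, and the trailing 'sub sp, sp, x15, uxtx #4' performs the
      // actual allocation of x15 * 16 bytes.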
      if (NumBytes >= (1 << 28))
        report_fatal_error("Stack size cannot exceed 256MB for stack "
                           "unwinding purposes");

      uint32_t LowNumWords = NumWords & 0xFFFF;
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVZXi), AArch64::X15)
          .addImm(LowNumWords)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
          .setMIFlag(MachineInstr::FrameSetup);
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
          .setMIFlag(MachineInstr::FrameSetup);
      if ((NumWords & 0xFFFF0000) != 0) {
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVKXi), AArch64::X15)
            .addReg(AArch64::X15)
            .addImm((NumWords & 0xFFFF0000) >> 16) // High half
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 16))
            .setMIFlag(MachineInstr::FrameSetup);
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
            .setMIFlag(MachineInstr::FrameSetup);
      }
    } else {
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm), AArch64::X15)
          .addImm(NumWords)
          .setMIFlags(MachineInstr::FrameSetup);
    }

    switch (MF.getTarget().getCodeModel()) {
    case CodeModel::Tiny:
    case CodeModel::Small:
    case CodeModel::Medium:
    case CodeModel::Kernel:
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::BL))
          .addExternalSymbol("__chkstk")
          .addReg(AArch64::X15, RegState::Implicit)
          .addReg(AArch64::X16, RegState::Implicit | RegState::Define |
                                    RegState::Dead)
          .addReg(AArch64::X17, RegState::Implicit | RegState::Define |
                                    RegState::Dead)
          .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define |
                                     RegState::Dead)
          .setMIFlags(MachineInstr::FrameSetup);
      if (NeedsWinCFI) {
        HasWinCFI = true;
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
            .setMIFlag(MachineInstr::FrameSetup);
      }
      break;
    case CodeModel::Large:
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVaddrEXT))
          .addReg(AArch64::X16, RegState::Define)
          .addExternalSymbol("__chkstk")
          .addExternalSymbol("__chkstk")
          .setMIFlags(MachineInstr::FrameSetup);
      if (NeedsWinCFI) {
        HasWinCFI = true;
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
            .setMIFlag(MachineInstr::FrameSetup);
      }

      BuildMI(MBB, MBBI, DL, TII->get(getBLRCallOpcode(MF)))
          .addReg(AArch64::X16, RegState::Kill)
          .addReg(AArch64::X15, RegState::Implicit | RegState::Define)
          .addReg(AArch64::X16, RegState::Implicit | RegState::Define |
                                    RegState::Dead)
          .addReg(AArch64::X17, RegState::Implicit | RegState::Define |
                                    RegState::Dead)
          .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define |
                                     RegState::Dead)
          .setMIFlags(MachineInstr::FrameSetup);
      if (NeedsWinCFI) {
        HasWinCFI = true;
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
            .setMIFlag(MachineInstr::FrameSetup);
      }
      break;
    }

    BuildMI(MBB, MBBI, DL, TII->get(AArch64::SUBXrx64), AArch64::SP)
        .addReg(AArch64::SP, RegState::Kill)
        .addReg(AArch64::X15, RegState::Kill)
        .addImm(AArch64_AM::getArithExtendImm(AArch64_AM::UXTX, 4))
        .setMIFlags(MachineInstr::FrameSetup);
    if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc))
          .addImm(NumBytes)
          .setMIFlag(MachineInstr::FrameSetup);
    }
    NumBytes = 0;
  }

  StackOffset AllocateBefore = SVEStackSize, AllocateAfter = {};
  MachineBasicBlock::iterator CalleeSavesBegin = MBBI, CalleeSavesEnd = MBBI;

  // Process the SVE callee-saves to determine what space needs to be
  // allocated.
  if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
    // Find callee save instructions in frame.
    CalleeSavesBegin = MBBI;
    assert(IsSVECalleeSave(CalleeSavesBegin) && "Unexpected instruction");
    while (IsSVECalleeSave(MBBI) && MBBI != MBB.getFirstTerminator())
      ++MBBI;
    CalleeSavesEnd = MBBI;

    AllocateBefore = StackOffset::getScalable(CalleeSavedSize);
    AllocateAfter = SVEStackSize - AllocateBefore;
  }

  // Allocate space for the callee saves (if any).
  emitFrameOffset(MBB, CalleeSavesBegin, DL, AArch64::SP, AArch64::SP,
                  -AllocateBefore, TII, MachineInstr::FrameSetup);

  // Finally allocate remaining SVE stack space.
  emitFrameOffset(MBB, CalleeSavesEnd, DL, AArch64::SP, AArch64::SP,
                  -AllocateAfter, TII, MachineInstr::FrameSetup);

  // Allocate space for the rest of the frame.
  if (NumBytes) {
    // Alignment is required for the parent frame, not the funclet.
    const bool NeedsRealignment =
        !IsFunclet && RegInfo->hasStackRealignment(MF);
    unsigned scratchSPReg = AArch64::SP;

    if (NeedsRealignment) {
      scratchSPReg = findScratchNonCalleeSaveRegister(&MBB);
      assert(scratchSPReg != AArch64::NoRegister);
    }

    // If we're a leaf function, try using the red zone.
    if (!canUseRedZone(MF))
      // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have
      // the correct value here, as NumBytes also includes padding bytes,
      // which shouldn't be counted here.
      emitFrameOffset(MBB, MBBI, DL, scratchSPReg, AArch64::SP,
                      StackOffset::getFixed(-NumBytes), TII,
                      MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI);

    if (NeedsRealignment) {
      const unsigned NrBitsToZero = Log2(MFI.getMaxAlign());
      assert(NrBitsToZero > 1);
      assert(scratchSPReg != AArch64::SP);

      // SUB X9, SP, NumBytes
      //   -- X9 is a temporary register, so it shouldn't contain any live
      //      data here and is free to use. This is already produced by
      //      emitFrameOffset above.
      // AND SP, X9, 0b11111...0000
      // The logical immediates have a non-trivial encoding. The following
      // formula computes the encoded immediate with all ones but
      // NrBitsToZero zero bits as least significant bits.
      uint32_t andMaskEncoded = (1 << 12)                         // = N
                                | ((64 - NrBitsToZero) << 6)      // immr
                                | ((64 - NrBitsToZero - 1) << 0); // imms
      // For example, MaxAlign == 32 gives NrBitsToZero == 5, encoding the
      // mask 0xffffffffffffffe0 (N=1, immr=59, imms=58).

      BuildMI(MBB, MBBI, DL, TII->get(AArch64::ANDXri), AArch64::SP)
          .addReg(scratchSPReg, RegState::Kill)
          .addImm(andMaskEncoded);
      AFI->setStackRealigned(true);
      if (NeedsWinCFI) {
        HasWinCFI = true;
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc))
            .addImm(NumBytes & andMaskEncoded)
            .setMIFlag(MachineInstr::FrameSetup);
      }
    }
  }

  // If we need a base pointer, set it up here. It's whatever the value of the
  // stack pointer is at this point. Any variable size objects will be allocated
  // after this, so we can still use the base pointer to reference locals.
  //
  // FIXME: Clarify FrameSetup flags here.
  // Note: Use emitFrameOffset() like above for FP if the FrameSetup flag is
  // needed.
  // For funclets the BP belongs to the containing function.
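  // (With the default base register this copy materializes as 'mov x19, sp';
  // see the layout comment at the top of the file.)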

  // If we need a base pointer, set it up here. It's whatever the value of
  // the stack pointer is at this point. Any variable size objects will be
  // allocated after this, so we can still use the base pointer to reference
  // locals.
  //
  // FIXME: Clarify FrameSetup flags here.
  // Note: Use emitFrameOffset() like above for FP if the FrameSetup flag is
  // needed.
  // For funclets the BP belongs to the containing function.
  if (!IsFunclet && RegInfo->hasBasePointer(MF)) {
    TII->copyPhysReg(MBB, MBBI, DL, RegInfo->getBaseRegister(), AArch64::SP,
                     false);
    if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
          .setMIFlag(MachineInstr::FrameSetup);
    }
  }

  // The very last FrameSetup instruction indicates the end of prologue. Emit
  // a SEH opcode indicating the prologue end.
  if (NeedsWinCFI && HasWinCFI) {
    BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd))
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // SEH funclets are passed the frame pointer in X1. If the parent
  // function uses the base register, then the base register is used
  // directly, and is not retrieved from X1.
  if (IsFunclet && F.hasPersonalityFn()) {
    EHPersonality Per = classifyEHPersonality(F.getPersonalityFn());
    if (isAsynchronousEHPersonality(Per)) {
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::COPY), AArch64::FP)
          .addReg(AArch64::X1)
          .setMIFlag(MachineInstr::FrameSetup);
      MBB.addLiveIn(AArch64::X1);
    }
  }
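  // Illustrative note (an assumption about Win64 EH, not stated in this
  // file): the asynchronous personality in practice is SEH's
  // __C_specific_handler, so the copy above runs at the top of SEH
  // catch/cleanup funclets to recover the parent frame pointer from x1.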

  if (needsFrameMoves) {
    // An example of the prologue:
    //
    //     .globl __foo
    //     .align 2
    //  __foo:
    // Ltmp0:
    //     .cfi_startproc
    //     .cfi_personality 155, ___gxx_personality_v0
    // Leh_func_begin:
    //     .cfi_lsda 16, Lexception33
    //
    //     stp  xa,bx, [sp, -#offset]!
    //     ...
    //     stp  x28, x27, [sp, #offset-32]
    //     stp  fp, lr, [sp, #offset-16]
    //     add  fp, sp, #offset - 16
    //     sub  sp, sp, #1360
    //
    // The Stack:
    //       +-------------------------------------------+
    // 10000 | ........ | ........ | ........ | ........ |
    // 10004 | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    // 10008 | ........ | ........ | ........ | ........ |
    // 1000c | ........ | ........ | ........ | ........ |
    //       +===========================================+
    // 10010 |                X28 Register               |
    // 10014 |                X28 Register               |
    //       +-------------------------------------------+
    // 10018 |                X27 Register               |
    // 1001c |                X27 Register               |
    //       +===========================================+
    // 10020 |               Frame Pointer               |
    // 10024 |               Frame Pointer               |
    //       +-------------------------------------------+
    // 10028 |               Link Register               |
    // 1002c |               Link Register               |
    //       +===========================================+
    // 10030 | ........ | ........ | ........ | ........ |
    // 10034 | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    // 10038 | ........ | ........ | ........ | ........ |
    // 1003c | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    //
    //     [sp] = 10030        ::    >>initial value<<
    //     sp = 10020          ::  stp  fp, lr, [sp, #-16]!
    //     fp = sp == 10020    ::  mov fp, sp
    //     [sp] == 10020       ::  stp  x28, x27, [sp, #-16]!
    //     sp == 10010         ::    >>final value<<
    //
    // The frame pointer (w29) points to address 10020. If we use an offset
    // of '16' from 'w29', we get the CFI offsets of -8 for w30, -16 for w29,
    // -24 for w27, and -32 for w28:
    //
    // Ltmp1:
    //     .cfi_def_cfa w29, 16
    // Ltmp2:
    //     .cfi_offset w30, -8
    // Ltmp3:
    //     .cfi_offset w29, -16
    // Ltmp4:
    //     .cfi_offset w27, -24
    // Ltmp5:
    //     .cfi_offset w28, -32

    if (HasFP) {
      const int OffsetToFirstCalleeSaveFromFP =
          AFI->getCalleeSaveBaseToFrameRecordOffset() -
          AFI->getCalleeSavedStackSize();
      Register FramePtr = RegInfo->getFrameRegister(MF);

      // Define the current CFA rule to use the provided FP.
      unsigned Reg = RegInfo->getDwarfRegNum(FramePtr, true);
      unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
          nullptr, Reg, FixedObject - OffsetToFirstCalleeSaveFromFP));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    } else {
      unsigned CFIIndex;
      if (SVEStackSize) {
        const TargetSubtargetInfo &STI = MF.getSubtarget();
        const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
        StackOffset TotalSize =
            SVEStackSize + StackOffset::getFixed((int64_t)MFI.getStackSize());
        CFIIndex =
            MF.addFrameInst(createDefCFAExpressionFromSP(TRI, TotalSize));
      } else {
        // Encode the stack size of the leaf function.
        CFIIndex = MF.addFrameInst(
            MCCFIInstruction::cfiDefCfaOffset(nullptr, MFI.getStackSize()));
      }
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    }

    // Now emit the moves for whatever callee saved regs we have (including
    // FP, LR if those are saved).
    emitCalleeSavedFrameMoves(MBB, MBBI);
  }
}
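
// Illustrative note (general AArch64 behaviour, assumed rather than stated
// in this file): AUTIASP/AUTIBSP live in the HINT instruction space, so on
// pre-PAuth cores the authenticate emitted below executes as a NOP; when
// PAuth is available, the combined authenticate-and-return forms
// RETAA/RETAB fold the check into the return itself.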

static void InsertReturnAddressAuth(MachineFunction &MF,
                                    MachineBasicBlock &MBB) {
  const auto &MFI = *MF.getInfo<AArch64FunctionInfo>();
  if (!MFI.shouldSignReturnAddress())
    return;
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();

  // The AUTIASP instruction assembles to a hint instruction before v8.3a so
  // this instruction can safely be used for any v8a architecture.
  // From v8.3a onwards there are optimised authenticate LR and return
  // instructions, namely RETA{A,B}, that can be used instead.
  if (Subtarget.hasPAuth() && MBBI != MBB.end() &&
      MBBI->getOpcode() == AArch64::RET_ReallyLR) {
    BuildMI(MBB, MBBI, DL,
            TII->get(MFI.shouldSignWithBKey() ? AArch64::RETAB
                                              : AArch64::RETAA))
        .copyImplicitOps(*MBBI);
    MBB.erase(MBBI);
  } else {
    BuildMI(MBB, MBBI, DL,
            TII->get(MFI.shouldSignWithBKey() ? AArch64::AUTIBSP
                                              : AArch64::AUTIASP))
        .setMIFlag(MachineInstr::FrameDestroy);
  }
}

static bool isFuncletReturnInstr(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::CATCHRET:
  case AArch64::CLEANUPRET:
    return true;
  }
}

void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL;
  bool NeedsWinCFI = needsWinCFI(MF);
  bool HasWinCFI = false;
  bool IsFunclet = false;
  auto WinCFI = make_scope_exit([&]() { assert(HasWinCFI == MF.hasWinCFI()); });

  if (MBB.end() != MBBI) {
    DL = MBBI->getDebugLoc();
    IsFunclet = isFuncletReturnInstr(*MBBI);
  }

  int64_t NumBytes =
      IsFunclet ? getWinEHFuncletFrameSize(MF) : MFI.getStackSize();
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // How much of the stack used by incoming arguments this function is
  // expected to restore in this particular epilogue.
  int64_t ArgumentStackToRestore = getArgumentStackToRestore(MF, MBB);

  // The stack frame should be like below,
  //
  //      ----------------------                     ---
  //      |                    |                      |
  //      | BytesInStackArgArea|              CalleeArgStackSize
  //      | (NumReusableBytes) |                (of tail call)
  //      |                    |                     ---
  //      |                    |                      |
  //      ---------------------|        ---           |
  //      |                    |         |            |
  //      |   CalleeSavedReg   |         |            |
  //      | (CalleeSavedStackSize)|      |            |
  //      |                    |         |            |
  //      ---------------------|         |         NumBytes
  //      |                    |     StackSize  (StackAdjustUp)
  //      |   LocalStackSize   |         |            |
  //      | (covering callee   |         |            |
  //      |       args)        |         |            |
  //      |                    |         |            |
  //      ----------------------        ---          ---
  //
  // So NumBytes = StackSize + BytesInStackArgArea - CalleeArgStackSize
  //             = StackSize + ArgumentPopSize
  //
  // AArch64TargetLowering::LowerCall figures out ArgumentPopSize and keeps
  // it as the 2nd argument of AArch64ISD::TC_RETURN.

  auto Cleanup = make_scope_exit([&] { InsertReturnAddressAuth(MF, MBB); });

  bool IsWin64 =
      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
  unsigned FixedObject = getFixedObjectSize(MF, AFI, IsWin64, IsFunclet);

  int64_t AfterCSRPopSize = ArgumentStackToRestore;
  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
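
  // Worked example (illustrative numbers, not from this file): with
  // StackSize == 96 and a tail-call convention that pops 16 bytes of its
  // caller's argument area, ArgumentStackToRestore == 16 and this epilogue
  // must move SP up by 96 + 16 == 112 bytes in total.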
  // We cannot rely on the local stack size set in emitPrologue if the
  // function has funclets, as funclets have different local stack size
  // requirements, and the current value set in emitPrologue may be that of
  // the containing function.
  if (MF.hasEHFunclets())
    AFI->setLocalStackSize(NumBytes - PrologueSaveSize);
  if (homogeneousPrologEpilog(MF, &MBB)) {
    assert(!NeedsWinCFI);
    auto LastPopI = MBB.getFirstTerminator();
    if (LastPopI != MBB.begin()) {
      auto HomogeneousEpilog = std::prev(LastPopI);
      if (HomogeneousEpilog->getOpcode() == AArch64::HOM_Epilog)
        LastPopI = HomogeneousEpilog;
    }

    // Adjust local stack.
    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(AFI->getLocalStackSize()), TII,
                    MachineInstr::FrameDestroy, false, NeedsWinCFI);

    // SP has been already adjusted while restoring callee save regs.
    // We've bailed-out the case with adjusting SP for arguments.
    assert(AfterCSRPopSize == 0);
    return;
  }
  bool CombineSPBump = shouldCombineCSRLocalStackBumpInEpilogue(MBB, NumBytes);
  // Assume we can't combine the last pop with the sp restore.

  if (!CombineSPBump && PrologueSaveSize != 0) {
    MachineBasicBlock::iterator Pop = std::prev(MBB.getFirstTerminator());
    while (AArch64InstrInfo::isSEHInstruction(*Pop))
      Pop = std::prev(Pop);
    // Converting the last ldp to a post-index ldp is valid only if the last
    // ldp's offset is 0.
    const MachineOperand &OffsetOp = Pop->getOperand(Pop->getNumOperands() - 1);
    // If the offset is 0 and the AfterCSR pop is not actually trying to
    // allocate more stack for arguments (in space that an untimely interrupt
    // may clobber), convert it to a post-index ldp.
    if (OffsetOp.getImm() == 0 && AfterCSRPopSize >= 0)
      convertCalleeSaveRestoreToSPPrePostIncDec(
          MBB, Pop, DL, TII, PrologueSaveSize, NeedsWinCFI, &HasWinCFI, false);
    else {
      // If not, make sure to emit an add after the last ldp.
      // We're doing this by transferring the size to be restored from the
      // adjustment *before* the CSR pops to the adjustment *after* the CSR
      // pops.
      AfterCSRPopSize += PrologueSaveSize;
    }
  }

  // Move past the restores of the callee-saved registers.
  // If we plan on combining the sp bump of the local stack size and the
  // callee save stack size, we might need to adjust the CSR save and restore
  // offsets.
  MachineBasicBlock::iterator LastPopI = MBB.getFirstTerminator();
  MachineBasicBlock::iterator Begin = MBB.begin();
  while (LastPopI != Begin) {
    --LastPopI;
    if (!LastPopI->getFlag(MachineInstr::FrameDestroy) ||
        IsSVECalleeSave(LastPopI)) {
      ++LastPopI;
      break;
    } else if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*LastPopI, AFI->getLocalStackSize(),
                                        NeedsWinCFI, &HasWinCFI);
  }

  if (MF.hasWinCFI()) {
    // If the prologue didn't contain any SEH opcodes and didn't set the
    // MF.hasWinCFI() flag, assume the epilogue won't either, and skip the
    // EpilogStart - to avoid generating CFI for functions that don't need it.
    // (And as we didn't generate any prologue at all, it would be
    // asymmetrical to the epilogue.) By the end of the function, we assert
    // that HasWinCFI is equal to MF.hasWinCFI(), to verify this assumption.
    HasWinCFI = true;
    BuildMI(MBB, LastPopI, DL, TII->get(AArch64::SEH_EpilogStart))
        .setMIFlag(MachineInstr::FrameDestroy);
  }
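
  // Decoding note for the logical immediate used below (illustrative): the
  // encoded value 0x10fe means N=1, immr=3, imms=62, i.e. 63 ones rotated
  // right by 3, giving the 64-bit mask 0xefffffffffffffff, which clears
  // bit 60 and nothing else.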
  if (hasFP(MF) && AFI->hasSwiftAsyncContext()) {
    // We need to reset FP to its untagged state on return. Bit 60 is
    // currently used to show the presence of an extended frame.

    // BIC x29, x29, #0x1000_0000_0000_0000
    BuildMI(MBB, MBB.getFirstTerminator(), DL, TII->get(AArch64::ANDXri),
            AArch64::FP)
        .addUse(AArch64::FP)
        .addImm(0x10fe)
        .setMIFlag(MachineInstr::FrameDestroy);
  }

  const StackOffset &SVEStackSize = getSVEStackSize(MF);

  // If there is a single SP update, insert it before the ret and we're done.
  if (CombineSPBump) {
    assert(!SVEStackSize && "Cannot combine SP bump with SVE");
    emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP,
                    AArch64::SP,
                    StackOffset::getFixed(NumBytes + (int64_t)AfterCSRPopSize),
                    TII, MachineInstr::FrameDestroy, false, NeedsWinCFI,
                    &HasWinCFI);
    if (HasWinCFI)
      BuildMI(MBB, MBB.getFirstTerminator(), DL,
              TII->get(AArch64::SEH_EpilogEnd))
          .setMIFlag(MachineInstr::FrameDestroy);
    return;
  }

  NumBytes -= PrologueSaveSize;
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  // Process the SVE callee-saves to determine what space needs to be
  // deallocated.
  StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize;
  MachineBasicBlock::iterator RestoreBegin = LastPopI, RestoreEnd = LastPopI;
  if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
    RestoreBegin = std::prev(RestoreEnd);
    while (RestoreBegin != MBB.begin() &&
           IsSVECalleeSave(std::prev(RestoreBegin)))
      --RestoreBegin;

    assert(IsSVECalleeSave(RestoreBegin) &&
           IsSVECalleeSave(std::prev(RestoreEnd)) && "Unexpected instruction");

    StackOffset CalleeSavedSizeAsOffset =
        StackOffset::getScalable(CalleeSavedSize);
    DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset;
    DeallocateAfter = CalleeSavedSizeAsOffset;
  }

  // Deallocate the SVE area.
  if (SVEStackSize) {
    if (AFI->isStackRealigned()) {
      if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize())
        // Set SP to start of SVE callee-save area from which they can
        // be reloaded. The code below will deallocate the stack space
        // by moving FP -> SP.
        emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::FP,
                        StackOffset::getScalable(-CalleeSavedSize), TII,
                        MachineInstr::FrameDestroy);
    } else {
      if (AFI->getSVECalleeSavedStackSize()) {
        // Deallocate the non-SVE locals first before we can deallocate (and
        // restore callee saves) from the SVE area.
        emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
                        StackOffset::getFixed(NumBytes), TII,
                        MachineInstr::FrameDestroy);
        NumBytes = 0;
      }

      emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
                      DeallocateBefore, TII, MachineInstr::FrameDestroy);

      emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
                      DeallocateAfter, TII, MachineInstr::FrameDestroy);
    }
  }
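
  // Mirror of the prologue's split (illustrative summary): DeallocateBefore
  // undoes the SVE locals that were allocated after the SVE callee-saves,
  // and DeallocateAfter undoes the callee-save area itself once those
  // registers have been reloaded.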

  if (!hasFP(MF)) {
    bool RedZone = canUseRedZone(MF);
    // If this was a redzone leaf function, we don't need to restore the
    // stack pointer (but we may need to pop stack args for fastcc).
    if (RedZone && AfterCSRPopSize == 0)
      return;

    bool NoCalleeSaveRestore = PrologueSaveSize == 0;
    int64_t StackRestoreBytes = RedZone ? 0 : NumBytes;
    if (NoCalleeSaveRestore)
      StackRestoreBytes += AfterCSRPopSize;

    // If we were able to combine the local stack pop with the argument pop,
    // then we're done.
    bool Done = NoCalleeSaveRestore || AfterCSRPopSize == 0;

    // If we're done after this, make sure to help the load store optimizer.
    if (Done)
      adaptForLdStOpt(MBB, MBB.getFirstTerminator(), LastPopI);

    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(StackRestoreBytes), TII,
                    MachineInstr::FrameDestroy, false, NeedsWinCFI,
                    &HasWinCFI);
    if (Done) {
      if (HasWinCFI) {
        BuildMI(MBB, MBB.getFirstTerminator(), DL,
                TII->get(AArch64::SEH_EpilogEnd))
            .setMIFlag(MachineInstr::FrameDestroy);
      }
      return;
    }

    NumBytes = 0;
  }

  // Restore the original stack pointer.
  // FIXME: Rather than doing the math here, we should instead just use
  // non-post-indexed loads for the restores if we aren't actually going to
  // be able to save any instructions.
  if (!IsFunclet && (MFI.hasVarSizedObjects() || AFI->isStackRealigned())) {
    emitFrameOffset(
        MBB, LastPopI, DL, AArch64::SP, AArch64::FP,
        StackOffset::getFixed(-AFI->getCalleeSaveBaseToFrameRecordOffset()),
        TII, MachineInstr::FrameDestroy, false, NeedsWinCFI);
  } else if (NumBytes)
    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(NumBytes), TII,
                    MachineInstr::FrameDestroy, false, NeedsWinCFI);

  // This must be placed after the callee-save restore code because that code
  // assumes the SP is at the same location as it was after the callee-save
  // save code in the prologue.
  if (AfterCSRPopSize) {
    assert(AfterCSRPopSize > 0 && "attempting to reallocate arg stack that an "
                                  "interrupt may have clobbered");
    // Find an insertion point for the first ldp so that it goes before the
    // shadow call stack epilog instruction. This ensures that the restore of
    // lr from x18 is placed after the restore from sp.
    auto FirstSPPopI = MBB.getFirstTerminator();
    while (FirstSPPopI != Begin) {
      auto Prev = std::prev(FirstSPPopI);
      if (Prev->getOpcode() != AArch64::LDRXpre ||
          Prev->getOperand(0).getReg() == AArch64::SP)
        break;
      FirstSPPopI = Prev;
    }

    adaptForLdStOpt(MBB, FirstSPPopI, LastPopI);

    emitFrameOffset(MBB, FirstSPPopI, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(AfterCSRPopSize), TII,
                    MachineInstr::FrameDestroy, false, NeedsWinCFI,
                    &HasWinCFI);
  }
  if (HasWinCFI)
    BuildMI(MBB, MBB.getFirstTerminator(), DL, TII->get(AArch64::SEH_EpilogEnd))
        .setMIFlag(MachineInstr::FrameDestroy);
}
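
// Shape of a typical epilogue produced above (illustrative sketch; FP-based
// frame, no SVE, offsets invented for the example):
//   add  sp, sp, #local_size     ; or recomputed from fp when realigned/VLAs
//   ldp  x20, x19, [sp, #16]
//   ldp  x29, x30, [sp], #32     ; post-index form when the last offset was 0
//   ret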

/// getFrameIndexReference - Provide a base+offset reference to an FI slot
/// for debug info. It's the same as what we use for resolving the code-gen
/// references for now. FIXME: This can go wrong when references are
/// SP-relative and simple call frames aren't used.
StackOffset
AArch64FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                             Register &FrameReg) const {
  return resolveFrameIndexReference(
      MF, FI, FrameReg,
      /*PreferFP=*/
      MF.getFunction().hasFnAttribute(Attribute::SanitizeHWAddress),
      /*ForSimm=*/false);
}

StackOffset
AArch64FrameLowering::getNonLocalFrameIndexReference(const MachineFunction &MF,
                                                     int FI) const {
  return StackOffset::getFixed(getSEHFrameIndexOffset(MF, FI));
}

static StackOffset getFPOffset(const MachineFunction &MF,
                               int64_t ObjectOffset) {
  const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
  const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  bool IsWin64 =
      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
  unsigned FixedObject =
      getFixedObjectSize(MF, AFI, IsWin64, /*IsFunclet=*/false);
  int64_t CalleeSaveSize = AFI->getCalleeSavedStackSize(MF.getFrameInfo());
  int64_t FPAdjust =
      CalleeSaveSize - AFI->getCalleeSaveBaseToFrameRecordOffset();
  return StackOffset::getFixed(ObjectOffset + FixedObject + FPAdjust);
}

static StackOffset getStackOffset(const MachineFunction &MF,
                                  int64_t ObjectOffset) {
  const auto &MFI = MF.getFrameInfo();
  return StackOffset::getFixed(ObjectOffset + (int64_t)MFI.getStackSize());
}

// TODO: This function currently does not work for scalable vectors.
int AArch64FrameLowering::getSEHFrameIndexOffset(const MachineFunction &MF,
                                                 int FI) const {
  const auto *RegInfo = static_cast<const AArch64RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  int ObjectOffset = MF.getFrameInfo().getObjectOffset(FI);
  return RegInfo->getLocalAddressRegister(MF) == AArch64::FP
             ? getFPOffset(MF, ObjectOffset).getFixed()
             : getStackOffset(MF, ObjectOffset).getFixed();
}

StackOffset AArch64FrameLowering::resolveFrameIndexReference(
    const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP,
    bool ForSimm) const {
  const auto &MFI = MF.getFrameInfo();
  int64_t ObjectOffset = MFI.getObjectOffset(FI);
  bool isFixed = MFI.isFixedObjectIndex(FI);
  bool isSVE = MFI.getStackID(FI) == TargetStackID::ScalableVector;
  return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, isSVE,
                                     FrameReg, PreferFP, ForSimm);
}
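
// Worked example for getFPOffset (illustrative numbers): with ObjectOffset
// == -24, FixedObject == 0, CalleeSaveSize == 32 and a frame-record offset
// of 16, FPAdjust == 16 and the result is -24 + 0 + 16 == -8, i.e. the
// object sits 8 bytes below the frame record that FP points at.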

StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
    const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, bool isSVE,
    Register &FrameReg, bool PreferFP, bool ForSimm) const {
  const auto &MFI = MF.getFrameInfo();
  const auto *RegInfo = static_cast<const AArch64RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
  const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();

  int64_t FPOffset = getFPOffset(MF, ObjectOffset).getFixed();
  int64_t Offset = getStackOffset(MF, ObjectOffset).getFixed();
  bool isCSR =
      !isFixed && ObjectOffset >= -((int)AFI->getCalleeSavedStackSize(MFI));

  const StackOffset &SVEStackSize = getSVEStackSize(MF);

  // Use frame pointer to reference fixed objects. Use it for locals if
  // there are VLAs or a dynamically realigned SP (and thus the SP isn't
  // reliable as a base). Make sure useFPForScavengingIndex() does the
  // right thing for the emergency spill slot.
  bool UseFP = false;
  if (AFI->hasStackFrame() && !isSVE) {
    // We shouldn't prefer using the FP when there is an SVE area
    // in between the FP and the non-SVE locals/spills.
    PreferFP &= !SVEStackSize;

    // Note: Keeping the following as multiple 'if' statements rather than
    // merging to a single expression for readability.
    //
    // Argument access should always use the FP.
    if (isFixed) {
      UseFP = hasFP(MF);
    } else if (isCSR && RegInfo->hasStackRealignment(MF)) {
      // References to the CSR area must use FP if we're re-aligning the
      // stack since the dynamically-sized alignment padding is between the
      // SP/BP and the CSR area.
      assert(hasFP(MF) && "Re-aligned stack must have frame pointer");
      UseFP = true;
    } else if (hasFP(MF) && !RegInfo->hasStackRealignment(MF)) {
      // If the FPOffset is negative and we're producing a signed immediate,
      // we have to keep in mind that the available offset range for negative
      // offsets is smaller than for positive ones. If an offset is available
      // via the FP and the SP, use whichever is closest.
      bool FPOffsetFits = !ForSimm || FPOffset >= -256;
      PreferFP |= Offset > -FPOffset;

      if (MFI.hasVarSizedObjects()) {
        // If we have variable sized objects, we can use either FP or BP, as
        // the SP offset is unknown. We can use the base pointer if we have
        // one and FP is not preferred. If not, we're stuck with using FP.
        bool CanUseBP = RegInfo->hasBasePointer(MF);
        if (FPOffsetFits && CanUseBP) // Both are ok. Pick the best.
          UseFP = PreferFP;
        else if (!CanUseBP) // Can't use BP. Forced to use FP.
          UseFP = true;
        // else we can use BP and FP, but the offset from FP won't fit.
        // That will make us scavenge registers which we can probably avoid
        // by using BP. If it won't fit for BP either, we'll scavenge anyway.
      } else if (FPOffset >= 0) {
        // Use SP or FP, whichever gives us the best chance of the offset
        // being in range for direct access. If the FPOffset is positive,
        // that'll always be best, as the SP will be even further away.
        UseFP = true;
      } else if (MF.hasEHFunclets() && !RegInfo->hasBasePointer(MF)) {
        // Funclets access the locals contained in the parent's stack frame
        // via the frame pointer, so we have to use the FP in the parent
        // function.
        (void)Subtarget;
        assert(
            Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()) &&
            "Funclets should only be present on Win64");
        UseFP = true;
      } else {
        // We have the choice between FP and (SP or BP).
        if (FPOffsetFits && PreferFP) // If FP is the best fit, use it.
          UseFP = true;
      }
    }
  }

  assert(
      ((isFixed || isCSR) || !RegInfo->hasStackRealignment(MF) || !UseFP) &&
      "In the presence of dynamic stack pointer realignment, "
      "non-argument/CSR objects cannot be accessed through the frame pointer");
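
  // Decision summary (illustrative): with hasFP(MF) true, a fixed argument
  // object always resolves to FP; a local at FPOffset == -512 with ForSimm
  // set fails FPOffsetFits (-512 < -256) and falls back to SP or BP; and an
  // over-aligned local in a realigned frame must never use FP, which the
  // assert above enforces.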

  if (isSVE) {
    StackOffset FPOffset = StackOffset::get(
        -AFI->getCalleeSaveBaseToFrameRecordOffset(), ObjectOffset);
    StackOffset SPOffset =
        SVEStackSize +
        StackOffset::get(MFI.getStackSize() - AFI->getCalleeSavedStackSize(),
                         ObjectOffset);
    // Always use the FP for SVE spills if available and beneficial.
    if (hasFP(MF) && (SPOffset.getFixed() ||
                      FPOffset.getScalable() < SPOffset.getScalable() ||
                      RegInfo->hasStackRealignment(MF))) {
      FrameReg = RegInfo->getFrameRegister(MF);
      return FPOffset;
    }

    FrameReg = RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister()
                                           : (unsigned)AArch64::SP;
    return SPOffset;
  }

  StackOffset ScalableOffset = {};
  if (UseFP && !(isFixed || isCSR))
    ScalableOffset = -SVEStackSize;
  if (!UseFP && (isFixed || isCSR))
    ScalableOffset = SVEStackSize;

  if (UseFP) {
    FrameReg = RegInfo->getFrameRegister(MF);
    return StackOffset::getFixed(FPOffset) + ScalableOffset;
  }

  // Use the base pointer if we have one.
  if (RegInfo->hasBasePointer(MF))
    FrameReg = RegInfo->getBaseRegister();
  else {
    assert(!MFI.hasVarSizedObjects() &&
           "Can't use SP when we have var sized objects.");
    FrameReg = AArch64::SP;
    // If we're using the red zone for this function, the SP won't actually
    // be adjusted, so the offsets will be negative. They're also all
    // within range of the signed 9-bit immediate instructions.
    if (canUseRedZone(MF))
      Offset -= AFI->getLocalStackSize();
  }

  return StackOffset::getFixed(Offset) + ScalableOffset;
}

static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) {
  // Do not set a kill flag on values that are also marked as live-in. This
  // happens with the @llvm.returnaddress intrinsic and with arguments
  // passed in callee saved registers.
  // Omitting the kill flags is conservatively correct even if the live-in
  // is not used after all.
  bool IsLiveIn = MF.getRegInfo().isLiveIn(Reg);
  return getKillRegState(!IsLiveIn);
}

static bool produceCompactUnwindFrame(MachineFunction &MF) {
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  AttributeList Attrs = MF.getFunction().getAttributes();
  return Subtarget.isTargetMachO() &&
         !(Subtarget.getTargetLowering()->supportSwiftError() &&
           Attrs.hasAttrSomewhere(Attribute::SwiftError)) &&
         MF.getFunction().getCallingConv() != CallingConv::SwiftTail;
}
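
// Illustrative example of the Windows pairing constraint handled below: a
// "stp x19, x20, [sp, #...]" save has a save_regp unwind code, but there is
// no opcode that can describe "stp x19, x22", so such a pair must be
// rejected and the two registers stored individually.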

static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2,
                                             bool NeedsWinCFI, bool IsFirst) {
  // If we are generating register pairs for a Windows function that requires
  // EH support, then pair consecutive registers only. There are no unwind
  // opcodes for saves/restores of non-consecutive register pairs.
  // The unwind opcodes are save_regp, save_regp_x, save_fregp, save_fregp_x,
  // save_lrpair.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling

  if (Reg2 == AArch64::FP)
    return true;
  if (!NeedsWinCFI)
    return false;
  if (Reg2 == Reg1 + 1)
    return false;
  // If pairing a GPR with LR, the pair can be described by the save_lrpair
  // opcode. If this is the first register pair, it would end up with a
  // predecrement, but there's no save_lrpair_x opcode, so we can only do
  // this if LR is paired with something else than the first register.
  // The save_lrpair opcode requires the first register to be an odd one.
  if (Reg1 >= AArch64::X19 && Reg1 <= AArch64::X27 &&
      (Reg1 - AArch64::X19) % 2 == 0 && Reg2 == AArch64::LR && !IsFirst)
    return false;
  return true;
}

/// Returns true if Reg1 and Reg2 cannot be paired using a ldp/stp
/// instruction. WindowsCFI requires that only consecutive registers can be
/// paired. LR and FP need to be allocated together when the frame needs to
/// save the frame-record. This means any other register pairing with LR is
/// invalid.
static bool invalidateRegisterPairing(unsigned Reg1, unsigned Reg2,
                                      bool UsesWinAAPCS, bool NeedsWinCFI,
                                      bool NeedsFrameRecord, bool IsFirst) {
  if (UsesWinAAPCS)
    return invalidateWindowsRegisterPairing(Reg1, Reg2, NeedsWinCFI, IsFirst);

  // If we need to store the frame record, don't pair any register
  // with LR other than FP.
  if (NeedsFrameRecord)
    return Reg2 == AArch64::LR;

  return false;
}

namespace {

struct RegPairInfo {
  unsigned Reg1 = AArch64::NoRegister;
  unsigned Reg2 = AArch64::NoRegister;
  int FrameIdx;
  int Offset;
  enum RegType { GPR, FPR64, FPR128, PPR, ZPR } Type;

  RegPairInfo() = default;

  bool isPaired() const { return Reg2 != AArch64::NoRegister; }

  unsigned getScale() const {
    switch (Type) {
    case PPR:
      return 2;
    case GPR:
    case FPR64:
      return 8;
    case ZPR:
    case FPR128:
      return 16;
    }
    llvm_unreachable("Unsupported type");
  }

  bool isScalable() const { return Type == PPR || Type == ZPR; }
};

} // end anonymous namespace

static void computeCalleeSaveRegisterPairs(
    MachineFunction &MF, ArrayRef<CalleeSavedInfo> CSI,
    const TargetRegisterInfo *TRI, SmallVectorImpl<RegPairInfo> &RegPairs,
    bool &NeedShadowCallStackProlog, bool NeedsFrameRecord) {

  if (CSI.empty())
    return;

  bool IsWindows = isTargetWindows(MF);
  bool NeedsWinCFI = needsWinCFI(MF);
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  CallingConv::ID CC = MF.getFunction().getCallingConv();
  unsigned Count = CSI.size();
  (void)CC;
  // MachO's compact unwind format relies on all registers being stored in
  // pairs.
  assert((!produceCompactUnwindFrame(MF) ||
          CC == CallingConv::PreserveMost || CC == CallingConv::CXX_FAST_TLS ||
          (Count & 1) == 0) &&
         "Odd number of callee-saved regs to spill!");
  int ByteOffset = AFI->getCalleeSavedStackSize();
  int StackFillDir = -1;
  int RegInc = 1;
  unsigned FirstReg = 0;
  if (NeedsWinCFI) {
    // For WinCFI, fill the stack from the bottom up.
    ByteOffset = 0;
    StackFillDir = 1;
    // As the CSI array is reversed to match PrologEpilogInserter, iterate
    // backwards, to pair up registers starting from lower numbered registers.
    RegInc = -1;
    FirstReg = Count - 1;
  }
  int ScalableByteOffset = AFI->getSVECalleeSavedStackSize();
  bool NeedGapToAlignStack = AFI->hasCalleeSaveStackFreeSpace();

  // When iterating backwards, the loop condition relies on unsigned
  // wraparound.
  for (unsigned i = FirstReg; i < Count; i += RegInc) {
    RegPairInfo RPI;
    RPI.Reg1 = CSI[i].getReg();

    if (AArch64::GPR64RegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::GPR;
    else if (AArch64::FPR64RegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::FPR64;
    else if (AArch64::FPR128RegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::FPR128;
    else if (AArch64::ZPRRegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::ZPR;
    else if (AArch64::PPRRegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::PPR;
    else
      llvm_unreachable("Unsupported register class.");

    // Add the next reg to the pair if it is in the same register class.
    if (unsigned(i + RegInc) < Count) {
      Register NextReg = CSI[i + RegInc].getReg();
      bool IsFirst = i == FirstReg;
      switch (RPI.Type) {
      case RegPairInfo::GPR:
        if (AArch64::GPR64RegClass.contains(NextReg) &&
            !invalidateRegisterPairing(RPI.Reg1, NextReg, IsWindows,
                                       NeedsWinCFI, NeedsFrameRecord, IsFirst))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::FPR64:
        if (AArch64::FPR64RegClass.contains(NextReg) &&
            !invalidateWindowsRegisterPairing(RPI.Reg1, NextReg, NeedsWinCFI,
                                              IsFirst))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::FPR128:
        if (AArch64::FPR128RegClass.contains(NextReg))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::PPR:
      case RegPairInfo::ZPR:
        break;
      }
    }

    // If either of the registers to be saved is the lr register, it means
    // that we also need to save lr in the shadow call stack.
    if ((RPI.Reg1 == AArch64::LR || RPI.Reg2 == AArch64::LR) &&
        MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack)) {
      if (!MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(18))
        report_fatal_error("Must reserve x18 to use shadow call stack");
      NeedShadowCallStackProlog = true;
    }

    // GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI
    // list to come in sorted by frame index so that we can issue the store
    // pair instructions directly. Assert if we see anything otherwise.
    //
    // The order of the registers in the list is controlled by
    // getCalleeSavedRegs(), so they will always be in-order, as well.
    assert((!RPI.isPaired() ||
            (CSI[i].getFrameIdx() + RegInc == CSI[i + RegInc].getFrameIdx())) &&
           "Out of order callee saved regs!");

    assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg2 != AArch64::FP ||
            RPI.Reg1 == AArch64::LR) &&
           "FrameRecord must be allocated together with LR");

    // Windows AAPCS has FP and LR reversed.
    assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg1 != AArch64::FP ||
            RPI.Reg2 == AArch64::LR) &&
           "FrameRecord must be allocated together with LR");

    // MachO's compact unwind format relies on all registers being stored in
    // adjacent register pairs.
    assert((!produceCompactUnwindFrame(MF) ||
            CC == CallingConv::PreserveMost ||
            CC == CallingConv::CXX_FAST_TLS ||
            (RPI.isPaired() &&
             ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
              RPI.Reg1 + 1 == RPI.Reg2))) &&
           "Callee-save registers not saved as adjacent register pair!");

    RPI.FrameIdx = CSI[i].getFrameIdx();
    if (NeedsWinCFI &&
        RPI.isPaired()) // RPI.FrameIdx must be the lower index of the pair
      RPI.FrameIdx = CSI[i + RegInc].getFrameIdx();

    int Scale = RPI.getScale();

    int OffsetPre = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
    assert(OffsetPre % Scale == 0);

    if (RPI.isScalable())
      ScalableByteOffset += StackFillDir * Scale;
    else
      ByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale);

    // Swift's async context is directly before FP, so allocate an extra
    // 8 bytes for it.
    if (NeedsFrameRecord && AFI->hasSwiftAsyncContext() &&
        RPI.Reg2 == AArch64::FP)
      ByteOffset += StackFillDir * 8;

    assert(!(RPI.isScalable() && RPI.isPaired()) &&
           "Paired spill/fill instructions don't exist for SVE vectors");

    // Round up size of non-pair to pair size if we need to pad the
    // callee-save area to ensure 16-byte alignment.
    if (NeedGapToAlignStack && !NeedsWinCFI && !RPI.isScalable() &&
        RPI.Type != RegPairInfo::FPR128 && !RPI.isPaired() &&
        ByteOffset % 16 != 0) {
      ByteOffset += 8 * StackFillDir;
      assert(MFI.getObjectAlign(RPI.FrameIdx) <= Align(16));
      // A stack frame with a gap looks like this, bottom up:
      // d9, d8. x21, gap, x20, x19.
      // Set extra alignment on the x21 object to create the gap above it.
      MFI.setObjectAlignment(RPI.FrameIdx, Align(16));
      NeedGapToAlignStack = false;
    }

    int OffsetPost = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
    assert(OffsetPost % Scale == 0);
    // If filling top down (default), we want the offset after incrementing
    // it. If filling bottom up (WinCFI) we need the original offset.
    int Offset = NeedsWinCFI ? OffsetPre : OffsetPost;

    // The FP, LR pair goes 8 bytes into our expanded 24-byte slot so that
    // the Swift context can directly precede FP.
    if (NeedsFrameRecord && AFI->hasSwiftAsyncContext() &&
        RPI.Reg2 == AArch64::FP)
      Offset += 8;
    RPI.Offset = Offset / Scale;

    assert(((!RPI.isScalable() && RPI.Offset >= -64 && RPI.Offset <= 63) ||
            (RPI.isScalable() && RPI.Offset >= -256 && RPI.Offset <= 255)) &&
           "Offset out of bounds for LDP/STP immediate");

    // Save the offset to frame record so that the FP register can point to
    // the innermost frame record (spilled FP and LR registers).
    if (NeedsFrameRecord && ((!IsWindows && RPI.Reg1 == AArch64::LR &&
                              RPI.Reg2 == AArch64::FP) ||
                             (IsWindows && RPI.Reg1 == AArch64::FP &&
                              RPI.Reg2 == AArch64::LR)))
      AFI->setCalleeSaveBaseToFrameRecordOffset(Offset);

    RegPairs.push_back(RPI);
    if (RPI.isPaired())
      i += RegInc;
  }
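
  // Worked example (illustrative): a paired GPR spill assigned at
  // ByteOffset == 32 has Scale == 8, so RPI.Offset == 4, matching the
  // "stp xN, xM, [sp, #32]" form in which STP's immediate operand is scaled
  // by the access size.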

  if (NeedsWinCFI) {
    // If we need an alignment gap in the stack, align the topmost stack
    // object. A stack frame with a gap looks like this, bottom up:
    // x19, d8. d9, gap.
    // Set extra alignment on the topmost stack object (the first element
    // in CSI, which goes top down), to create the gap above it.
    if (AFI->hasCalleeSaveStackFreeSpace())
      MFI.setObjectAlignment(CSI[0].getFrameIdx(), Align(16));
    // We iterated bottom up over the registers; flip RegPairs back to top
    // down order.
    std::reverse(RegPairs.begin(), RegPairs.end());
  }
}

bool AArch64FrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  bool NeedsWinCFI = needsWinCFI(MF);
  DebugLoc DL;
  SmallVector<RegPairInfo, 8> RegPairs;

  bool NeedShadowCallStackProlog = false;
  computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs,
                                 NeedShadowCallStackProlog, hasFP(MF));
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  if (NeedShadowCallStackProlog) {
    // Shadow call stack prolog: str x30, [x18], #8
    BuildMI(MBB, MI, DL, TII.get(AArch64::STRXpost))
        .addReg(AArch64::X18, RegState::Define)
        .addReg(AArch64::LR)
        .addReg(AArch64::X18)
        .addImm(8)
        .setMIFlag(MachineInstr::FrameSetup);

    if (NeedsWinCFI)
      BuildMI(MBB, MI, DL, TII.get(AArch64::SEH_Nop))
          .setMIFlag(MachineInstr::FrameSetup);

    // Emit a CFI instruction that causes 8 to be subtracted from the value
    // of x18 when unwinding past this frame.
    static const char CFIInst[] = {
        dwarf::DW_CFA_val_expression,
        18, // register
        2,  // length
        static_cast<char>(unsigned(dwarf::DW_OP_breg18)),
        static_cast<char>(-8) & 0x7f, // addend (sleb128)
    };
    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createEscape(
        nullptr, StringRef(CFIInst, sizeof(CFIInst))));
    BuildMI(MBB, MI, DL, TII.get(AArch64::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlag(MachineInstr::FrameSetup);

    // This instruction also makes x18 live-in to the entry block.
    MBB.addLiveIn(AArch64::X18);
  }

  if (homogeneousPrologEpilog(MF)) {
    auto MIB = BuildMI(MBB, MI, DL, TII.get(AArch64::HOM_Prolog))
                   .setMIFlag(MachineInstr::FrameSetup);

    for (auto &RPI : RegPairs) {
      MIB.addReg(RPI.Reg1);
      MIB.addReg(RPI.Reg2);

      // Update register live in.
      if (!MRI.isReserved(RPI.Reg1))
        MBB.addLiveIn(RPI.Reg1);
      if (!MRI.isReserved(RPI.Reg2))
        MBB.addLiveIn(RPI.Reg2);
    }
    return true;
  }
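
  // Reading the CFI escape above (illustrative): DW_CFA_val_expression says
  // that, when unwinding past this frame, the caller's value of register 18
  // is computed as "breg18 - 8", i.e. the post-incremented shadow call
  // stack pointer minus the slot just pushed for lr.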

  for (const RegPairInfo &RPI : llvm::reverse(RegPairs)) {
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;
    unsigned StrOpc;

    // Issue sequence of spills for cs regs. The first spill may be converted
    // to a pre-decrement store later by emitPrologue if the callee-save
    // stack area allocation can't be combined with the local stack area
    // allocation. For example:
    //    stp     x22, x21, [sp, #0]     // addImm(+0)
    //    stp     x20, x19, [sp, #16]    // addImm(+2)
    //    stp     fp, lr, [sp, #32]      // addImm(+4)
    // Rationale: This sequence saves uop updates compared to a sequence of
    // pre-increment spills like stp xi,xj,[sp,#-16]!
    // Note: Similar rationale and sequence for restores in epilog.
    unsigned Size;
    Align Alignment;
    switch (RPI.Type) {
    case RegPairInfo::GPR:
      StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
      Size = 8;
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR64:
      StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
      Size = 8;
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR128:
      StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui;
      Size = 16;
      Alignment = Align(16);
      break;
    case RegPairInfo::ZPR:
      StrOpc = AArch64::STR_ZXI;
      Size = 16;
      Alignment = Align(16);
      break;
    case RegPairInfo::PPR:
      StrOpc = AArch64::STR_PXI;
      Size = 2;
      Alignment = Align(2);
      break;
    }
    LLVM_DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI);
               if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
               dbgs() << ") -> fi#(" << RPI.FrameIdx;
               if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
               dbgs() << ")\n");

    assert((!NeedsWinCFI || !(Reg1 == AArch64::LR && Reg2 == AArch64::FP)) &&
           "Windows unwinding requires a consecutive (FP,LR) pair");
    // Windows unwind codes require consecutive registers if registers are
    // paired. Make the switch here, so that the code below will save (x,x+1)
    // and not (x+1,x).
    unsigned FrameIdxReg1 = RPI.FrameIdx;
    unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
    if (NeedsWinCFI && RPI.isPaired()) {
      std::swap(Reg1, Reg2);
      std::swap(FrameIdxReg1, FrameIdxReg2);
    }
    MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
    if (!MRI.isReserved(Reg1))
      MBB.addLiveIn(Reg1);
    if (RPI.isPaired()) {
      if (!MRI.isReserved(Reg2))
        MBB.addLiveIn(Reg2);
      MIB.addReg(Reg2, getPrologueDeath(MF, Reg2));
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, FrameIdxReg2),
          MachineMemOperand::MOStore, Size, Alignment));
    }
    MIB.addReg(Reg1, getPrologueDeath(MF, Reg1))
        .addReg(AArch64::SP)
        .addImm(RPI.Offset) // [sp, #offset*scale],
                            // where factor*scale is implicit
        .setMIFlag(MachineInstr::FrameSetup);
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FrameIdxReg1),
        MachineMemOperand::MOStore, Size, Alignment));
    if (NeedsWinCFI)
      InsertSEH(MIB, TII, MachineInstr::FrameSetup);

    // Update the StackIDs of the SVE stack slots.
    MachineFrameInfo &MFI = MF.getFrameInfo();
    if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR)
      MFI.setStackID(RPI.FrameIdx, TargetStackID::ScalableVector);
  }
  return true;
}
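
// Illustrative cross-reference (a reading of the code, not original text):
// the setStackID(..., ScalableVector) calls above are what mark these slots
// as scalable, so that frame-index resolution takes the isSVE path and the
// SVE offset machinery later in this file treats them as part of the SVE
// area.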

bool AArch64FrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  DebugLoc DL;
  SmallVector<RegPairInfo, 8> RegPairs;
  bool NeedsWinCFI = needsWinCFI(MF);

  if (MI != MBB.end())
    DL = MI->getDebugLoc();

  bool NeedShadowCallStackProlog = false;
  computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs,
                                 NeedShadowCallStackProlog, hasFP(MF));

  auto EmitMI = [&](const RegPairInfo &RPI) {
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;

    // Issue sequence of restores for cs regs. The last restore may be
    // converted to a post-increment load later by emitEpilogue if the
    // callee-save stack area allocation can't be combined with the local
    // stack area allocation. For example:
    //    ldp     fp, lr, [sp, #32]      // addImm(+4)
    //    ldp     x20, x19, [sp, #16]    // addImm(+2)
    //    ldp     x22, x21, [sp, #0]     // addImm(+0)
    // Note: see comment in spillCalleeSavedRegisters()
    unsigned LdrOpc;
    unsigned Size;
    Align Alignment;
    switch (RPI.Type) {
    case RegPairInfo::GPR:
      LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
      Size = 8;
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR64:
      LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
      Size = 8;
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR128:
      LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui;
      Size = 16;
      Alignment = Align(16);
      break;
    case RegPairInfo::ZPR:
      LdrOpc = AArch64::LDR_ZXI;
      Size = 16;
      Alignment = Align(16);
      break;
    case RegPairInfo::PPR:
      LdrOpc = AArch64::LDR_PXI;
      Size = 2;
      Alignment = Align(2);
      break;
    }
    LLVM_DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI);
               if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
               dbgs() << ") -> fi#(" << RPI.FrameIdx;
               if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
               dbgs() << ")\n");

    // Windows unwind codes require consecutive registers if registers are
    // paired. Make the switch here, so that the code below will save (x,x+1)
    // and not (x+1,x).
    unsigned FrameIdxReg1 = RPI.FrameIdx;
    unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
    if (NeedsWinCFI && RPI.isPaired()) {
      std::swap(Reg1, Reg2);
      std::swap(FrameIdxReg1, FrameIdxReg2);
    }
    MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(LdrOpc));
    if (RPI.isPaired()) {
      MIB.addReg(Reg2, getDefRegState(true));
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, FrameIdxReg2),
          MachineMemOperand::MOLoad, Size, Alignment));
    }
    MIB.addReg(Reg1, getDefRegState(true))
        .addReg(AArch64::SP)
        .addImm(RPI.Offset) // [sp, #offset*scale]
                            // where factor*scale is implicit
        .setMIFlag(MachineInstr::FrameDestroy);
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FrameIdxReg1),
        MachineMemOperand::MOLoad, Size, Alignment));
    if (NeedsWinCFI)
      InsertSEH(MIB, TII, MachineInstr::FrameDestroy);
  };

  // SVE objects are always restored in reverse order.
  for (const RegPairInfo &RPI : reverse(RegPairs))
    if (RPI.isScalable())
      EmitMI(RPI);

  if (ReverseCSRRestoreSeq) {
    for (const RegPairInfo &RPI : reverse(RegPairs))
      if (!RPI.isScalable())
        EmitMI(RPI);
  } else if (homogeneousPrologEpilog(MF, &MBB)) {
    auto MIB = BuildMI(MBB, MI, DL, TII.get(AArch64::HOM_Epilog))
                   .setMIFlag(MachineInstr::FrameDestroy);
    for (auto &RPI : RegPairs) {
      MIB.addReg(RPI.Reg1, RegState::Define);
      MIB.addReg(RPI.Reg2, RegState::Define);
    }
    return true;
  } else
    for (const RegPairInfo &RPI : RegPairs)
      if (!RPI.isScalable())
        EmitMI(RPI);
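
  // Ordering recap (illustrative): scalable (SVE) registers are always
  // reloaded first, in reverse; the GPR/FPR pairs then follow either
  // forwards, reversed under the ReverseCSRRestoreSeq option, or as a
  // single HOM_Epilog pseudo when homogeneous prolog/epilog is enabled.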

  if (NeedShadowCallStackProlog) {
    // Shadow call stack epilog: ldr x30, [x18, #-8]!
    BuildMI(MBB, MI, DL, TII.get(AArch64::LDRXpre))
        .addReg(AArch64::X18, RegState::Define)
        .addReg(AArch64::LR, RegState::Define)
        .addReg(AArch64::X18)
        .addImm(-8)
        .setMIFlag(MachineInstr::FrameDestroy);
  }

  return true;
}

void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
                                                BitVector &SavedRegs,
                                                RegScavenger *RS) const {
  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  unsigned UnspilledCSGPR = AArch64::NoRegister;
  unsigned UnspilledCSGPRPaired = AArch64::NoRegister;

  MachineFrameInfo &MFI = MF.getFrameInfo();
  const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs();

  unsigned BasePointerReg = RegInfo->hasBasePointer(MF)
                                ? RegInfo->getBaseRegister()
                                : (unsigned)AArch64::NoRegister;

  unsigned ExtraCSSpill = 0;
  // Figure out which callee-saved registers to save/restore.
  for (unsigned i = 0; CSRegs[i]; ++i) {
    const unsigned Reg = CSRegs[i];

    // Add the base pointer register to SavedRegs if it is callee-save.
    if (Reg == BasePointerReg)
      SavedRegs.set(Reg);

    bool RegUsed = SavedRegs.test(Reg);
    unsigned PairedReg = AArch64::NoRegister;
    if (AArch64::GPR64RegClass.contains(Reg) ||
        AArch64::FPR64RegClass.contains(Reg) ||
        AArch64::FPR128RegClass.contains(Reg))
      PairedReg = CSRegs[i ^ 1];

    if (!RegUsed) {
      if (AArch64::GPR64RegClass.contains(Reg) &&
          !RegInfo->isReservedReg(MF, Reg)) {
        UnspilledCSGPR = Reg;
        UnspilledCSGPRPaired = PairedReg;
      }
      continue;
    }

    // MachO's compact unwind format relies on all registers being stored in
    // pairs.
    // FIXME: the usual format is actually better if unwinding isn't needed.
    if (producePairRegisters(MF) && PairedReg != AArch64::NoRegister &&
        !SavedRegs.test(PairedReg)) {
      SavedRegs.set(PairedReg);
      if (AArch64::GPR64RegClass.contains(PairedReg) &&
          !RegInfo->isReservedReg(MF, PairedReg))
        ExtraCSSpill = PairedReg;
    }
  }

  if (MF.getFunction().getCallingConv() == CallingConv::Win64 &&
      !Subtarget.isTargetWindows()) {
    // For Windows calling convention on a non-windows OS, where X18 is
    // treated as reserved, back up X18 when entering non-windows code
    // (marked with the Windows calling convention) and restore when
    // returning regardless of whether the individual function uses it -
    // it might call other functions that clobber it.
    SavedRegs.set(AArch64::X18);
  }

  // Calculate the callee-saved stack size.
  unsigned CSStackSize = 0;
  unsigned SVECSStackSize = 0;
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned Reg : SavedRegs.set_bits()) {
    auto RegSize = TRI->getRegSizeInBits(Reg, MRI) / 8;
    if (AArch64::PPRRegClass.contains(Reg) ||
        AArch64::ZPRRegClass.contains(Reg))
      SVECSStackSize += RegSize;
    else
      CSStackSize += RegSize;
  }

  // Save number of saved regs, so we can easily update CSStackSize later.
  unsigned NumSavedRegs = SavedRegs.count();

  // The frame record needs to be created by saving the appropriate
  // registers.
  uint64_t EstimatedStackSize = MFI.estimateStackSize(MF);
  if (hasFP(MF) ||
      windowsRequiresStackProbe(MF, EstimatedStackSize + CSStackSize + 16)) {
    SavedRegs.set(AArch64::FP);
    SavedRegs.set(AArch64::LR);
  }

  LLVM_DEBUG(dbgs() << "*** determineCalleeSaves\nSaved CSRs:";
             for (unsigned Reg
                  : SavedRegs.set_bits()) dbgs()
             << ' ' << printReg(Reg, RegInfo);
             dbgs() << "\n";);

  // If any callee-saved registers are used, the frame cannot be eliminated.
  int64_t SVEStackSize =
      alignTo(SVECSStackSize + estimateSVEStackObjectOffsets(MFI), 16);
  bool CanEliminateFrame = (SavedRegs.count() == 0) && !SVEStackSize;

  // The CSR spill slots have not been allocated yet, so estimateStackSize
  // won't include them.
  unsigned EstimatedStackSizeLimit = estimateRSStackSizeLimit(MF);

  // Conservatively always assume BigStack when there are SVE spills.
  bool BigStack = SVEStackSize ||
                  (EstimatedStackSize + CSStackSize) > EstimatedStackSizeLimit;
  if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
    AFI->setHasStackFrame(true);

  // Estimate if we might need to scavenge a register at some point in order
  // to materialize a stack offset. If so, either spill one additional
  // callee-saved register or reserve a special spill slot to facilitate
  // register scavenging. If we already spilled an extra callee-saved
  // register above to keep the number of spills even, we don't need to do
  // anything else here.
  if (BigStack) {
    if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) {
      LLVM_DEBUG(dbgs() << "Spilling " << printReg(UnspilledCSGPR, RegInfo)
                        << " to get a scratch register.\n");
      SavedRegs.set(UnspilledCSGPR);
      // MachO's compact unwind format relies on all registers being stored
      // in pairs, so if we need to spill one extra for BigStack, then we
      // need to store the pair.
      if (producePairRegisters(MF))
        SavedRegs.set(UnspilledCSGPRPaired);
      ExtraCSSpill = UnspilledCSGPR;
    }

    // If we didn't find an extra callee-saved register to spill, create
    // an emergency spill slot.
    if (!ExtraCSSpill || MF.getRegInfo().isPhysRegUsed(ExtraCSSpill)) {
      const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
      const TargetRegisterClass &RC = AArch64::GPR64RegClass;
      unsigned Size = TRI->getSpillSize(RC);
      Align Alignment = TRI->getSpillAlign(RC);
      int FI = MFI.CreateStackObject(Size, Alignment, false);
      RS->addScavengingFrameIndex(FI);
      LLVM_DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
                        << " as the emergency spill slot.\n");
    }
  }
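
  // Rationale recap (illustrative): when the estimated frame is larger than
  // what the spill/restore instructions can address directly, materialising
  // a large offset needs a scratch register; the extra CSR spill (or the
  // emergency slot created above) guarantees the scavenger can provide one.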

  // Add the size of any additional 64-bit GPR saves.
  CSStackSize += 8 * (SavedRegs.count() - NumSavedRegs);

  // A Swift asynchronous context extends the frame record with a pointer
  // directly before FP.
  if (hasFP(MF) && AFI->hasSwiftAsyncContext())
    CSStackSize += 8;

  uint64_t AlignedCSStackSize = alignTo(CSStackSize, 16);
  LLVM_DEBUG(dbgs() << "Estimated stack frame size: "
                    << EstimatedStackSize + AlignedCSStackSize
                    << " bytes.\n");

  assert((!MFI.isCalleeSavedInfoValid() ||
          AFI->getCalleeSavedStackSize() == AlignedCSStackSize) &&
         "Should not invalidate callee saved info");

  // Round up to register pair alignment to avoid additional SP adjustment
  // instructions.
  AFI->setCalleeSavedStackSize(AlignedCSStackSize);
  AFI->setCalleeSaveStackHasFreeSpace(AlignedCSStackSize != CSStackSize);
  AFI->setSVECalleeSavedStackSize(alignTo(SVECSStackSize, 16));
}

bool AArch64FrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *RegInfo,
    std::vector<CalleeSavedInfo> &CSI, unsigned &MinCSFrameIndex,
    unsigned &MaxCSFrameIndex) const {
  bool NeedsWinCFI = needsWinCFI(MF);
  // To match the canonical windows frame layout, reverse the list of
  // callee saved registers to get them laid out by PrologEpilogInserter
  // in the right order. (PrologEpilogInserter allocates stack objects top
  // down. Windows canonical prologs store higher numbered registers at
  // the top, thus have the CSI array start from the highest registers.)
  if (NeedsWinCFI)
    std::reverse(CSI.begin(), CSI.end());

  if (CSI.empty())
    return true; // Early exit if no callee saved registers are modified!

  // Now that we know which registers need to be saved and restored, allocate
  // stack slots for them.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto *AFI = MF.getInfo<AArch64FunctionInfo>();
  for (auto &CS : CSI) {
    Register Reg = CS.getReg();
    const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);

    unsigned Size = RegInfo->getSpillSize(*RC);
    Align Alignment(RegInfo->getSpillAlign(*RC));
    int FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
    CS.setFrameIdx(FrameIdx);

    if ((unsigned)FrameIdx < MinCSFrameIndex)
      MinCSFrameIndex = FrameIdx;
    if ((unsigned)FrameIdx > MaxCSFrameIndex)
      MaxCSFrameIndex = FrameIdx;

    // Grab 8 bytes below FP for the extended asynchronous frame info.
    if (hasFP(MF) && AFI->hasSwiftAsyncContext() && Reg == AArch64::FP) {
      FrameIdx = MFI.CreateStackObject(8, Alignment, true);
      AFI->setSwiftAsyncContextFrameIdx(FrameIdx);
      if ((unsigned)FrameIdx < MinCSFrameIndex)
        MinCSFrameIndex = FrameIdx;
      if ((unsigned)FrameIdx > MaxCSFrameIndex)
        MaxCSFrameIndex = FrameIdx;
    }
  }
  return true;
}

bool AArch64FrameLowering::enableStackSlotScavenging(
    const MachineFunction &MF) const {
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  return AFI->hasCalleeSaveStackFreeSpace();
}
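
// Worked example (illustrative): saving x19..x21 gives CSStackSize == 24,
// so AlignedCSStackSize == 32; the 8 spare bytes are recorded via
// setCalleeSaveStackHasFreeSpace(), which in turn lets
// enableStackSlotScavenging() above report a reusable gap.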
static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI,
                                      int &Min, int &Max) {
  Min = std::numeric_limits<int>::max();
  Max = std::numeric_limits<int>::min();

  if (!MFI.isCalleeSavedInfoValid())
    return false;

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  for (auto &CS : CSI) {
    if (AArch64::ZPRRegClass.contains(CS.getReg()) ||
        AArch64::PPRRegClass.contains(CS.getReg())) {
      assert((Max == std::numeric_limits<int>::min() ||
              Max + 1 == CS.getFrameIdx()) &&
             "SVE CalleeSaves are not consecutive");

      Min = std::min(Min, CS.getFrameIdx());
      Max = std::max(Max, CS.getFrameIdx());
    }
  }
  return Min != std::numeric_limits<int>::max();
}

// Process all the SVE stack objects and determine offsets for each
// object. If AssignOffsets is true, the offsets get assigned.
// Fills in the first and last callee-saved frame indices into
// Min/MaxCSFrameIndex, respectively.
// Returns the size of the stack.
static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
                                              int &MinCSFrameIndex,
                                              int &MaxCSFrameIndex,
                                              bool AssignOffsets) {
#ifndef NDEBUG
  // First process all fixed stack objects.
  for (int I = MFI.getObjectIndexBegin(); I != 0; ++I)
    assert(MFI.getStackID(I) != TargetStackID::ScalableVector &&
           "SVE vectors should never be passed on the stack by value, only by "
           "reference.");
#endif

  auto Assign = [&MFI](int FI, int64_t Offset) {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FI << ") at SP[" << Offset << "]\n");
    MFI.setObjectOffset(FI, Offset);
  };

  int64_t Offset = 0;

  // Then process all callee saved slots.
  if (getSVECalleeSaveSlotRange(MFI, MinCSFrameIndex, MaxCSFrameIndex)) {
    // Assign offsets to the callee save slots.
    for (int I = MinCSFrameIndex; I <= MaxCSFrameIndex; ++I) {
      Offset += MFI.getObjectSize(I);
      Offset = alignTo(Offset, MFI.getObjectAlign(I));
      if (AssignOffsets)
        Assign(I, -Offset);
    }
  }

  // Ensure that the callee-save area is aligned to 16 bytes.
  Offset = alignTo(Offset, Align(16U));

  // Create a buffer of SVE objects to allocate and sort it.
  SmallVector<int, 8> ObjectsToAllocate;
  // If we have a stack protector, and we've previously decided that we have
  // SVE objects on the stack and thus need it to go in the SVE stack area,
  // then it needs to go first.
  int StackProtectorFI = -1;
  if (MFI.hasStackProtectorIndex()) {
    StackProtectorFI = MFI.getStackProtectorIndex();
    if (MFI.getStackID(StackProtectorFI) == TargetStackID::ScalableVector)
      ObjectsToAllocate.push_back(StackProtectorFI);
  }
  for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
    unsigned StackID = MFI.getStackID(I);
    if (StackID != TargetStackID::ScalableVector)
      continue;
    if (I == StackProtectorFI)
      continue;
    if (MaxCSFrameIndex >= I && I >= MinCSFrameIndex)
      continue;
    if (MFI.isDeadObjectIndex(I))
      continue;

    ObjectsToAllocate.push_back(I);
  }

  // Allocate all SVE locals and spills
  for (unsigned FI : ObjectsToAllocate) {
    Align Alignment = MFI.getObjectAlign(FI);
    // FIXME: Given that the length of SVE vectors is not necessarily a power
    // of two, we'd need to align every object dynamically at runtime if the
    // alignment is larger than 16. This is not yet supported.
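    // (Illustrative example: with a runtime vector length of 48 bytes, an
    // offset of one vector is 16-byte aligned but not 32-byte aligned, so
    // nothing stricter than the 16-byte granule can be guaranteed statically.)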
    if (Alignment > Align(16))
      report_fatal_error(
          "Alignment of scalable vectors > 16 bytes is not yet supported");

    Offset = alignTo(Offset + MFI.getObjectSize(FI), Alignment);
    if (AssignOffsets)
      Assign(FI, -Offset);
  }

  return Offset;
}

int64_t AArch64FrameLowering::estimateSVEStackObjectOffsets(
    MachineFrameInfo &MFI) const {
  int MinCSFrameIndex, MaxCSFrameIndex;
  return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex,
                                        false);
}

int64_t AArch64FrameLowering::assignSVEStackObjectOffsets(
    MachineFrameInfo &MFI, int &MinCSFrameIndex, int &MaxCSFrameIndex) const {
  return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex,
                                        true);
}

void AArch64FrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF, RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  assert(getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown &&
         "Upwards growing stack unsupported");

  int MinCSFrameIndex, MaxCSFrameIndex;
  int64_t SVEStackSize =
      assignSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex);

  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  AFI->setStackSizeSVE(alignTo(SVEStackSize, 16U));
  AFI->setMinMaxSVECSFrameIndex(MinCSFrameIndex, MaxCSFrameIndex);

  // If this function isn't doing Win64-style C++ EH, we don't need to do
  // anything.
  if (!MF.hasEHFunclets())
    return;
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo();

  MachineBasicBlock &MBB = MF.front();
  auto MBBI = MBB.begin();
  while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
    ++MBBI;

  // Create an UnwindHelp object.
  // The UnwindHelp object is allocated at the start of the fixed object area.
  int64_t FixedObject =
      getFixedObjectSize(MF, AFI, /*IsWin64*/ true, /*IsFunclet*/ false);
  int UnwindHelpFI = MFI.CreateFixedObject(/*Size*/ 8,
                                           /*SPOffset*/ -FixedObject,
                                           /*IsImmutable=*/false);
  EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;

  // We need to store -2 into the UnwindHelp object at the start of the
  // function.
  DebugLoc DL;
  RS->enterBasicBlockEnd(MBB);
  RS->backward(std::prev(MBBI));
  Register DstReg = RS->FindUnusedReg(&AArch64::GPR64commonRegClass);
  assert(DstReg && "There must be a free register after frame setup");
  BuildMI(MBB, MBBI, DL, TII.get(AArch64::MOVi64imm), DstReg).addImm(-2);
  BuildMI(MBB, MBBI, DL, TII.get(AArch64::STURXi))
      .addReg(DstReg, getKillRegState(true))
      .addFrameIndex(UnwindHelpFI)
      .addImm(0);
}

namespace {
struct TagStoreInstr {
  MachineInstr *MI;
  int64_t Offset, Size;
  explicit TagStoreInstr(MachineInstr *MI, int64_t Offset, int64_t Size)
      : MI(MI), Offset(Offset), Size(Size) {}
};

class TagStoreEdit {
  MachineFunction *MF;
  MachineBasicBlock *MBB;
  MachineRegisterInfo *MRI;
  // Tag store instructions that are being replaced.
  SmallVector<TagStoreInstr, 8> TagStores;
  // Combined memref arguments of the above instructions.
  SmallVector<MachineMemOperand *, 8> CombinedMemRefs;

  // Replace allocation tags in [FrameReg + FrameRegOffset, FrameReg +
  // FrameRegOffset + Size) with the address tag of SP.
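  // MTE tags memory in 16-byte granules; Size below is therefore always a
  // multiple of 16.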
  Register FrameReg;
  StackOffset FrameRegOffset;
  int64_t Size;
  // If not None, move FrameReg to (FrameReg + FrameRegUpdate) at the end.
  Optional<int64_t> FrameRegUpdate;
  // MIFlags for any FrameReg updating instructions.
  unsigned FrameRegUpdateFlags;

  // Use zeroing instruction variants.
  bool ZeroData;
  DebugLoc DL;

  void emitUnrolled(MachineBasicBlock::iterator InsertI);
  void emitLoop(MachineBasicBlock::iterator InsertI);

public:
  TagStoreEdit(MachineBasicBlock *MBB, bool ZeroData)
      : MBB(MBB), ZeroData(ZeroData) {
    MF = MBB->getParent();
    MRI = &MF->getRegInfo();
  }
  // Add an instruction to be replaced. Instructions must be added in
  // ascending order of Offset, and have to be adjacent.
  void addInstruction(TagStoreInstr I) {
    assert((TagStores.empty() ||
            TagStores.back().Offset + TagStores.back().Size == I.Offset) &&
           "Non-adjacent tag store instructions.");
    TagStores.push_back(I);
  }
  void clear() { TagStores.clear(); }
  // Emit equivalent code at the given location, and erase the current set of
  // instructions. May skip if the replacement is not profitable. May
  // invalidate the input iterator and replace it with a valid one.
  void emitCode(MachineBasicBlock::iterator &InsertI,
                const AArch64FrameLowering *TFI, bool IsLast);
};

void TagStoreEdit::emitUnrolled(MachineBasicBlock::iterator InsertI) {
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();

  const int64_t kMinOffset = -256 * 16;
  const int64_t kMaxOffset = 255 * 16;

  Register BaseReg = FrameReg;
  int64_t BaseRegOffsetBytes = FrameRegOffset.getFixed();
  if (BaseRegOffsetBytes < kMinOffset ||
      BaseRegOffsetBytes + (Size - Size % 32) > kMaxOffset) {
    Register ScratchReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
    emitFrameOffset(*MBB, InsertI, DL, ScratchReg, BaseReg,
                    StackOffset::getFixed(BaseRegOffsetBytes), TII);
    BaseReg = ScratchReg;
    BaseRegOffsetBytes = 0;
  }

  MachineInstr *LastI = nullptr;
  while (Size) {
    int64_t InstrSize = (Size > 16) ? 32 : 16;
    unsigned Opcode =
        InstrSize == 16
            ? (ZeroData ? AArch64::STZGOffset : AArch64::STGOffset)
            : (ZeroData ? AArch64::STZ2GOffset : AArch64::ST2GOffset);
    MachineInstr *I = BuildMI(*MBB, InsertI, DL, TII->get(Opcode))
                          .addReg(AArch64::SP)
                          .addReg(BaseReg)
                          .addImm(BaseRegOffsetBytes / 16)
                          .setMemRefs(CombinedMemRefs);
    // A store to [BaseReg, #0] should go last for an opportunity to fold the
    // final SP adjustment in the epilogue.
    if (BaseRegOffsetBytes == 0)
      LastI = I;
    BaseRegOffsetBytes += InstrSize;
    Size -= InstrSize;
  }

  if (LastI)
    MBB->splice(InsertI, MBB, LastI);
}

void TagStoreEdit::emitLoop(MachineBasicBlock::iterator InsertI) {
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();

  Register BaseReg = FrameRegUpdate
                         ? FrameReg
                         : MRI->createVirtualRegister(&AArch64::GPR64RegClass);
  Register SizeReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);

  emitFrameOffset(*MBB, InsertI, DL, BaseReg, FrameReg, FrameRegOffset, TII);

  int64_t LoopSize = Size;
  // If the loop size is not a multiple of 32, split off one 16-byte store at
  // the end to fold the BaseReg update into.
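  // Illustrative example: with Size == 112, LoopSize becomes 96 for the
  // STGloop below, and the remaining 16 bytes are tagged by a post-indexed
  // STG that also carries the final base register update.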
  if (FrameRegUpdate && *FrameRegUpdate)
    LoopSize -= LoopSize % 32;
  MachineInstr *LoopI = BuildMI(*MBB, InsertI, DL,
                                TII->get(ZeroData ? AArch64::STZGloop_wback
                                                  : AArch64::STGloop_wback))
                            .addDef(SizeReg)
                            .addDef(BaseReg)
                            .addImm(LoopSize)
                            .addReg(BaseReg)
                            .setMemRefs(CombinedMemRefs);
  if (FrameRegUpdate)
    LoopI->setFlags(FrameRegUpdateFlags);

  int64_t ExtraBaseRegUpdate =
      FrameRegUpdate ? (*FrameRegUpdate - FrameRegOffset.getFixed() - Size) : 0;
  if (LoopSize < Size) {
    assert(FrameRegUpdate);
    assert(Size - LoopSize == 16);
    // Tag 16 more bytes at BaseReg and update BaseReg.
    BuildMI(*MBB, InsertI, DL,
            TII->get(ZeroData ? AArch64::STZGPostIndex : AArch64::STGPostIndex))
        .addDef(BaseReg)
        .addReg(BaseReg)
        .addReg(BaseReg)
        .addImm(1 + ExtraBaseRegUpdate / 16)
        .setMemRefs(CombinedMemRefs)
        .setMIFlags(FrameRegUpdateFlags);
  } else if (ExtraBaseRegUpdate) {
    // Update BaseReg.
    BuildMI(
        *MBB, InsertI, DL,
        TII->get(ExtraBaseRegUpdate > 0 ? AArch64::ADDXri : AArch64::SUBXri))
        .addDef(BaseReg)
        .addReg(BaseReg)
        .addImm(std::abs(ExtraBaseRegUpdate))
        .addImm(0)
        .setMIFlags(FrameRegUpdateFlags);
  }
}

// Check if *II is a register update that can be merged into the STGloop that
// ends at (Reg + Size). TotalOffset is the required adjustment to Reg after
// the end of the loop.
bool canMergeRegUpdate(MachineBasicBlock::iterator II, unsigned Reg,
                       int64_t Size, int64_t *TotalOffset) {
  MachineInstr &MI = *II;
  if ((MI.getOpcode() == AArch64::ADDXri ||
       MI.getOpcode() == AArch64::SUBXri) &&
      MI.getOperand(0).getReg() == Reg && MI.getOperand(1).getReg() == Reg) {
    unsigned Shift = AArch64_AM::getShiftValue(MI.getOperand(3).getImm());
    int64_t Offset = MI.getOperand(2).getImm() << Shift;
    if (MI.getOpcode() == AArch64::SUBXri)
      Offset = -Offset;
    int64_t AbsPostOffset = std::abs(Offset - Size);
    const int64_t kMaxOffset =
        0xFFF; // Max encoding for unshifted ADDXri / SUBXri
    if (AbsPostOffset <= kMaxOffset && AbsPostOffset % 16 == 0) {
      *TotalOffset = Offset;
      return true;
    }
  }
  return false;
}

void mergeMemRefs(const SmallVectorImpl<TagStoreInstr> &TSE,
                  SmallVectorImpl<MachineMemOperand *> &MemRefs) {
  MemRefs.clear();
  for (auto &TS : TSE) {
    MachineInstr *MI = TS.MI;
    // An instruction without memory operands may access anything. Be
    // conservative and return an empty list.
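    // (By MIR convention, an instruction with no memory operands is treated
    // as potentially accessing any memory, so the merged instruction stays
    // conservative as well.)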
    if (MI->memoperands_empty()) {
      MemRefs.clear();
      return;
    }
    MemRefs.append(MI->memoperands_begin(), MI->memoperands_end());
  }
}

void TagStoreEdit::emitCode(MachineBasicBlock::iterator &InsertI,
                            const AArch64FrameLowering *TFI, bool IsLast) {
  if (TagStores.empty())
    return;
  TagStoreInstr &FirstTagStore = TagStores[0];
  TagStoreInstr &LastTagStore = TagStores[TagStores.size() - 1];
  Size = LastTagStore.Offset - FirstTagStore.Offset + LastTagStore.Size;
  DL = TagStores[0].MI->getDebugLoc();

  Register Reg;
  FrameRegOffset = TFI->resolveFrameOffsetReference(
      *MF, FirstTagStore.Offset, false /*isFixed*/, false /*isSVE*/, Reg,
      /*PreferFP=*/false, /*ForSimm=*/true);
  FrameReg = Reg;
  FrameRegUpdate = None;

  mergeMemRefs(TagStores, CombinedMemRefs);

  LLVM_DEBUG(dbgs() << "Replacing adjacent STG instructions:\n";
             for (const auto &Instr
                  : TagStores) { dbgs() << "  " << *Instr.MI; });

  // Size threshold where a loop becomes shorter than a linear sequence of
  // tagging instructions.
  const int kSetTagLoopThreshold = 176;
  if (Size < kSetTagLoopThreshold) {
    if (TagStores.size() < 2)
      return;
    emitUnrolled(InsertI);
  } else {
    MachineInstr *UpdateInstr = nullptr;
    int64_t TotalOffset;
    if (IsLast) {
      // See if we can merge the base register update into the STGloop.
      // This is done in AArch64LoadStoreOptimizer for "normal" stores,
      // but STGloop is way too unusual for that, and it also only
      // realistically happens in the function epilogue. Also, STGloop is
      // expanded before that pass.
      if (InsertI != MBB->end() &&
          canMergeRegUpdate(InsertI, FrameReg, FrameRegOffset.getFixed() + Size,
                            &TotalOffset)) {
        UpdateInstr = &*InsertI++;
        LLVM_DEBUG(dbgs() << "Folding SP update into loop:\n  "
                          << *UpdateInstr);
      }
    }

    if (!UpdateInstr && TagStores.size() < 2)
      return;

    if (UpdateInstr) {
      FrameRegUpdate = TotalOffset;
      FrameRegUpdateFlags = UpdateInstr->getFlags();
    }
    emitLoop(InsertI);
    if (UpdateInstr)
      UpdateInstr->eraseFromParent();
  }

  for (auto &TS : TagStores)
    TS.MI->eraseFromParent();
}

bool isMergeableStackTaggingInstruction(MachineInstr &MI, int64_t &Offset,
                                        int64_t &Size, bool &ZeroData) {
  MachineFunction &MF = *MI.getParent()->getParent();
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  unsigned Opcode = MI.getOpcode();
  ZeroData = (Opcode == AArch64::STZGloop || Opcode == AArch64::STZGOffset ||
              Opcode == AArch64::STZ2GOffset);

  if (Opcode == AArch64::STGloop || Opcode == AArch64::STZGloop) {
    if (!MI.getOperand(0).isDead() || !MI.getOperand(1).isDead())
      return false;
    if (!MI.getOperand(2).isImm() || !MI.getOperand(3).isFI())
      return false;
    Offset = MFI.getObjectOffset(MI.getOperand(3).getIndex());
    Size = MI.getOperand(2).getImm();
    return true;
  }

  if (Opcode == AArch64::STGOffset || Opcode == AArch64::STZGOffset)
    Size = 16;
  else if (Opcode == AArch64::ST2GOffset || Opcode == AArch64::STZ2GOffset)
    Size = 32;
  else
    return false;

  if (MI.getOperand(0).getReg() != AArch64::SP || !MI.getOperand(1).isFI())
    return false;

  Offset = MFI.getObjectOffset(MI.getOperand(1).getIndex()) +
           16 * MI.getOperand(2).getImm();
  return true;
}

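// Illustrative example of the rewrite performed below (hypothetical MIR; the
// immediate operand of STGOffset/ST2GOffset is scaled by the 16-byte tag
// granule). Two adjacent 16-byte tag stores
//   STGOffset $sp, %stack.0, 0     ; tags bytes [0, 16) of the slot
//   STGOffset $sp, %stack.0, 1     ; tags bytes [16, 32)
// cover a contiguous 32-byte range and can be replaced by the single
//   ST2GOffset $sp, %stack.0, 0    ; tags bytes [0, 32)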
// Detect a run of memory tagging instructions for adjacent stack frame slots,
// and replace them with a shorter instruction sequence:
// * replace STG + STG with ST2G
// * replace STGloop + STGloop with STGloop
// This code needs to run when stack slot offsets are already known, but before
// FrameIndex operands in STG instructions are eliminated.
MachineBasicBlock::iterator tryMergeAdjacentSTG(MachineBasicBlock::iterator II,
                                                const AArch64FrameLowering *TFI,
                                                RegScavenger *RS) {
  bool FirstZeroData;
  int64_t Size, Offset;
  MachineInstr &MI = *II;
  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator NextI = ++II;
  if (&MI == &MBB->instr_back())
    return II;
  if (!isMergeableStackTaggingInstruction(MI, Offset, Size, FirstZeroData))
    return II;

  SmallVector<TagStoreInstr, 4> Instrs;
  Instrs.emplace_back(&MI, Offset, Size);

  constexpr int kScanLimit = 10;
  int Count = 0;
  for (MachineBasicBlock::iterator E = MBB->end();
       NextI != E && Count < kScanLimit; ++NextI) {
    MachineInstr &MI = *NextI;
    bool ZeroData;
    int64_t Size, Offset;
    // Collect instructions that update memory tags with a FrameIndex operand
    // and (when applicable) constant size, and whose output registers are dead
    // (the latter is almost always the case in practice). Since these
    // instructions effectively have no inputs or outputs, we are free to skip
    // any non-aliasing instructions in between without tracking used
    // registers.
    if (isMergeableStackTaggingInstruction(MI, Offset, Size, ZeroData)) {
      if (ZeroData != FirstZeroData)
        break;
      Instrs.emplace_back(&MI, Offset, Size);
      continue;
    }

    // Only count non-transient, non-tagging instructions toward the scan
    // limit.
    if (!MI.isTransient())
      ++Count;

    // Just in case, stop before the epilogue code starts.
    if (MI.getFlag(MachineInstr::FrameSetup) ||
        MI.getFlag(MachineInstr::FrameDestroy))
      break;

    // Reject anything that may alias the collected instructions.
    if (MI.mayLoadOrStore() || MI.hasUnmodeledSideEffects())
      break;
  }

  // New code will be inserted after the last tagging instruction we've found.
  MachineBasicBlock::iterator InsertI = Instrs.back().MI;
  InsertI++;

  llvm::stable_sort(Instrs,
                    [](const TagStoreInstr &Left, const TagStoreInstr &Right) {
                      return Left.Offset < Right.Offset;
                    });

  // Make sure that we don't have any overlapping stores.
  int64_t CurOffset = Instrs[0].Offset;
  for (auto &Instr : Instrs) {
    if (CurOffset > Instr.Offset)
      return NextI;
    CurOffset = Instr.Offset + Instr.Size;
  }

  // Find contiguous runs of tagged memory and emit shorter instruction
  // sequences for them when possible.
  TagStoreEdit TSE(MBB, FirstZeroData);
  Optional<int64_t> EndOffset;
  for (auto &Instr : Instrs) {
    if (EndOffset && *EndOffset != Instr.Offset) {
      // Found a gap.
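      // Flush the instructions collected for the previous contiguous run and
      // start a new one; IsLast is false because at least the current tag
      // store still follows.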
      TSE.emitCode(InsertI, TFI, /*IsLast = */ false);
      TSE.clear();
    }

    TSE.addInstruction(Instr);
    EndOffset = Instr.Offset + Instr.Size;
  }

  TSE.emitCode(InsertI, TFI, /*IsLast = */ true);

  return InsertI;
}
} // namespace

void AArch64FrameLowering::processFunctionBeforeFrameIndicesReplaced(
    MachineFunction &MF, RegScavenger *RS = nullptr) const {
  if (StackTaggingMergeSetTag)
    for (auto &BB : MF)
      for (MachineBasicBlock::iterator II = BB.begin(); II != BB.end();)
        II = tryMergeAdjacentSTG(II, this, RS);
}

/// For Win64 AArch64 EH, the offset to the Unwind object is from the SP
/// before the update. This is easily retrieved as it is exactly the offset
/// that is set in processFunctionBeforeFrameFinalized.
StackOffset AArch64FrameLowering::getFrameIndexReferencePreferSP(
    const MachineFunction &MF, int FI, Register &FrameReg,
    bool IgnoreSPUpdates) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (IgnoreSPUpdates) {
    LLVM_DEBUG(dbgs() << "Offset from the SP for " << FI << " is "
                      << MFI.getObjectOffset(FI) << "\n");
    FrameReg = AArch64::SP;
    return StackOffset::getFixed(MFI.getObjectOffset(FI));
  }

  // Go to common code if we cannot provide sp + offset.
  if (MFI.hasVarSizedObjects() ||
      MF.getInfo<AArch64FunctionInfo>()->getStackSizeSVE() ||
      MF.getSubtarget().getRegisterInfo()->hasStackRealignment(MF))
    return getFrameIndexReference(MF, FI, FrameReg);

  FrameReg = AArch64::SP;
  return getStackOffset(MF, MFI.getObjectOffset(FI));
}

/// The parent frame offset (aka dispFrame) is only used on X86_64 to retrieve
/// the parent's frame pointer.
unsigned AArch64FrameLowering::getWinEHParentFrameOffset(
    const MachineFunction &MF) const {
  return 0;
}

/// Funclets only need to account for space for the callee saved registers,
/// as the locals are accounted for in the parent's stack frame.
unsigned AArch64FrameLowering::getWinEHFuncletFrameSize(
    const MachineFunction &MF) const {
  // This is the size of the pushed CSRs.
  unsigned CSSize =
      MF.getInfo<AArch64FunctionInfo>()->getCalleeSavedStackSize();
  // This is the amount of stack a funclet needs to allocate.
  return alignTo(CSSize + MF.getFrameInfo().getMaxCallFrameSize(),
                 getStackAlign());
}

namespace {
struct FrameObject {
  bool IsValid = false;
  // Index of the object in MFI.
  int ObjectIndex = 0;
  // Group ID this object belongs to.
  int GroupIndex = -1;
  // This object should be placed first (closest to SP).
  bool ObjectFirst = false;
  // This object's group (which always contains the object with
  // ObjectFirst==true) should be placed first.
  bool GroupFirst = false;
};

class GroupBuilder {
  SmallVector<int, 8> CurrentMembers;
  int NextGroupIndex = 0;
  std::vector<FrameObject> &Objects;

public:
  GroupBuilder(std::vector<FrameObject> &Objects) : Objects(Objects) {}
  void AddMember(int Index) { CurrentMembers.push_back(Index); }
  void EndCurrentGroup() {
    if (CurrentMembers.size() > 1) {
      // Create a new group with the current member list. This might remove
      // them from their pre-existing groups. That's OK, dealing with
      // overlapping groups is too hard and unlikely to make a difference.
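      // Stamp every current member with the same fresh group index.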
      LLVM_DEBUG(dbgs() << "group:");
      for (int Index : CurrentMembers) {
        Objects[Index].GroupIndex = NextGroupIndex;
        LLVM_DEBUG(dbgs() << " " << Index);
      }
      LLVM_DEBUG(dbgs() << "\n");
      NextGroupIndex++;
    }
    CurrentMembers.clear();
  }
};

bool FrameObjectCompare(const FrameObject &A, const FrameObject &B) {
  // Objects at a lower index are closer to FP; objects at a higher index are
  // closer to SP.
  //
  // For consistency in our comparison, all invalid objects are placed
  // at the end. This also allows us to stop walking when we hit the
  // first invalid item after it's all sorted.
  //
  // The "first" object goes first (closest to SP), followed by the members of
  // the "first" group.
  //
  // The rest are sorted by the group index to keep the groups together.
  // Higher numbered groups are more likely to be around longer (i.e. untagged
  // in the function epilogue and not at some earlier point). Place them closer
  // to SP.
  //
  // If all else is equal, sort by the object index to keep the objects in
  // the original order.
  return std::make_tuple(!A.IsValid, A.ObjectFirst, A.GroupFirst, A.GroupIndex,
                         A.ObjectIndex) <
         std::make_tuple(!B.IsValid, B.ObjectFirst, B.GroupFirst, B.GroupIndex,
                         B.ObjectIndex);
}
} // namespace

void AArch64FrameLowering::orderFrameObjects(
    const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
  if (!OrderFrameObjects || ObjectsToAllocate.empty())
    return;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  std::vector<FrameObject> FrameObjects(MFI.getObjectIndexEnd());
  for (auto &Obj : ObjectsToAllocate) {
    FrameObjects[Obj].IsValid = true;
    FrameObjects[Obj].ObjectIndex = Obj;
  }

  // Identify stack slots that are tagged at the same time.
  GroupBuilder GB(FrameObjects);
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      if (MI.isDebugInstr())
        continue;
      int OpIndex;
      switch (MI.getOpcode()) {
      case AArch64::STGloop:
      case AArch64::STZGloop:
        OpIndex = 3;
        break;
      case AArch64::STGOffset:
      case AArch64::STZGOffset:
      case AArch64::ST2GOffset:
      case AArch64::STZ2GOffset:
        OpIndex = 1;
        break;
      default:
        OpIndex = -1;
      }

      int TaggedFI = -1;
      if (OpIndex >= 0) {
        const MachineOperand &MO = MI.getOperand(OpIndex);
        if (MO.isFI()) {
          int FI = MO.getIndex();
          if (FI >= 0 && FI < MFI.getObjectIndexEnd() &&
              FrameObjects[FI].IsValid)
            TaggedFI = FI;
        }
      }

      // If this is a stack tagging instruction for a slot that is not part of
      // a group yet, either start a new group or add it to the current one.
      if (TaggedFI >= 0)
        GB.AddMember(TaggedFI);
      else
        GB.EndCurrentGroup();
    }
    // Groups should never span multiple basic blocks.
    GB.EndCurrentGroup();
  }

  // If the function's tagged base pointer is pinned to a stack slot, we want
  // to put that slot first when possible. This will likely place it at SP + 0,
  // and save one instruction when generating the base pointer because IRG
  // does not allow an immediate offset.
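  // IRG only takes register operands (a base and an optional exclusion mask),
  // with no immediate offset field, so a slot at SP + 0 lets the tagged base
  // be produced directly from SP without a preceding ADD.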
  const AArch64FunctionInfo &AFI = *MF.getInfo<AArch64FunctionInfo>();
  Optional<int> TBPI = AFI.getTaggedBasePointerIndex();
  if (TBPI) {
    FrameObjects[*TBPI].ObjectFirst = true;
    FrameObjects[*TBPI].GroupFirst = true;
    int FirstGroupIndex = FrameObjects[*TBPI].GroupIndex;
    if (FirstGroupIndex >= 0)
      for (FrameObject &Object : FrameObjects)
        if (Object.GroupIndex == FirstGroupIndex)
          Object.GroupFirst = true;
  }

  llvm::stable_sort(FrameObjects, FrameObjectCompare);

  int i = 0;
  for (auto &Obj : FrameObjects) {
    // All invalid items are sorted at the end, so it's safe to stop.
    if (!Obj.IsValid)
      break;
    ObjectsToAllocate[i++] = Obj.ObjectIndex;
  }

  LLVM_DEBUG(dbgs() << "Final frame order:\n"; for (auto &Obj
                                                    : FrameObjects) {
    if (!Obj.IsValid)
      break;
    dbgs() << "  " << Obj.ObjectIndex << ": group " << Obj.GroupIndex;
    if (Obj.ObjectFirst)
      dbgs() << ", first";
    if (Obj.GroupFirst)
      dbgs() << ", group-first";
    dbgs() << "\n";
  });
}