1 //===- AArch64FrameLowering.cpp - AArch64 Frame Lowering -------*- C++ -*-====// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file contains the AArch64 implementation of TargetFrameLowering class. 10 // 11 // On AArch64, stack frames are structured as follows: 12 // 13 // The stack grows downward. 14 // 15 // All of the individual frame areas on the frame below are optional, i.e. it's 16 // possible to create a function so that the particular area isn't present 17 // in the frame. 18 // 19 // At function entry, the "frame" looks as follows: 20 // 21 // | | Higher address 22 // |-----------------------------------| 23 // | | 24 // | arguments passed on the stack | 25 // | | 26 // |-----------------------------------| <- sp 27 // | | Lower address 28 // 29 // 30 // After the prologue has run, the frame has the following general structure. 31 // Note that this doesn't depict the case where a red-zone is used. Also, 32 // technically the last frame area (VLAs) doesn't get created until in the 33 // main function body, after the prologue is run. However, it's depicted here 34 // for completeness. 35 // 36 // | | Higher address 37 // |-----------------------------------| 38 // | | 39 // | arguments passed on the stack | 40 // | | 41 // |-----------------------------------| 42 // | | 43 // | (Win64 only) varargs from reg | 44 // | | 45 // |-----------------------------------| 46 // | | 47 // | callee-saved gpr registers | <--. 48 // | | | On Darwin platforms these 49 // |- - - - - - - - - - - - - - - - - -| | callee saves are swapped, 50 // | prev_lr | | (frame record first) 51 // | prev_fp | <--' 52 // | async context if needed | 53 // | (a.k.a. "frame record") | 54 // |-----------------------------------| <- fp(=x29) 55 // | | 56 // | callee-saved fp/simd/SVE regs | 57 // | | 58 // |-----------------------------------| 59 // | | 60 // | SVE stack objects | 61 // | | 62 // |-----------------------------------| 63 // |.empty.space.to.make.part.below....| 64 // |.aligned.in.case.it.needs.more.than| (size of this area is unknown at 65 // |.the.standard.16-byte.alignment....| compile time; if present) 66 // |-----------------------------------| 67 // | | 68 // | local variables of fixed size | 69 // | including spill slots | 70 // |-----------------------------------| <- bp(not defined by ABI, 71 // |.variable-sized.local.variables....| LLVM chooses X19) 72 // |.(VLAs)............................| (size of this area is unknown at 73 // |...................................| compile time) 74 // |-----------------------------------| <- sp 75 // | | Lower address 76 // 77 // 78 // To access the data in a frame, at-compile time, a constant offset must be 79 // computable from one of the pointers (fp, bp, sp) to access it. The size 80 // of the areas with a dotted background cannot be computed at compile-time 81 // if they are present, making it required to have all three of fp, bp and 82 // sp to be set up to be able to access all contents in the frame areas, 83 // assuming all of the frame areas are non-empty. 84 // 85 // For most functions, some of the frame areas are empty. 
For those functions, 86 // it may not be necessary to set up fp or bp: 87 // * A base pointer is definitely needed when there are both VLAs and local 88 // variables with more-than-default alignment requirements. 89 // * A frame pointer is definitely needed when there are local variables with 90 // more-than-default alignment requirements. 91 // 92 // For Darwin platforms the frame-record (fp, lr) is stored at the top of the 93 // callee-saved area, since the unwind encoding does not allow for encoding 94 // this dynamically and existing tools depend on this layout. For other 95 // platforms, the frame-record is stored at the bottom of the (gpr) callee-saved 96 // area to allow SVE stack objects (allocated directly below the callee-saves, 97 // if available) to be accessed directly from the framepointer. 98 // The SVE spill/fill instructions have VL-scaled addressing modes such 99 // as: 100 // ldr z8, [fp, #-7 mul vl] 101 // For SVE the size of the vector length (VL) is not known at compile-time, so 102 // '#-7 mul vl' is an offset that can only be evaluated at runtime. With this 103 // layout, we don't need to add an unscaled offset to the framepointer before 104 // accessing the SVE object in the frame. 105 // 106 // In some cases when a base pointer is not strictly needed, it is generated 107 // anyway when offsets from the frame pointer to access local variables become 108 // so large that the offset can't be encoded in the immediate fields of loads 109 // or stores. 110 // 111 // Outgoing function arguments must be at the bottom of the stack frame when 112 // calling another function. If we do not have variable-sized stack objects, we 113 // can allocate a "reserved call frame" area at the bottom of the local 114 // variable area, large enough for all outgoing calls. If we do have VLAs, then 115 // the stack pointer must be decremented and incremented around each call to 116 // make space for the arguments below the VLAs. 117 // 118 // FIXME: also explain the redzone concept. 119 // 120 // An example of the prologue: 121 // 122 // .globl __foo 123 // .align 2 124 // __foo: 125 // Ltmp0: 126 // .cfi_startproc 127 // .cfi_personality 155, ___gxx_personality_v0 128 // Leh_func_begin: 129 // .cfi_lsda 16, Lexception33 130 // 131 // stp xa,bx, [sp, -#offset]! 132 // ... 133 // stp x28, x27, [sp, #offset-32] 134 // stp fp, lr, [sp, #offset-16] 135 // add fp, sp, #offset - 16 136 // sub sp, sp, #1360 137 // 138 // The Stack: 139 // +-------------------------------------------+ 140 // 10000 | ........ | ........ | ........ | ........ | 141 // 10004 | ........ | ........ | ........ | ........ | 142 // +-------------------------------------------+ 143 // 10008 | ........ | ........ | ........ | ........ | 144 // 1000c | ........ | ........ | ........ | ........ | 145 // +===========================================+ 146 // 10010 | X28 Register | 147 // 10014 | X28 Register | 148 // +-------------------------------------------+ 149 // 10018 | X27 Register | 150 // 1001c | X27 Register | 151 // +===========================================+ 152 // 10020 | Frame Pointer | 153 // 10024 | Frame Pointer | 154 // +-------------------------------------------+ 155 // 10028 | Link Register | 156 // 1002c | Link Register | 157 // +===========================================+ 158 // 10030 | ........ | ........ | ........ | ........ | 159 // 10034 | ........ | ........ | ........ | ........ | 160 // +-------------------------------------------+ 161 // 10038 | ........ | ........ | ........ | ........ 
| 162 // 1003c | ........ | ........ | ........ | ........ | 163 // +-------------------------------------------+ 164 // 165 // [sp] = 10030 :: >>initial value<< 166 // sp = 10020 :: stp fp, lr, [sp, #-16]! 167 // fp = sp == 10020 :: mov fp, sp 168 // [sp] == 10020 :: stp x28, x27, [sp, #-16]! 169 // sp == 10010 :: >>final value<< 170 // 171 // The frame pointer (w29) points to address 10020. If we use an offset of 172 // '16' from 'w29', we get the CFI offsets of -8 for w30, -16 for w29, -24 173 // for w27, and -32 for w28: 174 // 175 // Ltmp1: 176 // .cfi_def_cfa w29, 16 177 // Ltmp2: 178 // .cfi_offset w30, -8 179 // Ltmp3: 180 // .cfi_offset w29, -16 181 // Ltmp4: 182 // .cfi_offset w27, -24 183 // Ltmp5: 184 // .cfi_offset w28, -32 185 // 186 //===----------------------------------------------------------------------===// 187 188 #include "AArch64FrameLowering.h" 189 #include "AArch64InstrInfo.h" 190 #include "AArch64MachineFunctionInfo.h" 191 #include "AArch64RegisterInfo.h" 192 #include "AArch64Subtarget.h" 193 #include "AArch64TargetMachine.h" 194 #include "MCTargetDesc/AArch64AddressingModes.h" 195 #include "MCTargetDesc/AArch64MCTargetDesc.h" 196 #include "llvm/ADT/ScopeExit.h" 197 #include "llvm/ADT/SmallVector.h" 198 #include "llvm/ADT/Statistic.h" 199 #include "llvm/CodeGen/LivePhysRegs.h" 200 #include "llvm/CodeGen/MachineBasicBlock.h" 201 #include "llvm/CodeGen/MachineFrameInfo.h" 202 #include "llvm/CodeGen/MachineFunction.h" 203 #include "llvm/CodeGen/MachineInstr.h" 204 #include "llvm/CodeGen/MachineInstrBuilder.h" 205 #include "llvm/CodeGen/MachineMemOperand.h" 206 #include "llvm/CodeGen/MachineModuleInfo.h" 207 #include "llvm/CodeGen/MachineOperand.h" 208 #include "llvm/CodeGen/MachineRegisterInfo.h" 209 #include "llvm/CodeGen/RegisterScavenging.h" 210 #include "llvm/CodeGen/TargetInstrInfo.h" 211 #include "llvm/CodeGen/TargetRegisterInfo.h" 212 #include "llvm/CodeGen/TargetSubtargetInfo.h" 213 #include "llvm/CodeGen/WinEHFuncInfo.h" 214 #include "llvm/IR/Attributes.h" 215 #include "llvm/IR/CallingConv.h" 216 #include "llvm/IR/DataLayout.h" 217 #include "llvm/IR/DebugLoc.h" 218 #include "llvm/IR/Function.h" 219 #include "llvm/MC/MCAsmInfo.h" 220 #include "llvm/MC/MCDwarf.h" 221 #include "llvm/Support/CommandLine.h" 222 #include "llvm/Support/Debug.h" 223 #include "llvm/Support/ErrorHandling.h" 224 #include "llvm/Support/MathExtras.h" 225 #include "llvm/Support/raw_ostream.h" 226 #include "llvm/Target/TargetMachine.h" 227 #include "llvm/Target/TargetOptions.h" 228 #include <cassert> 229 #include <cstdint> 230 #include <iterator> 231 #include <optional> 232 #include <vector> 233 234 using namespace llvm; 235 236 #define DEBUG_TYPE "frame-info" 237 238 static cl::opt<bool> EnableRedZone("aarch64-redzone", 239 cl::desc("enable use of redzone on AArch64"), 240 cl::init(false), cl::Hidden); 241 242 static cl::opt<bool> 243 ReverseCSRRestoreSeq("reverse-csr-restore-seq", 244 cl::desc("reverse the CSR restore sequence"), 245 cl::init(false), cl::Hidden); 246 247 static cl::opt<bool> StackTaggingMergeSetTag( 248 "stack-tagging-merge-settag", 249 cl::desc("merge settag instruction in function epilog"), cl::init(true), 250 cl::Hidden); 251 252 static cl::opt<bool> OrderFrameObjects("aarch64-order-frame-objects", 253 cl::desc("sort stack allocations"), 254 cl::init(true), cl::Hidden); 255 256 cl::opt<bool> EnableHomogeneousPrologEpilog( 257 "homogeneous-prolog-epilog", cl::Hidden, 258 cl::desc("Emit homogeneous prologue and epilogue for the size " 259 "optimization (default = 
off)")); 260 261 STATISTIC(NumRedZoneFunctions, "Number of functions using red zone"); 262 263 /// Returns how much of the incoming argument stack area (in bytes) we should 264 /// clean up in an epilogue. For the C calling convention this will be 0, for 265 /// guaranteed tail call conventions it can be positive (a normal return or a 266 /// tail call to a function that uses less stack space for arguments) or 267 /// negative (for a tail call to a function that needs more stack space than us 268 /// for arguments). 269 static int64_t getArgumentStackToRestore(MachineFunction &MF, 270 MachineBasicBlock &MBB) { 271 MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr(); 272 bool IsTailCallReturn = false; 273 if (MBB.end() != MBBI) { 274 unsigned RetOpcode = MBBI->getOpcode(); 275 IsTailCallReturn = RetOpcode == AArch64::TCRETURNdi || 276 RetOpcode == AArch64::TCRETURNri || 277 RetOpcode == AArch64::TCRETURNriBTI; 278 } 279 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 280 281 int64_t ArgumentPopSize = 0; 282 if (IsTailCallReturn) { 283 MachineOperand &StackAdjust = MBBI->getOperand(1); 284 285 // For a tail-call in a callee-pops-arguments environment, some or all of 286 // the stack may actually be in use for the call's arguments, this is 287 // calculated during LowerCall and consumed here... 288 ArgumentPopSize = StackAdjust.getImm(); 289 } else { 290 // ... otherwise the amount to pop is *all* of the argument space, 291 // conveniently stored in the MachineFunctionInfo by 292 // LowerFormalArguments. This will, of course, be zero for the C calling 293 // convention. 294 ArgumentPopSize = AFI->getArgumentStackToRestore(); 295 } 296 297 return ArgumentPopSize; 298 } 299 300 static bool produceCompactUnwindFrame(MachineFunction &MF); 301 static bool needsWinCFI(const MachineFunction &MF); 302 static StackOffset getSVEStackSize(const MachineFunction &MF); 303 static bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF); 304 305 /// Returns true if a homogeneous prolog or epilog code can be emitted 306 /// for the size optimization. If possible, a frame helper call is injected. 307 /// When Exit block is given, this check is for epilog. 308 bool AArch64FrameLowering::homogeneousPrologEpilog( 309 MachineFunction &MF, MachineBasicBlock *Exit) const { 310 if (!MF.getFunction().hasMinSize()) 311 return false; 312 if (!EnableHomogeneousPrologEpilog) 313 return false; 314 if (ReverseCSRRestoreSeq) 315 return false; 316 if (EnableRedZone) 317 return false; 318 319 // TODO: Window is supported yet. 320 if (needsWinCFI(MF)) 321 return false; 322 // TODO: SVE is not supported yet. 323 if (getSVEStackSize(MF)) 324 return false; 325 326 // Bail on stack adjustment needed on return for simplicity. 327 const MachineFrameInfo &MFI = MF.getFrameInfo(); 328 const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo(); 329 if (MFI.hasVarSizedObjects() || RegInfo->hasStackRealignment(MF)) 330 return false; 331 if (Exit && getArgumentStackToRestore(MF, *Exit)) 332 return false; 333 334 return true; 335 } 336 337 /// Returns true if CSRs should be paired. 338 bool AArch64FrameLowering::producePairRegisters(MachineFunction &MF) const { 339 return produceCompactUnwindFrame(MF) || homogeneousPrologEpilog(MF); 340 } 341 342 /// This is the biggest offset to the stack pointer we can encode in aarch64 343 /// instructions (without using a separate calculation and a temp register). 
344 /// Note that the exception here are vector stores/loads which cannot encode any 345 /// displacements (see estimateRSStackSizeLimit(), isAArch64FrameOffsetLegal()). 346 static const unsigned DefaultSafeSPDisplacement = 255; 347 348 /// Look at each instruction that references stack frames and return the stack 349 /// size limit beyond which some of these instructions will require a scratch 350 /// register during their expansion later. 351 static unsigned estimateRSStackSizeLimit(MachineFunction &MF) { 352 // FIXME: For now, just conservatively guestimate based on unscaled indexing 353 // range. We'll end up allocating an unnecessary spill slot a lot, but 354 // realistically that's not a big deal at this stage of the game. 355 for (MachineBasicBlock &MBB : MF) { 356 for (MachineInstr &MI : MBB) { 357 if (MI.isDebugInstr() || MI.isPseudo() || 358 MI.getOpcode() == AArch64::ADDXri || 359 MI.getOpcode() == AArch64::ADDSXri) 360 continue; 361 362 for (const MachineOperand &MO : MI.operands()) { 363 if (!MO.isFI()) 364 continue; 365 366 StackOffset Offset; 367 if (isAArch64FrameOffsetLegal(MI, Offset, nullptr, nullptr, nullptr) == 368 AArch64FrameOffsetCannotUpdate) 369 return 0; 370 } 371 } 372 } 373 return DefaultSafeSPDisplacement; 374 } 375 376 TargetStackID::Value 377 AArch64FrameLowering::getStackIDForScalableVectors() const { 378 return TargetStackID::ScalableVector; 379 } 380 381 /// Returns the size of the fixed object area (allocated next to sp on entry) 382 /// On Win64 this may include a var args area and an UnwindHelp object for EH. 383 static unsigned getFixedObjectSize(const MachineFunction &MF, 384 const AArch64FunctionInfo *AFI, bool IsWin64, 385 bool IsFunclet) { 386 if (!IsWin64 || IsFunclet) { 387 return AFI->getTailCallReservedStack(); 388 } else { 389 if (AFI->getTailCallReservedStack() != 0) 390 report_fatal_error("cannot generate ABI-changing tail call for Win64"); 391 // Var args are stored here in the primary function. 392 const unsigned VarArgsArea = AFI->getVarArgsGPRSize(); 393 // To support EH funclets we allocate an UnwindHelp object 394 const unsigned UnwindHelpObject = (MF.hasEHFunclets() ? 8 : 0); 395 return alignTo(VarArgsArea + UnwindHelpObject, 16); 396 } 397 } 398 399 /// Returns the size of the entire SVE stackframe (calleesaves + spills). 400 static StackOffset getSVEStackSize(const MachineFunction &MF) { 401 const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 402 return StackOffset::getScalable((int64_t)AFI->getStackSizeSVE()); 403 } 404 405 bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const { 406 if (!EnableRedZone) 407 return false; 408 409 // Don't use the red zone if the function explicitly asks us not to. 410 // This is typically used for kernel code. 411 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 412 const unsigned RedZoneSize = 413 Subtarget.getTargetLowering()->getRedZoneSize(MF.getFunction()); 414 if (!RedZoneSize) 415 return false; 416 417 const MachineFrameInfo &MFI = MF.getFrameInfo(); 418 const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 419 uint64_t NumBytes = AFI->getLocalStackSize(); 420 421 return !(MFI.hasCalls() || hasFP(MF) || NumBytes > RedZoneSize || 422 getSVEStackSize(MF)); 423 } 424 425 /// hasFP - Return true if the specified function should have a dedicated frame 426 /// pointer register. 
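// For example, a function with variable-sized stack objects, one whose frame
// address is taken, or one whose locals need realignment beyond the default
// 16-byte stack alignment must keep x29 as a dedicated frame pointer (see the
// individual checks below).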
427 bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const { 428 const MachineFrameInfo &MFI = MF.getFrameInfo(); 429 const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo(); 430 // Win64 EH requires a frame pointer if funclets are present, as the locals 431 // are accessed off the frame pointer in both the parent function and the 432 // funclets. 433 if (MF.hasEHFunclets()) 434 return true; 435 // Retain behavior of always omitting the FP for leaf functions when possible. 436 if (MF.getTarget().Options.DisableFramePointerElim(MF)) 437 return true; 438 if (MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() || 439 MFI.hasStackMap() || MFI.hasPatchPoint() || 440 RegInfo->hasStackRealignment(MF)) 441 return true; 442 // With large callframes around we may need to use FP to access the scavenging 443 // emergency spillslot. 444 // 445 // Unfortunately some calls to hasFP() like machine verifier -> 446 // getReservedReg() -> hasFP in the middle of global isel are too early 447 // to know the max call frame size. Hopefully conservatively returning "true" 448 // in those cases is fine. 449 // DefaultSafeSPDisplacement is fine as we only emergency spill GP regs. 450 if (!MFI.isMaxCallFrameSizeComputed() || 451 MFI.getMaxCallFrameSize() > DefaultSafeSPDisplacement) 452 return true; 453 454 return false; 455 } 456 457 /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is 458 /// not required, we reserve argument space for call sites in the function 459 /// immediately on entry to the current function. This eliminates the need for 460 /// add/sub sp brackets around call sites. Returns true if the call frame is 461 /// included as part of the stack frame. 462 bool 463 AArch64FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const { 464 return !MF.getFrameInfo().hasVarSizedObjects(); 465 } 466 467 MachineBasicBlock::iterator AArch64FrameLowering::eliminateCallFramePseudoInstr( 468 MachineFunction &MF, MachineBasicBlock &MBB, 469 MachineBasicBlock::iterator I) const { 470 const AArch64InstrInfo *TII = 471 static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo()); 472 DebugLoc DL = I->getDebugLoc(); 473 unsigned Opc = I->getOpcode(); 474 bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode(); 475 uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0; 476 477 if (!hasReservedCallFrame(MF)) { 478 int64_t Amount = I->getOperand(0).getImm(); 479 Amount = alignTo(Amount, getStackAlign()); 480 if (!IsDestroy) 481 Amount = -Amount; 482 483 // N.b. if CalleePopAmount is valid but zero (i.e. callee would pop, but it 484 // doesn't have to pop anything), then the first operand will be zero too so 485 // this adjustment is a no-op. 486 if (CalleePopAmount == 0) { 487 // FIXME: in-function stack adjustment for calls is limited to 24-bits 488 // because there's no guaranteed temporary register available. 489 // 490 // ADD/SUB (immediate) has only LSL #0 and LSL #12 available. 491 // 1) For offset <= 12-bit, we use LSL #0 492 // 2) For 12-bit <= offset <= 24-bit, we use two instructions. One uses 493 // LSL #0, and the other uses LSL #12. 494 // 495 // Most call frames will be allocated at the start of a function so 496 // this is OK, but it is a limitation that needs dealing with. 
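      // For illustration (values are hypothetical): an adjustment of 0x12345
      // bytes could be materialized as two instructions,
      //   sub sp, sp, #0x12, lsl #12   // subtract 0x12000
      //   sub sp, sp, #0x345           // subtract the remaining 0x345
      // which is why anything up to 24 bits can be handled without a scratch
      // register.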
      assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large");
      emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP,
                      StackOffset::getFixed(Amount), TII);
    }
  } else if (CalleePopAmount != 0) {
    // If the calling convention demands that the callee pops arguments from the
    // stack, we want to add it back if we have a reserved call frame.
    assert(CalleePopAmount < 0xffffff && "call frame too large");
    emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(-(int64_t)CalleePopAmount), TII);
  }
  return MBB.erase(I);
}

void AArch64FrameLowering::emitCalleeSavedGPRLocations(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  if (CSI.empty())
    return;

  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  for (const auto &Info : CSI) {
    if (MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector)
      continue;

    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
    unsigned DwarfReg = TRI.getDwarfRegNum(Info.getReg(), true);

    int64_t Offset =
        MFI.getObjectOffset(Info.getFrameIdx()) - getOffsetOfLocalArea();
    unsigned CFIIndex = MF.addFrameInst(
        MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
    BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlags(MachineInstr::FrameSetup);
  }
}

void AArch64FrameLowering::emitCalleeSavedSVELocations(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  if (CSI.empty())
    return;

  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc DL = MBB.findDebugLoc(MBBI);
  AArch64FunctionInfo &AFI = *MF.getInfo<AArch64FunctionInfo>();

  for (const auto &Info : CSI) {
    if (!(MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector))
      continue;

    // Not all unwinders may know about SVE registers, so assume the lowest
    // common denominator.
564 assert(!Info.isSpilledToReg() && "Spilling to registers not implemented"); 565 unsigned Reg = Info.getReg(); 566 if (!static_cast<const AArch64RegisterInfo &>(TRI).regNeedsCFI(Reg, Reg)) 567 continue; 568 569 StackOffset Offset = 570 StackOffset::getScalable(MFI.getObjectOffset(Info.getFrameIdx())) - 571 StackOffset::getFixed(AFI.getCalleeSavedStackSize(MFI)); 572 573 unsigned CFIIndex = MF.addFrameInst(createCFAOffset(TRI, Reg, Offset)); 574 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) 575 .addCFIIndex(CFIIndex) 576 .setMIFlags(MachineInstr::FrameSetup); 577 } 578 } 579 580 static void insertCFISameValue(const MCInstrDesc &Desc, MachineFunction &MF, 581 MachineBasicBlock &MBB, 582 MachineBasicBlock::iterator InsertPt, 583 unsigned DwarfReg) { 584 unsigned CFIIndex = 585 MF.addFrameInst(MCCFIInstruction::createSameValue(nullptr, DwarfReg)); 586 BuildMI(MBB, InsertPt, DebugLoc(), Desc).addCFIIndex(CFIIndex); 587 } 588 589 void AArch64FrameLowering::resetCFIToInitialState( 590 MachineBasicBlock &MBB) const { 591 592 MachineFunction &MF = *MBB.getParent(); 593 const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 594 const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); 595 const auto &TRI = 596 static_cast<const AArch64RegisterInfo &>(*Subtarget.getRegisterInfo()); 597 const auto &MFI = *MF.getInfo<AArch64FunctionInfo>(); 598 599 const MCInstrDesc &CFIDesc = TII.get(TargetOpcode::CFI_INSTRUCTION); 600 DebugLoc DL; 601 602 // Reset the CFA to `SP + 0`. 603 MachineBasicBlock::iterator InsertPt = MBB.begin(); 604 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa( 605 nullptr, TRI.getDwarfRegNum(AArch64::SP, true), 0)); 606 BuildMI(MBB, InsertPt, DL, CFIDesc).addCFIIndex(CFIIndex); 607 608 // Flip the RA sign state. 609 if (MFI.shouldSignReturnAddress(MF)) { 610 CFIIndex = MF.addFrameInst(MCCFIInstruction::createNegateRAState(nullptr)); 611 BuildMI(MBB, InsertPt, DL, CFIDesc).addCFIIndex(CFIIndex); 612 } 613 614 // Shadow call stack uses X18, reset it. 615 if (needsShadowCallStackPrologueEpilogue(MF)) 616 insertCFISameValue(CFIDesc, MF, MBB, InsertPt, 617 TRI.getDwarfRegNum(AArch64::X18, true)); 618 619 // Emit .cfi_same_value for callee-saved registers. 
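  // For a function that saves x19, for instance, this loop would emit the
  // equivalent of
  //   .cfi_same_value w19
  // i.e. the register is assumed to again hold the value it had in the
  // caller, cancelling any earlier .cfi_offset rule for it (register chosen
  // purely for illustration).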
620 const std::vector<CalleeSavedInfo> &CSI = 621 MF.getFrameInfo().getCalleeSavedInfo(); 622 for (const auto &Info : CSI) { 623 unsigned Reg = Info.getReg(); 624 if (!TRI.regNeedsCFI(Reg, Reg)) 625 continue; 626 insertCFISameValue(CFIDesc, MF, MBB, InsertPt, 627 TRI.getDwarfRegNum(Reg, true)); 628 } 629 } 630 631 static void emitCalleeSavedRestores(MachineBasicBlock &MBB, 632 MachineBasicBlock::iterator MBBI, 633 bool SVE) { 634 MachineFunction &MF = *MBB.getParent(); 635 MachineFrameInfo &MFI = MF.getFrameInfo(); 636 637 const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo(); 638 if (CSI.empty()) 639 return; 640 641 const TargetSubtargetInfo &STI = MF.getSubtarget(); 642 const TargetRegisterInfo &TRI = *STI.getRegisterInfo(); 643 const TargetInstrInfo &TII = *STI.getInstrInfo(); 644 DebugLoc DL = MBB.findDebugLoc(MBBI); 645 646 for (const auto &Info : CSI) { 647 if (SVE != 648 (MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector)) 649 continue; 650 651 unsigned Reg = Info.getReg(); 652 if (SVE && 653 !static_cast<const AArch64RegisterInfo &>(TRI).regNeedsCFI(Reg, Reg)) 654 continue; 655 656 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createRestore( 657 nullptr, TRI.getDwarfRegNum(Info.getReg(), true))); 658 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) 659 .addCFIIndex(CFIIndex) 660 .setMIFlags(MachineInstr::FrameDestroy); 661 } 662 } 663 664 void AArch64FrameLowering::emitCalleeSavedGPRRestores( 665 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const { 666 emitCalleeSavedRestores(MBB, MBBI, false); 667 } 668 669 void AArch64FrameLowering::emitCalleeSavedSVERestores( 670 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const { 671 emitCalleeSavedRestores(MBB, MBBI, true); 672 } 673 674 static MCRegister getRegisterOrZero(MCRegister Reg, bool HasSVE) { 675 switch (Reg.id()) { 676 default: 677 // The called routine is expected to preserve r19-r28 678 // r29 and r30 are used as frame pointer and link register resp. 679 return 0; 680 681 // GPRs 682 #define CASE(n) \ 683 case AArch64::W##n: \ 684 case AArch64::X##n: \ 685 return AArch64::X##n 686 CASE(0); 687 CASE(1); 688 CASE(2); 689 CASE(3); 690 CASE(4); 691 CASE(5); 692 CASE(6); 693 CASE(7); 694 CASE(8); 695 CASE(9); 696 CASE(10); 697 CASE(11); 698 CASE(12); 699 CASE(13); 700 CASE(14); 701 CASE(15); 702 CASE(16); 703 CASE(17); 704 CASE(18); 705 #undef CASE 706 707 // FPRs 708 #define CASE(n) \ 709 case AArch64::B##n: \ 710 case AArch64::H##n: \ 711 case AArch64::S##n: \ 712 case AArch64::D##n: \ 713 case AArch64::Q##n: \ 714 return HasSVE ? AArch64::Z##n : AArch64::Q##n 715 CASE(0); 716 CASE(1); 717 CASE(2); 718 CASE(3); 719 CASE(4); 720 CASE(5); 721 CASE(6); 722 CASE(7); 723 CASE(8); 724 CASE(9); 725 CASE(10); 726 CASE(11); 727 CASE(12); 728 CASE(13); 729 CASE(14); 730 CASE(15); 731 CASE(16); 732 CASE(17); 733 CASE(18); 734 CASE(19); 735 CASE(20); 736 CASE(21); 737 CASE(22); 738 CASE(23); 739 CASE(24); 740 CASE(25); 741 CASE(26); 742 CASE(27); 743 CASE(28); 744 CASE(29); 745 CASE(30); 746 CASE(31); 747 #undef CASE 748 } 749 } 750 751 void AArch64FrameLowering::emitZeroCallUsedRegs(BitVector RegsToZero, 752 MachineBasicBlock &MBB) const { 753 // Insertion point. 754 MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator(); 755 756 // Fake a debug loc. 
757 DebugLoc DL; 758 if (MBBI != MBB.end()) 759 DL = MBBI->getDebugLoc(); 760 761 const MachineFunction &MF = *MBB.getParent(); 762 const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>(); 763 const AArch64RegisterInfo &TRI = *STI.getRegisterInfo(); 764 765 BitVector GPRsToZero(TRI.getNumRegs()); 766 BitVector FPRsToZero(TRI.getNumRegs()); 767 bool HasSVE = STI.hasSVE(); 768 for (MCRegister Reg : RegsToZero.set_bits()) { 769 if (TRI.isGeneralPurposeRegister(MF, Reg)) { 770 // For GPRs, we only care to clear out the 64-bit register. 771 if (MCRegister XReg = getRegisterOrZero(Reg, HasSVE)) 772 GPRsToZero.set(XReg); 773 } else if (AArch64::FPR128RegClass.contains(Reg) || 774 AArch64::FPR64RegClass.contains(Reg) || 775 AArch64::FPR32RegClass.contains(Reg) || 776 AArch64::FPR16RegClass.contains(Reg) || 777 AArch64::FPR8RegClass.contains(Reg)) { 778 // For FPRs, 779 if (MCRegister XReg = getRegisterOrZero(Reg, HasSVE)) 780 FPRsToZero.set(XReg); 781 } 782 } 783 784 const AArch64InstrInfo &TII = *STI.getInstrInfo(); 785 786 // Zero out GPRs. 787 for (MCRegister Reg : GPRsToZero.set_bits()) 788 BuildMI(MBB, MBBI, DL, TII.get(AArch64::MOVi64imm), Reg).addImm(0); 789 790 // Zero out FP/vector registers. 791 for (MCRegister Reg : FPRsToZero.set_bits()) 792 if (HasSVE) 793 BuildMI(MBB, MBBI, DL, TII.get(AArch64::DUP_ZI_D), Reg) 794 .addImm(0) 795 .addImm(0); 796 else 797 BuildMI(MBB, MBBI, DL, TII.get(AArch64::MOVIv2d_ns), Reg).addImm(0); 798 799 if (HasSVE) { 800 for (MCRegister PReg : 801 {AArch64::P0, AArch64::P1, AArch64::P2, AArch64::P3, AArch64::P4, 802 AArch64::P5, AArch64::P6, AArch64::P7, AArch64::P8, AArch64::P9, 803 AArch64::P10, AArch64::P11, AArch64::P12, AArch64::P13, AArch64::P14, 804 AArch64::P15}) { 805 if (RegsToZero[PReg]) 806 BuildMI(MBB, MBBI, DL, TII.get(AArch64::PFALSE), PReg); 807 } 808 } 809 } 810 811 // Find a scratch register that we can use at the start of the prologue to 812 // re-align the stack pointer. We avoid using callee-save registers since they 813 // may appear to be free when this is called from canUseAsPrologue (during 814 // shrink wrapping), but then no longer be free when this is called from 815 // emitPrologue. 816 // 817 // FIXME: This is a bit conservative, since in the above case we could use one 818 // of the callee-save registers as a scratch temp to re-align the stack pointer, 819 // but we would then have to make sure that we were in fact saving at least one 820 // callee-save register in the prologue, which is additional complexity that 821 // doesn't seem worth the benefit. 822 static unsigned findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB) { 823 MachineFunction *MF = MBB->getParent(); 824 825 // If MBB is an entry block, use X9 as the scratch register 826 if (&MF->front() == MBB) 827 return AArch64::X9; 828 829 const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>(); 830 const AArch64RegisterInfo &TRI = *Subtarget.getRegisterInfo(); 831 LivePhysRegs LiveRegs(TRI); 832 LiveRegs.addLiveIns(*MBB); 833 834 // Mark callee saved registers as used so we will not choose them. 835 const MCPhysReg *CSRegs = MF->getRegInfo().getCalleeSavedRegs(); 836 for (unsigned i = 0; CSRegs[i]; ++i) 837 LiveRegs.addReg(CSRegs[i]); 838 839 // Prefer X9 since it was historically used for the prologue scratch reg. 
840 const MachineRegisterInfo &MRI = MF->getRegInfo(); 841 if (LiveRegs.available(MRI, AArch64::X9)) 842 return AArch64::X9; 843 844 for (unsigned Reg : AArch64::GPR64RegClass) { 845 if (LiveRegs.available(MRI, Reg)) 846 return Reg; 847 } 848 return AArch64::NoRegister; 849 } 850 851 bool AArch64FrameLowering::canUseAsPrologue( 852 const MachineBasicBlock &MBB) const { 853 const MachineFunction *MF = MBB.getParent(); 854 MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB); 855 const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>(); 856 const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); 857 858 // Don't need a scratch register if we're not going to re-align the stack. 859 if (!RegInfo->hasStackRealignment(*MF)) 860 return true; 861 // Otherwise, we can use any block as long as it has a scratch register 862 // available. 863 return findScratchNonCalleeSaveRegister(TmpMBB) != AArch64::NoRegister; 864 } 865 866 static bool windowsRequiresStackProbe(MachineFunction &MF, 867 uint64_t StackSizeInBytes) { 868 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 869 if (!Subtarget.isTargetWindows()) 870 return false; 871 const Function &F = MF.getFunction(); 872 // TODO: When implementing stack protectors, take that into account 873 // for the probe threshold. 874 unsigned StackProbeSize = 875 F.getFnAttributeAsParsedInteger("stack-probe-size", 4096); 876 return (StackSizeInBytes >= StackProbeSize) && 877 !F.hasFnAttribute("no-stack-arg-probe"); 878 } 879 880 static bool needsWinCFI(const MachineFunction &MF) { 881 const Function &F = MF.getFunction(); 882 return MF.getTarget().getMCAsmInfo()->usesWindowsCFI() && 883 F.needsUnwindTableEntry(); 884 } 885 886 bool AArch64FrameLowering::shouldCombineCSRLocalStackBump( 887 MachineFunction &MF, uint64_t StackBumpBytes) const { 888 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 889 const MachineFrameInfo &MFI = MF.getFrameInfo(); 890 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 891 const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); 892 if (homogeneousPrologEpilog(MF)) 893 return false; 894 895 if (AFI->getLocalStackSize() == 0) 896 return false; 897 898 // For WinCFI, if optimizing for size, prefer to not combine the stack bump 899 // (to force a stp with predecrement) to match the packed unwind format, 900 // provided that there actually are any callee saved registers to merge the 901 // decrement with. 902 // This is potentially marginally slower, but allows using the packed 903 // unwind format for functions that both have a local area and callee saved 904 // registers. Using the packed unwind format notably reduces the size of 905 // the unwind info. 906 if (needsWinCFI(MF) && AFI->getCalleeSavedStackSize() > 0 && 907 MF.getFunction().hasOptSize()) 908 return false; 909 910 // 512 is the maximum immediate for stp/ldp that will be used for 911 // callee-save save/restores 912 if (StackBumpBytes >= 512 || windowsRequiresStackProbe(MF, StackBumpBytes)) 913 return false; 914 915 if (MFI.hasVarSizedObjects()) 916 return false; 917 918 if (RegInfo->hasStackRealignment(MF)) 919 return false; 920 921 // This isn't strictly necessary, but it simplifies things a bit since the 922 // current RedZone handling code assumes the SP is adjusted by the 923 // callee-save save/restore code. 
924 if (canUseRedZone(MF)) 925 return false; 926 927 // When there is an SVE area on the stack, always allocate the 928 // callee-saves and spills/locals separately. 929 if (getSVEStackSize(MF)) 930 return false; 931 932 return true; 933 } 934 935 bool AArch64FrameLowering::shouldCombineCSRLocalStackBumpInEpilogue( 936 MachineBasicBlock &MBB, unsigned StackBumpBytes) const { 937 if (!shouldCombineCSRLocalStackBump(*MBB.getParent(), StackBumpBytes)) 938 return false; 939 940 if (MBB.empty()) 941 return true; 942 943 // Disable combined SP bump if the last instruction is an MTE tag store. It 944 // is almost always better to merge SP adjustment into those instructions. 945 MachineBasicBlock::iterator LastI = MBB.getFirstTerminator(); 946 MachineBasicBlock::iterator Begin = MBB.begin(); 947 while (LastI != Begin) { 948 --LastI; 949 if (LastI->isTransient()) 950 continue; 951 if (!LastI->getFlag(MachineInstr::FrameDestroy)) 952 break; 953 } 954 switch (LastI->getOpcode()) { 955 case AArch64::STGloop: 956 case AArch64::STZGloop: 957 case AArch64::STGi: 958 case AArch64::STZGi: 959 case AArch64::ST2Gi: 960 case AArch64::STZ2Gi: 961 return false; 962 default: 963 return true; 964 } 965 llvm_unreachable("unreachable"); 966 } 967 968 // Given a load or a store instruction, generate an appropriate unwinding SEH 969 // code on Windows. 970 static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI, 971 const TargetInstrInfo &TII, 972 MachineInstr::MIFlag Flag) { 973 unsigned Opc = MBBI->getOpcode(); 974 MachineBasicBlock *MBB = MBBI->getParent(); 975 MachineFunction &MF = *MBB->getParent(); 976 DebugLoc DL = MBBI->getDebugLoc(); 977 unsigned ImmIdx = MBBI->getNumOperands() - 1; 978 int Imm = MBBI->getOperand(ImmIdx).getImm(); 979 MachineInstrBuilder MIB; 980 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 981 const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); 982 983 switch (Opc) { 984 default: 985 llvm_unreachable("No SEH Opcode for this instruction"); 986 case AArch64::LDPDpost: 987 Imm = -Imm; 988 [[fallthrough]]; 989 case AArch64::STPDpre: { 990 unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg()); 991 unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg()); 992 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP_X)) 993 .addImm(Reg0) 994 .addImm(Reg1) 995 .addImm(Imm * 8) 996 .setMIFlag(Flag); 997 break; 998 } 999 case AArch64::LDPXpost: 1000 Imm = -Imm; 1001 [[fallthrough]]; 1002 case AArch64::STPXpre: { 1003 Register Reg0 = MBBI->getOperand(1).getReg(); 1004 Register Reg1 = MBBI->getOperand(2).getReg(); 1005 if (Reg0 == AArch64::FP && Reg1 == AArch64::LR) 1006 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR_X)) 1007 .addImm(Imm * 8) 1008 .setMIFlag(Flag); 1009 else 1010 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP_X)) 1011 .addImm(RegInfo->getSEHRegNum(Reg0)) 1012 .addImm(RegInfo->getSEHRegNum(Reg1)) 1013 .addImm(Imm * 8) 1014 .setMIFlag(Flag); 1015 break; 1016 } 1017 case AArch64::LDRDpost: 1018 Imm = -Imm; 1019 [[fallthrough]]; 1020 case AArch64::STRDpre: { 1021 unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg()); 1022 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg_X)) 1023 .addImm(Reg) 1024 .addImm(Imm) 1025 .setMIFlag(Flag); 1026 break; 1027 } 1028 case AArch64::LDRXpost: 1029 Imm = -Imm; 1030 [[fallthrough]]; 1031 case AArch64::STRXpre: { 1032 unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg()); 1033 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveReg_X)) 
1034 .addImm(Reg) 1035 .addImm(Imm) 1036 .setMIFlag(Flag); 1037 break; 1038 } 1039 case AArch64::STPDi: 1040 case AArch64::LDPDi: { 1041 unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg()); 1042 unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg()); 1043 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP)) 1044 .addImm(Reg0) 1045 .addImm(Reg1) 1046 .addImm(Imm * 8) 1047 .setMIFlag(Flag); 1048 break; 1049 } 1050 case AArch64::STPXi: 1051 case AArch64::LDPXi: { 1052 Register Reg0 = MBBI->getOperand(0).getReg(); 1053 Register Reg1 = MBBI->getOperand(1).getReg(); 1054 if (Reg0 == AArch64::FP && Reg1 == AArch64::LR) 1055 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR)) 1056 .addImm(Imm * 8) 1057 .setMIFlag(Flag); 1058 else 1059 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP)) 1060 .addImm(RegInfo->getSEHRegNum(Reg0)) 1061 .addImm(RegInfo->getSEHRegNum(Reg1)) 1062 .addImm(Imm * 8) 1063 .setMIFlag(Flag); 1064 break; 1065 } 1066 case AArch64::STRXui: 1067 case AArch64::LDRXui: { 1068 int Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg()); 1069 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveReg)) 1070 .addImm(Reg) 1071 .addImm(Imm * 8) 1072 .setMIFlag(Flag); 1073 break; 1074 } 1075 case AArch64::STRDui: 1076 case AArch64::LDRDui: { 1077 unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg()); 1078 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg)) 1079 .addImm(Reg) 1080 .addImm(Imm * 8) 1081 .setMIFlag(Flag); 1082 break; 1083 } 1084 } 1085 auto I = MBB->insertAfter(MBBI, MIB); 1086 return I; 1087 } 1088 1089 // Fix up the SEH opcode associated with the save/restore instruction. 1090 static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI, 1091 unsigned LocalStackSize) { 1092 MachineOperand *ImmOpnd = nullptr; 1093 unsigned ImmIdx = MBBI->getNumOperands() - 1; 1094 switch (MBBI->getOpcode()) { 1095 default: 1096 llvm_unreachable("Fix the offset in the SEH instruction"); 1097 case AArch64::SEH_SaveFPLR: 1098 case AArch64::SEH_SaveRegP: 1099 case AArch64::SEH_SaveReg: 1100 case AArch64::SEH_SaveFRegP: 1101 case AArch64::SEH_SaveFReg: 1102 ImmOpnd = &MBBI->getOperand(ImmIdx); 1103 break; 1104 } 1105 if (ImmOpnd) 1106 ImmOpnd->setImm(ImmOpnd->getImm() + LocalStackSize); 1107 } 1108 1109 // Convert callee-save register save/restore instruction to do stack pointer 1110 // decrement/increment to allocate/deallocate the callee-save stack area by 1111 // converting store/load to use pre/post increment version. 
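// For example (illustrative registers and offsets), a prologue callee-save
// store such as
//   stp x29, x30, [sp]
// combined with a CSStackSizeInc of -16 becomes
//   stp x29, x30, [sp, #-16]!
// so the SP decrement is folded into the store itself; in the epilogue
// direction an ldp is likewise turned into a post-increment ldp that frees
// the callee-save area.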
1112 static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec( 1113 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, 1114 const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc, 1115 bool NeedsWinCFI, bool *HasWinCFI, bool EmitCFI, 1116 MachineInstr::MIFlag FrameFlag = MachineInstr::FrameSetup, 1117 int CFAOffset = 0) { 1118 unsigned NewOpc; 1119 switch (MBBI->getOpcode()) { 1120 default: 1121 llvm_unreachable("Unexpected callee-save save/restore opcode!"); 1122 case AArch64::STPXi: 1123 NewOpc = AArch64::STPXpre; 1124 break; 1125 case AArch64::STPDi: 1126 NewOpc = AArch64::STPDpre; 1127 break; 1128 case AArch64::STPQi: 1129 NewOpc = AArch64::STPQpre; 1130 break; 1131 case AArch64::STRXui: 1132 NewOpc = AArch64::STRXpre; 1133 break; 1134 case AArch64::STRDui: 1135 NewOpc = AArch64::STRDpre; 1136 break; 1137 case AArch64::STRQui: 1138 NewOpc = AArch64::STRQpre; 1139 break; 1140 case AArch64::LDPXi: 1141 NewOpc = AArch64::LDPXpost; 1142 break; 1143 case AArch64::LDPDi: 1144 NewOpc = AArch64::LDPDpost; 1145 break; 1146 case AArch64::LDPQi: 1147 NewOpc = AArch64::LDPQpost; 1148 break; 1149 case AArch64::LDRXui: 1150 NewOpc = AArch64::LDRXpost; 1151 break; 1152 case AArch64::LDRDui: 1153 NewOpc = AArch64::LDRDpost; 1154 break; 1155 case AArch64::LDRQui: 1156 NewOpc = AArch64::LDRQpost; 1157 break; 1158 } 1159 // Get rid of the SEH code associated with the old instruction. 1160 if (NeedsWinCFI) { 1161 auto SEH = std::next(MBBI); 1162 if (AArch64InstrInfo::isSEHInstruction(*SEH)) 1163 SEH->eraseFromParent(); 1164 } 1165 1166 TypeSize Scale = TypeSize::Fixed(1); 1167 unsigned Width; 1168 int64_t MinOffset, MaxOffset; 1169 bool Success = static_cast<const AArch64InstrInfo *>(TII)->getMemOpInfo( 1170 NewOpc, Scale, Width, MinOffset, MaxOffset); 1171 (void)Success; 1172 assert(Success && "unknown load/store opcode"); 1173 1174 // If the first store isn't right where we want SP then we can't fold the 1175 // update in so create a normal arithmetic instruction instead. 1176 MachineFunction &MF = *MBB.getParent(); 1177 if (MBBI->getOperand(MBBI->getNumOperands() - 1).getImm() != 0 || 1178 CSStackSizeInc < MinOffset || CSStackSizeInc > MaxOffset) { 1179 emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, 1180 StackOffset::getFixed(CSStackSizeInc), TII, FrameFlag, 1181 false, false, nullptr, EmitCFI, 1182 StackOffset::getFixed(CFAOffset)); 1183 1184 return std::prev(MBBI); 1185 } 1186 1187 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc)); 1188 MIB.addReg(AArch64::SP, RegState::Define); 1189 1190 // Copy all operands other than the immediate offset. 1191 unsigned OpndIdx = 0; 1192 for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd; 1193 ++OpndIdx) 1194 MIB.add(MBBI->getOperand(OpndIdx)); 1195 1196 assert(MBBI->getOperand(OpndIdx).getImm() == 0 && 1197 "Unexpected immediate offset in first/last callee-save save/restore " 1198 "instruction!"); 1199 assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP && 1200 "Unexpected base register in callee-save save/restore instruction!"); 1201 assert(CSStackSizeInc % Scale == 0); 1202 MIB.addImm(CSStackSizeInc / (int)Scale); 1203 1204 MIB.setMIFlags(MBBI->getFlags()); 1205 MIB.setMemRefs(MBBI->memoperands()); 1206 1207 // Generate a new SEH code that corresponds to the new instruction. 
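  // For instance, an STPXpre of fp/lr with a 16-byte pre-decrement would,
  // roughly, be described by a ".seh_save_fplr_x 16" directive in the emitted
  // assembly (shown for illustration only; the exact unwind code depends on
  // the opcode handled in InsertSEH).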
1208 if (NeedsWinCFI) { 1209 *HasWinCFI = true; 1210 InsertSEH(*MIB, *TII, FrameFlag); 1211 } 1212 1213 if (EmitCFI) { 1214 unsigned CFIIndex = MF.addFrameInst( 1215 MCCFIInstruction::cfiDefCfaOffset(nullptr, CFAOffset - CSStackSizeInc)); 1216 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) 1217 .addCFIIndex(CFIIndex) 1218 .setMIFlags(FrameFlag); 1219 } 1220 1221 return std::prev(MBB.erase(MBBI)); 1222 } 1223 1224 // Fixup callee-save register save/restore instructions to take into account 1225 // combined SP bump by adding the local stack size to the stack offsets. 1226 static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI, 1227 uint64_t LocalStackSize, 1228 bool NeedsWinCFI, 1229 bool *HasWinCFI) { 1230 if (AArch64InstrInfo::isSEHInstruction(MI)) 1231 return; 1232 1233 unsigned Opc = MI.getOpcode(); 1234 unsigned Scale; 1235 switch (Opc) { 1236 case AArch64::STPXi: 1237 case AArch64::STRXui: 1238 case AArch64::STPDi: 1239 case AArch64::STRDui: 1240 case AArch64::LDPXi: 1241 case AArch64::LDRXui: 1242 case AArch64::LDPDi: 1243 case AArch64::LDRDui: 1244 Scale = 8; 1245 break; 1246 case AArch64::STPQi: 1247 case AArch64::STRQui: 1248 case AArch64::LDPQi: 1249 case AArch64::LDRQui: 1250 Scale = 16; 1251 break; 1252 default: 1253 llvm_unreachable("Unexpected callee-save save/restore opcode!"); 1254 } 1255 1256 unsigned OffsetIdx = MI.getNumExplicitOperands() - 1; 1257 assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP && 1258 "Unexpected base register in callee-save save/restore instruction!"); 1259 // Last operand is immediate offset that needs fixing. 1260 MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx); 1261 // All generated opcodes have scaled offsets. 1262 assert(LocalStackSize % Scale == 0); 1263 OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / Scale); 1264 1265 if (NeedsWinCFI) { 1266 *HasWinCFI = true; 1267 auto MBBI = std::next(MachineBasicBlock::iterator(MI)); 1268 assert(MBBI != MI.getParent()->end() && "Expecting a valid instruction"); 1269 assert(AArch64InstrInfo::isSEHInstruction(*MBBI) && 1270 "Expecting a SEH instruction"); 1271 fixupSEHOpcode(MBBI, LocalStackSize); 1272 } 1273 } 1274 1275 static bool isTargetWindows(const MachineFunction &MF) { 1276 return MF.getSubtarget<AArch64Subtarget>().isTargetWindows(); 1277 } 1278 1279 // Convenience function to determine whether I is an SVE callee save. 
1280 static bool IsSVECalleeSave(MachineBasicBlock::iterator I) { 1281 switch (I->getOpcode()) { 1282 default: 1283 return false; 1284 case AArch64::STR_ZXI: 1285 case AArch64::STR_PXI: 1286 case AArch64::LDR_ZXI: 1287 case AArch64::LDR_PXI: 1288 return I->getFlag(MachineInstr::FrameSetup) || 1289 I->getFlag(MachineInstr::FrameDestroy); 1290 } 1291 } 1292 1293 static bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF) { 1294 if (!(llvm::any_of( 1295 MF.getFrameInfo().getCalleeSavedInfo(), 1296 [](const auto &Info) { return Info.getReg() == AArch64::LR; }) && 1297 MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack))) 1298 return false; 1299 1300 if (!MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(18)) 1301 report_fatal_error("Must reserve x18 to use shadow call stack"); 1302 1303 return true; 1304 } 1305 1306 static void emitShadowCallStackPrologue(const TargetInstrInfo &TII, 1307 MachineFunction &MF, 1308 MachineBasicBlock &MBB, 1309 MachineBasicBlock::iterator MBBI, 1310 const DebugLoc &DL, bool NeedsWinCFI, 1311 bool NeedsUnwindInfo) { 1312 // Shadow call stack prolog: str x30, [x18], #8 1313 BuildMI(MBB, MBBI, DL, TII.get(AArch64::STRXpost)) 1314 .addReg(AArch64::X18, RegState::Define) 1315 .addReg(AArch64::LR) 1316 .addReg(AArch64::X18) 1317 .addImm(8) 1318 .setMIFlag(MachineInstr::FrameSetup); 1319 1320 // This instruction also makes x18 live-in to the entry block. 1321 MBB.addLiveIn(AArch64::X18); 1322 1323 if (NeedsWinCFI) 1324 BuildMI(MBB, MBBI, DL, TII.get(AArch64::SEH_Nop)) 1325 .setMIFlag(MachineInstr::FrameSetup); 1326 1327 if (NeedsUnwindInfo) { 1328 // Emit a CFI instruction that causes 8 to be subtracted from the value of 1329 // x18 when unwinding past this frame. 1330 static const char CFIInst[] = { 1331 dwarf::DW_CFA_val_expression, 1332 18, // register 1333 2, // length 1334 static_cast<char>(unsigned(dwarf::DW_OP_breg18)), 1335 static_cast<char>(-8) & 0x7f, // addend (sleb128) 1336 }; 1337 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createEscape( 1338 nullptr, StringRef(CFIInst, sizeof(CFIInst)))); 1339 BuildMI(MBB, MBBI, DL, TII.get(AArch64::CFI_INSTRUCTION)) 1340 .addCFIIndex(CFIIndex) 1341 .setMIFlag(MachineInstr::FrameSetup); 1342 } 1343 } 1344 1345 static void emitShadowCallStackEpilogue(const TargetInstrInfo &TII, 1346 MachineFunction &MF, 1347 MachineBasicBlock &MBB, 1348 MachineBasicBlock::iterator MBBI, 1349 const DebugLoc &DL) { 1350 // Shadow call stack epilog: ldr x30, [x18, #-8]! 1351 BuildMI(MBB, MBBI, DL, TII.get(AArch64::LDRXpre)) 1352 .addReg(AArch64::X18, RegState::Define) 1353 .addReg(AArch64::LR, RegState::Define) 1354 .addReg(AArch64::X18) 1355 .addImm(-8) 1356 .setMIFlag(MachineInstr::FrameDestroy); 1357 1358 if (MF.getInfo<AArch64FunctionInfo>()->needsAsyncDwarfUnwindInfo(MF)) { 1359 unsigned CFIIndex = 1360 MF.addFrameInst(MCCFIInstruction::createRestore(nullptr, 18)); 1361 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) 1362 .addCFIIndex(CFIIndex) 1363 .setMIFlags(MachineInstr::FrameDestroy); 1364 } 1365 } 1366 1367 // Define the current CFA rule to use the provided FP. 
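// For the example prologue in the header comment of this file, where fp/lr
// and x27/x28 occupy a 32-byte callee-save area and there is no fixed object
// area, this amounts to emitting
//   .cfi_def_cfa w29, 16
// (illustrative; the actual offset follows from FixedObject and the
// callee-save layout computed below).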
1368 static void emitDefineCFAWithFP(MachineFunction &MF, MachineBasicBlock &MBB, 1369 MachineBasicBlock::iterator MBBI, 1370 const DebugLoc &DL, unsigned FixedObject) { 1371 const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>(); 1372 const AArch64RegisterInfo *TRI = STI.getRegisterInfo(); 1373 const TargetInstrInfo *TII = STI.getInstrInfo(); 1374 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 1375 1376 const int OffsetToFirstCalleeSaveFromFP = 1377 AFI->getCalleeSaveBaseToFrameRecordOffset() - 1378 AFI->getCalleeSavedStackSize(); 1379 Register FramePtr = TRI->getFrameRegister(MF); 1380 unsigned Reg = TRI->getDwarfRegNum(FramePtr, true); 1381 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa( 1382 nullptr, Reg, FixedObject - OffsetToFirstCalleeSaveFromFP)); 1383 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) 1384 .addCFIIndex(CFIIndex) 1385 .setMIFlags(MachineInstr::FrameSetup); 1386 } 1387 1388 void AArch64FrameLowering::emitPrologue(MachineFunction &MF, 1389 MachineBasicBlock &MBB) const { 1390 MachineBasicBlock::iterator MBBI = MBB.begin(); 1391 const MachineFrameInfo &MFI = MF.getFrameInfo(); 1392 const Function &F = MF.getFunction(); 1393 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 1394 const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); 1395 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 1396 MachineModuleInfo &MMI = MF.getMMI(); 1397 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 1398 bool EmitCFI = AFI->needsDwarfUnwindInfo(MF); 1399 bool EmitAsyncCFI = AFI->needsAsyncDwarfUnwindInfo(MF); 1400 bool HasFP = hasFP(MF); 1401 bool NeedsWinCFI = needsWinCFI(MF); 1402 bool HasWinCFI = false; 1403 auto Cleanup = make_scope_exit([&]() { MF.setHasWinCFI(HasWinCFI); }); 1404 1405 bool IsFunclet = MBB.isEHFuncletEntry(); 1406 1407 // At this point, we're going to decide whether or not the function uses a 1408 // redzone. In most cases, the function doesn't have a redzone so let's 1409 // assume that's false and set it to true in the case that there's a redzone. 1410 AFI->setHasRedZone(false); 1411 1412 // Debug location must be unknown since the first debug location is used 1413 // to determine the end of the prologue. 1414 DebugLoc DL; 1415 1416 const auto &MFnI = *MF.getInfo<AArch64FunctionInfo>(); 1417 if (needsShadowCallStackPrologueEpilogue(MF)) 1418 emitShadowCallStackPrologue(*TII, MF, MBB, MBBI, DL, NeedsWinCFI, 1419 MFnI.needsDwarfUnwindInfo(MF)); 1420 1421 if (MFnI.shouldSignReturnAddress(MF)) { 1422 if (MFnI.shouldSignWithBKey()) { 1423 BuildMI(MBB, MBBI, DL, TII->get(AArch64::EMITBKEY)) 1424 .setMIFlag(MachineInstr::FrameSetup); 1425 } 1426 1427 // No SEH opcode for this one; it doesn't materialize into an 1428 // instruction on Windows. 1429 BuildMI(MBB, MBBI, DL, 1430 TII->get(MFnI.shouldSignWithBKey() ? 
AArch64::PACIBSP 1431 : AArch64::PACIASP)) 1432 .setMIFlag(MachineInstr::FrameSetup); 1433 1434 if (EmitCFI) { 1435 unsigned CFIIndex = 1436 MF.addFrameInst(MCCFIInstruction::createNegateRAState(nullptr)); 1437 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) 1438 .addCFIIndex(CFIIndex) 1439 .setMIFlags(MachineInstr::FrameSetup); 1440 } else if (NeedsWinCFI) { 1441 HasWinCFI = true; 1442 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PACSignLR)) 1443 .setMIFlag(MachineInstr::FrameSetup); 1444 } 1445 } 1446 if (EmitCFI && MFnI.isMTETagged()) { 1447 BuildMI(MBB, MBBI, DL, TII->get(AArch64::EMITMTETAGGED)) 1448 .setMIFlag(MachineInstr::FrameSetup); 1449 } 1450 1451 // We signal the presence of a Swift extended frame to external tools by 1452 // storing FP with 0b0001 in bits 63:60. In normal userland operation a simple 1453 // ORR is sufficient, it is assumed a Swift kernel would initialize the TBI 1454 // bits so that is still true. 1455 if (HasFP && AFI->hasSwiftAsyncContext()) { 1456 switch (MF.getTarget().Options.SwiftAsyncFramePointer) { 1457 case SwiftAsyncFramePointerMode::DeploymentBased: 1458 if (Subtarget.swiftAsyncContextIsDynamicallySet()) { 1459 // The special symbol below is absolute and has a *value* that can be 1460 // combined with the frame pointer to signal an extended frame. 1461 BuildMI(MBB, MBBI, DL, TII->get(AArch64::LOADgot), AArch64::X16) 1462 .addExternalSymbol("swift_async_extendedFramePointerFlags", 1463 AArch64II::MO_GOT); 1464 BuildMI(MBB, MBBI, DL, TII->get(AArch64::ORRXrs), AArch64::FP) 1465 .addUse(AArch64::FP) 1466 .addUse(AArch64::X16) 1467 .addImm(Subtarget.isTargetILP32() ? 32 : 0); 1468 break; 1469 } 1470 [[fallthrough]]; 1471 1472 case SwiftAsyncFramePointerMode::Always: 1473 // ORR x29, x29, #0x1000_0000_0000_0000 1474 BuildMI(MBB, MBBI, DL, TII->get(AArch64::ORRXri), AArch64::FP) 1475 .addUse(AArch64::FP) 1476 .addImm(0x1100) 1477 .setMIFlag(MachineInstr::FrameSetup); 1478 break; 1479 1480 case SwiftAsyncFramePointerMode::Never: 1481 break; 1482 } 1483 } 1484 1485 // All calls are tail calls in GHC calling conv, and functions have no 1486 // prologue/epilogue. 1487 if (MF.getFunction().getCallingConv() == CallingConv::GHC) 1488 return; 1489 1490 // Set tagged base pointer to the requested stack slot. 1491 // Ideally it should match SP value after prologue. 1492 std::optional<int> TBPI = AFI->getTaggedBasePointerIndex(); 1493 if (TBPI) 1494 AFI->setTaggedBasePointerOffset(-MFI.getObjectOffset(*TBPI)); 1495 else 1496 AFI->setTaggedBasePointerOffset(MFI.getStackSize()); 1497 1498 const StackOffset &SVEStackSize = getSVEStackSize(MF); 1499 1500 // getStackSize() includes all the locals in its size calculation. We don't 1501 // include these locals when computing the stack size of a funclet, as they 1502 // are allocated in the parent's stack frame and accessed via the frame 1503 // pointer from the funclet. We only save the callee saved registers in the 1504 // funclet, which are really the callee saved registers of the parent 1505 // function, including the funclet. 1506 int64_t NumBytes = IsFunclet ? getWinEHFuncletFrameSize(MF) 1507 : MFI.getStackSize(); 1508 if (!AFI->hasStackFrame() && !windowsRequiresStackProbe(MF, NumBytes)) { 1509 assert(!HasFP && "unexpected function without stack frame but with FP"); 1510 assert(!SVEStackSize && 1511 "unexpected function without stack frame but with SVE objects"); 1512 // All of the stack allocation is for locals. 
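    // If the red zone is usable (see canUseRedZone() below), a leaf function
    // whose locals fit in the 128-byte red zone can simply address them below
    // SP, e.g. with something like
    //   stur w0, [sp, #-4]
    // without ever adjusting SP (illustrative instruction; this path is only
    // enabled under the aarch64-redzone option).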
1513 AFI->setLocalStackSize(NumBytes); 1514 if (!NumBytes) 1515 return; 1516 // REDZONE: If the stack size is less than 128 bytes, we don't need 1517 // to actually allocate. 1518 if (canUseRedZone(MF)) { 1519 AFI->setHasRedZone(true); 1520 ++NumRedZoneFunctions; 1521 } else { 1522 emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, 1523 StackOffset::getFixed(-NumBytes), TII, 1524 MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI); 1525 if (EmitCFI) { 1526 // Label used to tie together the PROLOG_LABEL and the MachineMoves. 1527 MCSymbol *FrameLabel = MMI.getContext().createTempSymbol(); 1528 // Encode the stack size of the leaf function. 1529 unsigned CFIIndex = MF.addFrameInst( 1530 MCCFIInstruction::cfiDefCfaOffset(FrameLabel, NumBytes)); 1531 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) 1532 .addCFIIndex(CFIIndex) 1533 .setMIFlags(MachineInstr::FrameSetup); 1534 } 1535 } 1536 1537 if (NeedsWinCFI) { 1538 HasWinCFI = true; 1539 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd)) 1540 .setMIFlag(MachineInstr::FrameSetup); 1541 } 1542 1543 return; 1544 } 1545 1546 bool IsWin64 = 1547 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()); 1548 unsigned FixedObject = getFixedObjectSize(MF, AFI, IsWin64, IsFunclet); 1549 1550 auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject; 1551 // All of the remaining stack allocations are for locals. 1552 AFI->setLocalStackSize(NumBytes - PrologueSaveSize); 1553 bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes); 1554 bool HomPrologEpilog = homogeneousPrologEpilog(MF); 1555 if (CombineSPBump) { 1556 assert(!SVEStackSize && "Cannot combine SP bump with SVE"); 1557 emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, 1558 StackOffset::getFixed(-NumBytes), TII, 1559 MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI, 1560 EmitAsyncCFI); 1561 NumBytes = 0; 1562 } else if (HomPrologEpilog) { 1563 // Stack has been already adjusted. 1564 NumBytes -= PrologueSaveSize; 1565 } else if (PrologueSaveSize != 0) { 1566 MBBI = convertCalleeSaveRestoreToSPPrePostIncDec( 1567 MBB, MBBI, DL, TII, -PrologueSaveSize, NeedsWinCFI, &HasWinCFI, 1568 EmitAsyncCFI); 1569 NumBytes -= PrologueSaveSize; 1570 } 1571 assert(NumBytes >= 0 && "Negative stack allocation size!?"); 1572 1573 // Move past the saves of the callee-saved registers, fixing up the offsets 1574 // and pre-inc if we decided to combine the callee-save and local stack 1575 // pointer bump above. 1576 MachineBasicBlock::iterator End = MBB.end(); 1577 while (MBBI != End && MBBI->getFlag(MachineInstr::FrameSetup) && 1578 !IsSVECalleeSave(MBBI)) { 1579 if (CombineSPBump) 1580 fixupCalleeSaveRestoreStackOffset(*MBBI, AFI->getLocalStackSize(), 1581 NeedsWinCFI, &HasWinCFI); 1582 ++MBBI; 1583 } 1584 1585 // For funclets the FP belongs to the containing function. 1586 if (!IsFunclet && HasFP) { 1587 // Only set up FP if we actually need to. 1588 int64_t FPOffset = AFI->getCalleeSaveBaseToFrameRecordOffset(); 1589 1590 if (CombineSPBump) 1591 FPOffset += AFI->getLocalStackSize(); 1592 1593 if (AFI->hasSwiftAsyncContext()) { 1594 // Before we update the live FP we have to ensure there's a valid (or 1595 // null) asynchronous context in its slot just before FP in the frame 1596 // record, so store it now. 
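// Illustrative note: StoreSwiftAsyncContext stores the context register (X22
// when a swiftasync argument is present, XZR otherwise) to [sp, #FPOffset - 8],
// i.e. the eight bytes immediately below the address FP is about to point at.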
const auto &Attrs = MF.getFunction().getAttributes(); 1598 bool HaveInitialContext = Attrs.hasAttrSomewhere(Attribute::SwiftAsync); 1599 if (HaveInitialContext) 1600 MBB.addLiveIn(AArch64::X22); 1601 BuildMI(MBB, MBBI, DL, TII->get(AArch64::StoreSwiftAsyncContext)) 1602 .addUse(HaveInitialContext ? AArch64::X22 : AArch64::XZR) 1603 .addUse(AArch64::SP) 1604 .addImm(FPOffset - 8) 1605 .setMIFlags(MachineInstr::FrameSetup); 1606 } 1607 1608 if (HomPrologEpilog) { 1609 auto Prolog = MBBI; 1610 --Prolog; 1611 assert(Prolog->getOpcode() == AArch64::HOM_Prolog); 1612 Prolog->addOperand(MachineOperand::CreateImm(FPOffset)); 1613 } else { 1614 // Issue sub fp, sp, FPOffset or 1615 // mov fp, sp when FPOffset is zero. 1616 // Note: All stores of callee-saved registers are marked as "FrameSetup". 1617 // This code marks the instruction(s) that set the FP also. 1618 emitFrameOffset(MBB, MBBI, DL, AArch64::FP, AArch64::SP, 1619 StackOffset::getFixed(FPOffset), TII, 1620 MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI); 1621 if (NeedsWinCFI && HasWinCFI) { 1622 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd)) 1623 .setMIFlag(MachineInstr::FrameSetup); 1624 // After setting up the FP, the rest of the prolog doesn't need to be 1625 // included in the SEH unwind info. 1626 NeedsWinCFI = false; 1627 } 1628 } 1629 if (EmitAsyncCFI) 1630 emitDefineCFAWithFP(MF, MBB, MBBI, DL, FixedObject); 1631 } 1632 1633 // Now emit the moves for whatever callee saved regs we have (including FP, 1634 // LR if those are saved). Frame instructions for SVE registers are emitted 1635 // later, after the instructions which actually save SVE regs. 1636 if (EmitAsyncCFI) 1637 emitCalleeSavedGPRLocations(MBB, MBBI); 1638 1639 // Alignment is required for the parent frame, not the funclet 1640 const bool NeedsRealignment = 1641 NumBytes && !IsFunclet && RegInfo->hasStackRealignment(MF); 1642 int64_t RealignmentPadding = 1643 (NeedsRealignment && MFI.getMaxAlign() > Align(16)) 1644 ? MFI.getMaxAlign().value() - 16 1645 : 0; 1646 1647 if (windowsRequiresStackProbe(MF, NumBytes + RealignmentPadding)) { 1648 uint64_t NumWords = (NumBytes + RealignmentPadding) >> 4; 1649 if (NeedsWinCFI) { 1650 HasWinCFI = true; 1651 // alloc_l can hold at most 256MB, so assume that NumBytes doesn't 1652 // exceed this amount. We need to move at most 2^24 - 1 into x15. 1653 // This is at most two instructions, MOVZ followed by MOVK. 1654 // TODO: Fix to use multiple stack alloc unwind codes for stacks 1655 // exceeding 256MB in size.
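// Illustrative note: X15 carries the probed allocation size in units of 16
// bytes (NumWords == bytes >> 4), and the "sub sp, sp, x15, uxtx #4" emitted
// further down rescales it back to bytes. For example, a 1MiB frame gives
// NumWords == 0x10000, so the MOVZ below writes 0 and the MOVK supplies the
// 0x1 high half.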
1656 if (NumBytes >= (1 << 28)) 1657 report_fatal_error("Stack size cannot exceed 256MB for stack " 1658 "unwinding purposes"); 1659 1660 uint32_t LowNumWords = NumWords & 0xFFFF; 1661 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVZXi), AArch64::X15) 1662 .addImm(LowNumWords) 1663 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0)) 1664 .setMIFlag(MachineInstr::FrameSetup); 1665 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop)) 1666 .setMIFlag(MachineInstr::FrameSetup); 1667 if ((NumWords & 0xFFFF0000) != 0) { 1668 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVKXi), AArch64::X15) 1669 .addReg(AArch64::X15) 1670 .addImm((NumWords & 0xFFFF0000) >> 16) // High half 1671 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 16)) 1672 .setMIFlag(MachineInstr::FrameSetup); 1673 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop)) 1674 .setMIFlag(MachineInstr::FrameSetup); 1675 } 1676 } else { 1677 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm), AArch64::X15) 1678 .addImm(NumWords) 1679 .setMIFlags(MachineInstr::FrameSetup); 1680 } 1681 1682 const char* ChkStk = Subtarget.getChkStkName(); 1683 switch (MF.getTarget().getCodeModel()) { 1684 case CodeModel::Tiny: 1685 case CodeModel::Small: 1686 case CodeModel::Medium: 1687 case CodeModel::Kernel: 1688 BuildMI(MBB, MBBI, DL, TII->get(AArch64::BL)) 1689 .addExternalSymbol(ChkStk) 1690 .addReg(AArch64::X15, RegState::Implicit) 1691 .addReg(AArch64::X16, RegState::Implicit | RegState::Define | RegState::Dead) 1692 .addReg(AArch64::X17, RegState::Implicit | RegState::Define | RegState::Dead) 1693 .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define | RegState::Dead) 1694 .setMIFlags(MachineInstr::FrameSetup); 1695 if (NeedsWinCFI) { 1696 HasWinCFI = true; 1697 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop)) 1698 .setMIFlag(MachineInstr::FrameSetup); 1699 } 1700 break; 1701 case CodeModel::Large: 1702 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVaddrEXT)) 1703 .addReg(AArch64::X16, RegState::Define) 1704 .addExternalSymbol(ChkStk) 1705 .addExternalSymbol(ChkStk) 1706 .setMIFlags(MachineInstr::FrameSetup); 1707 if (NeedsWinCFI) { 1708 HasWinCFI = true; 1709 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop)) 1710 .setMIFlag(MachineInstr::FrameSetup); 1711 } 1712 1713 BuildMI(MBB, MBBI, DL, TII->get(getBLRCallOpcode(MF))) 1714 .addReg(AArch64::X16, RegState::Kill) 1715 .addReg(AArch64::X15, RegState::Implicit | RegState::Define) 1716 .addReg(AArch64::X16, RegState::Implicit | RegState::Define | RegState::Dead) 1717 .addReg(AArch64::X17, RegState::Implicit | RegState::Define | RegState::Dead) 1718 .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define | RegState::Dead) 1719 .setMIFlags(MachineInstr::FrameSetup); 1720 if (NeedsWinCFI) { 1721 HasWinCFI = true; 1722 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop)) 1723 .setMIFlag(MachineInstr::FrameSetup); 1724 } 1725 break; 1726 } 1727 1728 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SUBXrx64), AArch64::SP) 1729 .addReg(AArch64::SP, RegState::Kill) 1730 .addReg(AArch64::X15, RegState::Kill) 1731 .addImm(AArch64_AM::getArithExtendImm(AArch64_AM::UXTX, 4)) 1732 .setMIFlags(MachineInstr::FrameSetup); 1733 if (NeedsWinCFI) { 1734 HasWinCFI = true; 1735 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc)) 1736 .addImm(NumBytes) 1737 .setMIFlag(MachineInstr::FrameSetup); 1738 } 1739 NumBytes = 0; 1740 1741 if (RealignmentPadding > 0) { 1742 if (RealignmentPadding >= 4096) { 1743 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm)) 1744 .addReg(AArch64::X16, RegState::Define) 1745 
.addImm(RealignmentPadding) 1746 .setMIFlags(MachineInstr::FrameSetup); 1747 BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADDXrx64), AArch64::X15) 1748 .addReg(AArch64::SP) 1749 .addReg(AArch64::X16, RegState::Kill) 1750 .addImm(AArch64_AM::getArithExtendImm(AArch64_AM::UXTX, 0)) 1751 .setMIFlag(MachineInstr::FrameSetup); 1752 } else { 1753 BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADDXri), AArch64::X15) 1754 .addReg(AArch64::SP) 1755 .addImm(RealignmentPadding) 1756 .addImm(0) 1757 .setMIFlag(MachineInstr::FrameSetup); 1758 } 1759 1760 uint64_t AndMask = ~(MFI.getMaxAlign().value() - 1); 1761 BuildMI(MBB, MBBI, DL, TII->get(AArch64::ANDXri), AArch64::SP) 1762 .addReg(AArch64::X15, RegState::Kill) 1763 .addImm(AArch64_AM::encodeLogicalImmediate(AndMask, 64)); 1764 AFI->setStackRealigned(true); 1765 1766 // No need for SEH instructions here; if we're realigning the stack, 1767 // we've set a frame pointer and already finished the SEH prologue. 1768 assert(!NeedsWinCFI); 1769 } 1770 } 1771 1772 StackOffset AllocateBefore = SVEStackSize, AllocateAfter = {}; 1773 MachineBasicBlock::iterator CalleeSavesBegin = MBBI, CalleeSavesEnd = MBBI; 1774 1775 // Process the SVE callee-saves to determine what space needs to be 1776 // allocated. 1777 if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) { 1778 // Find callee save instructions in frame. 1779 CalleeSavesBegin = MBBI; 1780 assert(IsSVECalleeSave(CalleeSavesBegin) && "Unexpected instruction"); 1781 while (IsSVECalleeSave(MBBI) && MBBI != MBB.getFirstTerminator()) 1782 ++MBBI; 1783 CalleeSavesEnd = MBBI; 1784 1785 AllocateBefore = StackOffset::getScalable(CalleeSavedSize); 1786 AllocateAfter = SVEStackSize - AllocateBefore; 1787 } 1788 1789 // Allocate space for the callee saves (if any). 1790 emitFrameOffset( 1791 MBB, CalleeSavesBegin, DL, AArch64::SP, AArch64::SP, -AllocateBefore, TII, 1792 MachineInstr::FrameSetup, false, false, nullptr, 1793 EmitAsyncCFI && !HasFP && AllocateBefore, 1794 StackOffset::getFixed((int64_t)MFI.getStackSize() - NumBytes)); 1795 1796 if (EmitAsyncCFI) 1797 emitCalleeSavedSVELocations(MBB, CalleeSavesEnd); 1798 1799 // Finally allocate remaining SVE stack space. 1800 emitFrameOffset(MBB, CalleeSavesEnd, DL, AArch64::SP, AArch64::SP, 1801 -AllocateAfter, TII, MachineInstr::FrameSetup, false, false, 1802 nullptr, EmitAsyncCFI && !HasFP && AllocateAfter, 1803 AllocateBefore + StackOffset::getFixed( 1804 (int64_t)MFI.getStackSize() - NumBytes)); 1805 1806 // Allocate space for the rest of the frame. 1807 if (NumBytes) { 1808 unsigned scratchSPReg = AArch64::SP; 1809 1810 if (NeedsRealignment) { 1811 scratchSPReg = findScratchNonCalleeSaveRegister(&MBB); 1812 assert(scratchSPReg != AArch64::NoRegister); 1813 } 1814 1815 // If we're a leaf function, try using the red zone. 1816 if (!canUseRedZone(MF)) { 1817 // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have 1818 // the correct value here, as NumBytes also includes padding bytes, 1819 // which shouldn't be counted here. 
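// Note: the trailing StackOffset passed to emitFrameOffset below appears to
// describe how far the CFA already is from SP before this adjustment, so that
// any intermediate .cfi_def_cfa_offset directives emitted while splitting a
// large SP decrement remain correct.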
1820 emitFrameOffset( 1821 MBB, MBBI, DL, scratchSPReg, AArch64::SP, 1822 StackOffset::getFixed(-NumBytes), TII, MachineInstr::FrameSetup, 1823 false, NeedsWinCFI, &HasWinCFI, EmitAsyncCFI && !HasFP, 1824 SVEStackSize + 1825 StackOffset::getFixed((int64_t)MFI.getStackSize() - NumBytes)); 1826 } 1827 if (NeedsRealignment) { 1828 assert(MFI.getMaxAlign() > Align(1)); 1829 assert(scratchSPReg != AArch64::SP); 1830 1831 // SUB X9, SP, NumBytes 1832 // -- X9 is temporary register, so shouldn't contain any live data here, 1833 // -- free to use. This is already produced by emitFrameOffset above. 1834 // AND SP, X9, 0b11111...0000 1835 uint64_t AndMask = ~(MFI.getMaxAlign().value() - 1); 1836 1837 BuildMI(MBB, MBBI, DL, TII->get(AArch64::ANDXri), AArch64::SP) 1838 .addReg(scratchSPReg, RegState::Kill) 1839 .addImm(AArch64_AM::encodeLogicalImmediate(AndMask, 64)); 1840 AFI->setStackRealigned(true); 1841 1842 // No need for SEH instructions here; if we're realigning the stack, 1843 // we've set a frame pointer and already finished the SEH prologue. 1844 assert(!NeedsWinCFI); 1845 } 1846 } 1847 1848 // If we need a base pointer, set it up here. It's whatever the value of the 1849 // stack pointer is at this point. Any variable size objects will be allocated 1850 // after this, so we can still use the base pointer to reference locals. 1851 // 1852 // FIXME: Clarify FrameSetup flags here. 1853 // Note: Use emitFrameOffset() like above for FP if the FrameSetup flag is 1854 // needed. 1855 // For funclets the BP belongs to the containing function. 1856 if (!IsFunclet && RegInfo->hasBasePointer(MF)) { 1857 TII->copyPhysReg(MBB, MBBI, DL, RegInfo->getBaseRegister(), AArch64::SP, 1858 false); 1859 if (NeedsWinCFI) { 1860 HasWinCFI = true; 1861 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop)) 1862 .setMIFlag(MachineInstr::FrameSetup); 1863 } 1864 } 1865 1866 // The very last FrameSetup instruction indicates the end of prologue. Emit a 1867 // SEH opcode indicating the prologue end. 1868 if (NeedsWinCFI && HasWinCFI) { 1869 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd)) 1870 .setMIFlag(MachineInstr::FrameSetup); 1871 } 1872 1873 // SEH funclets are passed the frame pointer in X1. If the parent 1874 // function uses the base register, then the base register is used 1875 // directly, and is not retrieved from X1. 
if (IsFunclet && F.hasPersonalityFn()) { 1877 EHPersonality Per = classifyEHPersonality(F.getPersonalityFn()); 1878 if (isAsynchronousEHPersonality(Per)) { 1879 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::COPY), AArch64::FP) 1880 .addReg(AArch64::X1) 1881 .setMIFlag(MachineInstr::FrameSetup); 1882 MBB.addLiveIn(AArch64::X1); 1883 } 1884 } 1885 1886 if (EmitCFI && !EmitAsyncCFI) { 1887 if (HasFP) { 1888 emitDefineCFAWithFP(MF, MBB, MBBI, DL, FixedObject); 1889 } else { 1890 StackOffset TotalSize = 1891 SVEStackSize + StackOffset::getFixed((int64_t)MFI.getStackSize()); 1892 unsigned CFIIndex = MF.addFrameInst(createDefCFA( 1893 *RegInfo, /*FrameReg=*/AArch64::SP, /*Reg=*/AArch64::SP, TotalSize, 1894 /*LastAdjustmentWasScalable=*/false)); 1895 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) 1896 .addCFIIndex(CFIIndex) 1897 .setMIFlags(MachineInstr::FrameSetup); 1898 } 1899 emitCalleeSavedGPRLocations(MBB, MBBI); 1900 emitCalleeSavedSVELocations(MBB, MBBI); 1901 } 1902 } 1903 1904 static void InsertReturnAddressAuth(MachineFunction &MF, MachineBasicBlock &MBB, 1905 bool NeedsWinCFI, bool *HasWinCFI) { 1906 const auto &MFI = *MF.getInfo<AArch64FunctionInfo>(); 1907 if (!MFI.shouldSignReturnAddress(MF)) 1908 return; 1909 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 1910 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 1911 bool EmitAsyncCFI = MFI.needsAsyncDwarfUnwindInfo(MF); 1912 1913 MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator(); 1914 DebugLoc DL; 1915 if (MBBI != MBB.end()) 1916 DL = MBBI->getDebugLoc(); 1917 1918 // The AUTIASP instruction assembles to a hint instruction before v8.3a so 1919 // this instruction can safely be used for any v8a architecture. 1920 // From v8.3a onwards there are optimised authenticate LR and return 1921 // instructions, namely RETA{A,B}, that can be used instead. In this case the 1922 // DW_CFA_AARCH64_negate_ra_state can't be emitted. 1923 if (Subtarget.hasPAuth() && 1924 !MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack) && 1925 MBBI != MBB.end() && MBBI->getOpcode() == AArch64::RET_ReallyLR && 1926 !NeedsWinCFI) { 1927 BuildMI(MBB, MBBI, DL, 1928 TII->get(MFI.shouldSignWithBKey() ? AArch64::RETAB : AArch64::RETAA)) 1929 .copyImplicitOps(*MBBI); 1930 MBB.erase(MBBI); 1931 } else { 1932 BuildMI( 1933 MBB, MBBI, DL, 1934 TII->get(MFI.shouldSignWithBKey() ?
AArch64::AUTIBSP : AArch64::AUTIASP)) 1935 .setMIFlag(MachineInstr::FrameDestroy); 1936 1937 if (EmitAsyncCFI) { 1938 unsigned CFIIndex = 1939 MF.addFrameInst(MCCFIInstruction::createNegateRAState(nullptr)); 1940 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) 1941 .addCFIIndex(CFIIndex) 1942 .setMIFlags(MachineInstr::FrameDestroy); 1943 } 1944 if (NeedsWinCFI) { 1945 *HasWinCFI = true; 1946 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PACSignLR)) 1947 .setMIFlag(MachineInstr::FrameDestroy); 1948 } 1949 } 1950 } 1951 1952 static bool isFuncletReturnInstr(const MachineInstr &MI) { 1953 switch (MI.getOpcode()) { 1954 default: 1955 return false; 1956 case AArch64::CATCHRET: 1957 case AArch64::CLEANUPRET: 1958 return true; 1959 } 1960 } 1961 1962 void AArch64FrameLowering::emitEpilogue(MachineFunction &MF, 1963 MachineBasicBlock &MBB) const { 1964 MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr(); 1965 MachineFrameInfo &MFI = MF.getFrameInfo(); 1966 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 1967 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 1968 DebugLoc DL; 1969 bool NeedsWinCFI = needsWinCFI(MF); 1970 bool EmitCFI = 1971 MF.getInfo<AArch64FunctionInfo>()->needsAsyncDwarfUnwindInfo(MF); 1972 bool HasWinCFI = false; 1973 bool IsFunclet = false; 1974 auto WinCFI = make_scope_exit([&]() { assert(HasWinCFI == MF.hasWinCFI()); }); 1975 1976 if (MBB.end() != MBBI) { 1977 DL = MBBI->getDebugLoc(); 1978 IsFunclet = isFuncletReturnInstr(*MBBI); 1979 } 1980 1981 auto FinishingTouches = make_scope_exit([&]() { 1982 InsertReturnAddressAuth(MF, MBB, NeedsWinCFI, &HasWinCFI); 1983 if (needsShadowCallStackPrologueEpilogue(MF)) 1984 emitShadowCallStackEpilogue(*TII, MF, MBB, MBB.getFirstTerminator(), DL); 1985 if (EmitCFI) 1986 emitCalleeSavedGPRRestores(MBB, MBB.getFirstTerminator()); 1987 if (HasWinCFI) 1988 BuildMI(MBB, MBB.getFirstTerminator(), DL, 1989 TII->get(AArch64::SEH_EpilogEnd)) 1990 .setMIFlag(MachineInstr::FrameDestroy); 1991 }); 1992 1993 int64_t NumBytes = IsFunclet ? getWinEHFuncletFrameSize(MF) 1994 : MFI.getStackSize(); 1995 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 1996 1997 // All calls are tail calls in GHC calling conv, and functions have no 1998 // prologue/epilogue. 1999 if (MF.getFunction().getCallingConv() == CallingConv::GHC) 2000 return; 2001 2002 // How much of the stack used by incoming arguments this function is expected 2003 // to restore in this particular epilogue. 2004 int64_t ArgumentStackToRestore = getArgumentStackToRestore(MF, MBB); 2005 bool IsWin64 = 2006 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()); 2007 unsigned FixedObject = getFixedObjectSize(MF, AFI, IsWin64, IsFunclet); 2008 2009 int64_t AfterCSRPopSize = ArgumentStackToRestore; 2010 auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject; 2011 // We cannot rely on the local stack size set in emitPrologue if the function 2012 // has funclets, as funclets have different local stack size requirements, and 2013 // the current value set in emitPrologue may be that of the containing 2014 // function. 
2015 if (MF.hasEHFunclets()) 2016 AFI->setLocalStackSize(NumBytes - PrologueSaveSize); 2017 if (homogeneousPrologEpilog(MF, &MBB)) { 2018 assert(!NeedsWinCFI); 2019 auto LastPopI = MBB.getFirstTerminator(); 2020 if (LastPopI != MBB.begin()) { 2021 auto HomogeneousEpilog = std::prev(LastPopI); 2022 if (HomogeneousEpilog->getOpcode() == AArch64::HOM_Epilog) 2023 LastPopI = HomogeneousEpilog; 2024 } 2025 2026 // Adjust local stack 2027 emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, 2028 StackOffset::getFixed(AFI->getLocalStackSize()), TII, 2029 MachineInstr::FrameDestroy, false, NeedsWinCFI); 2030 2031 // SP has been already adjusted while restoring callee save regs. 2032 // We've bailed-out the case with adjusting SP for arguments. 2033 assert(AfterCSRPopSize == 0); 2034 return; 2035 } 2036 bool CombineSPBump = shouldCombineCSRLocalStackBumpInEpilogue(MBB, NumBytes); 2037 // Assume we can't combine the last pop with the sp restore. 2038 2039 bool CombineAfterCSRBump = false; 2040 if (!CombineSPBump && PrologueSaveSize != 0) { 2041 MachineBasicBlock::iterator Pop = std::prev(MBB.getFirstTerminator()); 2042 while (Pop->getOpcode() == TargetOpcode::CFI_INSTRUCTION || 2043 AArch64InstrInfo::isSEHInstruction(*Pop)) 2044 Pop = std::prev(Pop); 2045 // Converting the last ldp to a post-index ldp is valid only if the last 2046 // ldp's offset is 0. 2047 const MachineOperand &OffsetOp = Pop->getOperand(Pop->getNumOperands() - 1); 2048 // If the offset is 0 and the AfterCSR pop is not actually trying to 2049 // allocate more stack for arguments (in space that an untimely interrupt 2050 // may clobber), convert it to a post-index ldp. 2051 if (OffsetOp.getImm() == 0 && AfterCSRPopSize >= 0) { 2052 convertCalleeSaveRestoreToSPPrePostIncDec( 2053 MBB, Pop, DL, TII, PrologueSaveSize, NeedsWinCFI, &HasWinCFI, EmitCFI, 2054 MachineInstr::FrameDestroy, PrologueSaveSize); 2055 } else { 2056 // If not, make sure to emit an add after the last ldp. 2057 // We're doing this by transfering the size to be restored from the 2058 // adjustment *before* the CSR pops to the adjustment *after* the CSR 2059 // pops. 2060 AfterCSRPopSize += PrologueSaveSize; 2061 CombineAfterCSRBump = true; 2062 } 2063 } 2064 2065 // Move past the restores of the callee-saved registers. 2066 // If we plan on combining the sp bump of the local stack size and the callee 2067 // save stack size, we might need to adjust the CSR save and restore offsets. 2068 MachineBasicBlock::iterator LastPopI = MBB.getFirstTerminator(); 2069 MachineBasicBlock::iterator Begin = MBB.begin(); 2070 while (LastPopI != Begin) { 2071 --LastPopI; 2072 if (!LastPopI->getFlag(MachineInstr::FrameDestroy) || 2073 IsSVECalleeSave(LastPopI)) { 2074 ++LastPopI; 2075 break; 2076 } else if (CombineSPBump) 2077 fixupCalleeSaveRestoreStackOffset(*LastPopI, AFI->getLocalStackSize(), 2078 NeedsWinCFI, &HasWinCFI); 2079 } 2080 2081 if (MF.hasWinCFI()) { 2082 // If the prologue didn't contain any SEH opcodes and didn't set the 2083 // MF.hasWinCFI() flag, assume the epilogue won't either, and skip the 2084 // EpilogStart - to avoid generating CFI for functions that don't need it. 2085 // (And as we didn't generate any prologue at all, it would be asymmetrical 2086 // to the epilogue.) By the end of the function, we assert that 2087 // HasWinCFI is equal to MF.hasWinCFI(), to verify this assumption. 
2088 HasWinCFI = true; 2089 BuildMI(MBB, LastPopI, DL, TII->get(AArch64::SEH_EpilogStart)) 2090 .setMIFlag(MachineInstr::FrameDestroy); 2091 } 2092 2093 if (hasFP(MF) && AFI->hasSwiftAsyncContext()) { 2094 switch (MF.getTarget().Options.SwiftAsyncFramePointer) { 2095 case SwiftAsyncFramePointerMode::DeploymentBased: 2096 // Avoid the reload as it is GOT relative, and instead fall back to the 2097 // hardcoded value below. This allows a mismatch between the OS and 2098 // application without immediately terminating on the difference. 2099 [[fallthrough]]; 2100 case SwiftAsyncFramePointerMode::Always: 2101 // We need to reset FP to its untagged state on return. Bit 60 is 2102 // currently used to show the presence of an extended frame. 2103 2104 // BIC x29, x29, #0x1000_0000_0000_0000 2105 BuildMI(MBB, MBB.getFirstTerminator(), DL, TII->get(AArch64::ANDXri), 2106 AArch64::FP) 2107 .addUse(AArch64::FP) 2108 .addImm(0x10fe) 2109 .setMIFlag(MachineInstr::FrameDestroy); 2110 break; 2111 2112 case SwiftAsyncFramePointerMode::Never: 2113 break; 2114 } 2115 } 2116 2117 const StackOffset &SVEStackSize = getSVEStackSize(MF); 2118 2119 // If there is a single SP update, insert it before the ret and we're done. 2120 if (CombineSPBump) { 2121 assert(!SVEStackSize && "Cannot combine SP bump with SVE"); 2122 2123 // When we are about to restore the CSRs, the CFA register is SP again. 2124 if (EmitCFI && hasFP(MF)) { 2125 const AArch64RegisterInfo &RegInfo = *Subtarget.getRegisterInfo(); 2126 unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP, true); 2127 unsigned CFIIndex = 2128 MF.addFrameInst(MCCFIInstruction::cfiDefCfa(nullptr, Reg, NumBytes)); 2129 BuildMI(MBB, LastPopI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) 2130 .addCFIIndex(CFIIndex) 2131 .setMIFlags(MachineInstr::FrameDestroy); 2132 } 2133 2134 emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP, 2135 StackOffset::getFixed(NumBytes + (int64_t)AfterCSRPopSize), 2136 TII, MachineInstr::FrameDestroy, false, NeedsWinCFI, 2137 &HasWinCFI, EmitCFI, StackOffset::getFixed(NumBytes)); 2138 return; 2139 } 2140 2141 NumBytes -= PrologueSaveSize; 2142 assert(NumBytes >= 0 && "Negative stack allocation size!?"); 2143 2144 // Process the SVE callee-saves to determine what space needs to be 2145 // deallocated. 2146 StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize; 2147 MachineBasicBlock::iterator RestoreBegin = LastPopI, RestoreEnd = LastPopI; 2148 if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) { 2149 RestoreBegin = std::prev(RestoreEnd); 2150 while (RestoreBegin != MBB.begin() && 2151 IsSVECalleeSave(std::prev(RestoreBegin))) 2152 --RestoreBegin; 2153 2154 assert(IsSVECalleeSave(RestoreBegin) && 2155 IsSVECalleeSave(std::prev(RestoreEnd)) && "Unexpected instruction"); 2156 2157 StackOffset CalleeSavedSizeAsOffset = 2158 StackOffset::getScalable(CalleeSavedSize); 2159 DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset; 2160 DeallocateAfter = CalleeSavedSizeAsOffset; 2161 } 2162 2163 // Deallocate the SVE area. 2164 if (SVEStackSize) { 2165 // If we have stack realignment or variable sized objects on the stack, 2166 // restore the stack pointer from the frame pointer prior to SVE CSR 2167 // restoration. 2168 if (AFI->isStackRealigned() || MFI.hasVarSizedObjects()) { 2169 if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) { 2170 // Set SP to start of SVE callee-save area from which they can 2171 // be reloaded. 
The code below will deallocate the stack space 2172 // space by moving FP -> SP. 2173 emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::FP, 2174 StackOffset::getScalable(-CalleeSavedSize), TII, 2175 MachineInstr::FrameDestroy); 2176 } 2177 } else { 2178 if (AFI->getSVECalleeSavedStackSize()) { 2179 // Deallocate the non-SVE locals first before we can deallocate (and 2180 // restore callee saves) from the SVE area. 2181 emitFrameOffset( 2182 MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP, 2183 StackOffset::getFixed(NumBytes), TII, MachineInstr::FrameDestroy, 2184 false, false, nullptr, EmitCFI && !hasFP(MF), 2185 SVEStackSize + StackOffset::getFixed(NumBytes + PrologueSaveSize)); 2186 NumBytes = 0; 2187 } 2188 2189 emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP, 2190 DeallocateBefore, TII, MachineInstr::FrameDestroy, false, 2191 false, nullptr, EmitCFI && !hasFP(MF), 2192 SVEStackSize + 2193 StackOffset::getFixed(NumBytes + PrologueSaveSize)); 2194 2195 emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP, 2196 DeallocateAfter, TII, MachineInstr::FrameDestroy, false, 2197 false, nullptr, EmitCFI && !hasFP(MF), 2198 DeallocateAfter + 2199 StackOffset::getFixed(NumBytes + PrologueSaveSize)); 2200 } 2201 if (EmitCFI) 2202 emitCalleeSavedSVERestores(MBB, RestoreEnd); 2203 } 2204 2205 if (!hasFP(MF)) { 2206 bool RedZone = canUseRedZone(MF); 2207 // If this was a redzone leaf function, we don't need to restore the 2208 // stack pointer (but we may need to pop stack args for fastcc). 2209 if (RedZone && AfterCSRPopSize == 0) 2210 return; 2211 2212 // Pop the local variables off the stack. If there are no callee-saved 2213 // registers, it means we are actually positioned at the terminator and can 2214 // combine stack increment for the locals and the stack increment for 2215 // callee-popped arguments into (possibly) a single instruction and be done. 2216 bool NoCalleeSaveRestore = PrologueSaveSize == 0; 2217 int64_t StackRestoreBytes = RedZone ? 0 : NumBytes; 2218 if (NoCalleeSaveRestore) 2219 StackRestoreBytes += AfterCSRPopSize; 2220 2221 emitFrameOffset( 2222 MBB, LastPopI, DL, AArch64::SP, AArch64::SP, 2223 StackOffset::getFixed(StackRestoreBytes), TII, 2224 MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI, EmitCFI, 2225 StackOffset::getFixed((RedZone ? 0 : NumBytes) + PrologueSaveSize)); 2226 2227 // If we were able to combine the local stack pop with the argument pop, 2228 // then we're done. 2229 if (NoCalleeSaveRestore || AfterCSRPopSize == 0) { 2230 return; 2231 } 2232 2233 NumBytes = 0; 2234 } 2235 2236 // Restore the original stack pointer. 2237 // FIXME: Rather than doing the math here, we should instead just use 2238 // non-post-indexed loads for the restores if we aren't actually going to 2239 // be able to save any instructions. 2240 if (!IsFunclet && (MFI.hasVarSizedObjects() || AFI->isStackRealigned())) { 2241 emitFrameOffset( 2242 MBB, LastPopI, DL, AArch64::SP, AArch64::FP, 2243 StackOffset::getFixed(-AFI->getCalleeSaveBaseToFrameRecordOffset()), 2244 TII, MachineInstr::FrameDestroy, false, NeedsWinCFI); 2245 } else if (NumBytes) 2246 emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, 2247 StackOffset::getFixed(NumBytes), TII, 2248 MachineInstr::FrameDestroy, false, NeedsWinCFI); 2249 2250 // When we are about to restore the CSRs, the CFA register is SP again. 
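// At this point only the callee-save spills and the fixed-object area
// (PrologueSaveSize bytes in total) still separate SP from the CFA, hence the
// switch back to defining the CFA as SP + PrologueSaveSize below.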
2251 if (EmitCFI && hasFP(MF)) { 2252 const AArch64RegisterInfo &RegInfo = *Subtarget.getRegisterInfo(); 2253 unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP, true); 2254 unsigned CFIIndex = MF.addFrameInst( 2255 MCCFIInstruction::cfiDefCfa(nullptr, Reg, PrologueSaveSize)); 2256 BuildMI(MBB, LastPopI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) 2257 .addCFIIndex(CFIIndex) 2258 .setMIFlags(MachineInstr::FrameDestroy); 2259 } 2260 2261 // This must be placed after the callee-save restore code because that code 2262 // assumes the SP is at the same location as it was after the callee-save save 2263 // code in the prologue. 2264 if (AfterCSRPopSize) { 2265 assert(AfterCSRPopSize > 0 && "attempting to reallocate arg stack that an " 2266 "interrupt may have clobbered"); 2267 2268 emitFrameOffset( 2269 MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP, 2270 StackOffset::getFixed(AfterCSRPopSize), TII, MachineInstr::FrameDestroy, 2271 false, NeedsWinCFI, &HasWinCFI, EmitCFI, 2272 StackOffset::getFixed(CombineAfterCSRBump ? PrologueSaveSize : 0)); 2273 } 2274 } 2275 2276 bool AArch64FrameLowering::enableCFIFixup(MachineFunction &MF) const { 2277 return TargetFrameLowering::enableCFIFixup(MF) && 2278 MF.getInfo<AArch64FunctionInfo>()->needsAsyncDwarfUnwindInfo(MF); 2279 } 2280 2281 /// getFrameIndexReference - Provide a base+offset reference to an FI slot for 2282 /// debug info. It's the same as what we use for resolving the code-gen 2283 /// references for now. FIXME: This can go wrong when references are 2284 /// SP-relative and simple call frames aren't used. 2285 StackOffset 2286 AArch64FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI, 2287 Register &FrameReg) const { 2288 return resolveFrameIndexReference( 2289 MF, FI, FrameReg, 2290 /*PreferFP=*/ 2291 MF.getFunction().hasFnAttribute(Attribute::SanitizeHWAddress), 2292 /*ForSimm=*/false); 2293 } 2294 2295 StackOffset 2296 AArch64FrameLowering::getNonLocalFrameIndexReference(const MachineFunction &MF, 2297 int FI) const { 2298 return StackOffset::getFixed(getSEHFrameIndexOffset(MF, FI)); 2299 } 2300 2301 static StackOffset getFPOffset(const MachineFunction &MF, 2302 int64_t ObjectOffset) { 2303 const auto *AFI = MF.getInfo<AArch64FunctionInfo>(); 2304 const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 2305 bool IsWin64 = 2306 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()); 2307 unsigned FixedObject = 2308 getFixedObjectSize(MF, AFI, IsWin64, /*IsFunclet=*/false); 2309 int64_t CalleeSaveSize = AFI->getCalleeSavedStackSize(MF.getFrameInfo()); 2310 int64_t FPAdjust = 2311 CalleeSaveSize - AFI->getCalleeSaveBaseToFrameRecordOffset(); 2312 return StackOffset::getFixed(ObjectOffset + FixedObject + FPAdjust); 2313 } 2314 2315 static StackOffset getStackOffset(const MachineFunction &MF, 2316 int64_t ObjectOffset) { 2317 const auto &MFI = MF.getFrameInfo(); 2318 return StackOffset::getFixed(ObjectOffset + (int64_t)MFI.getStackSize()); 2319 } 2320 2321 // TODO: This function currently does not work for scalable vectors. 2322 int AArch64FrameLowering::getSEHFrameIndexOffset(const MachineFunction &MF, 2323 int FI) const { 2324 const auto *RegInfo = static_cast<const AArch64RegisterInfo *>( 2325 MF.getSubtarget().getRegisterInfo()); 2326 int ObjectOffset = MF.getFrameInfo().getObjectOffset(FI); 2327 return RegInfo->getLocalAddressRegister(MF) == AArch64::FP 2328 ? 
getFPOffset(MF, ObjectOffset).getFixed() 2329 : getStackOffset(MF, ObjectOffset).getFixed(); 2330 } 2331 2332 StackOffset AArch64FrameLowering::resolveFrameIndexReference( 2333 const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP, 2334 bool ForSimm) const { 2335 const auto &MFI = MF.getFrameInfo(); 2336 int64_t ObjectOffset = MFI.getObjectOffset(FI); 2337 bool isFixed = MFI.isFixedObjectIndex(FI); 2338 bool isSVE = MFI.getStackID(FI) == TargetStackID::ScalableVector; 2339 return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, isSVE, FrameReg, 2340 PreferFP, ForSimm); 2341 } 2342 2343 StackOffset AArch64FrameLowering::resolveFrameOffsetReference( 2344 const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, bool isSVE, 2345 Register &FrameReg, bool PreferFP, bool ForSimm) const { 2346 const auto &MFI = MF.getFrameInfo(); 2347 const auto *RegInfo = static_cast<const AArch64RegisterInfo *>( 2348 MF.getSubtarget().getRegisterInfo()); 2349 const auto *AFI = MF.getInfo<AArch64FunctionInfo>(); 2350 const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 2351 2352 int64_t FPOffset = getFPOffset(MF, ObjectOffset).getFixed(); 2353 int64_t Offset = getStackOffset(MF, ObjectOffset).getFixed(); 2354 bool isCSR = 2355 !isFixed && ObjectOffset >= -((int)AFI->getCalleeSavedStackSize(MFI)); 2356 2357 const StackOffset &SVEStackSize = getSVEStackSize(MF); 2358 2359 // Use frame pointer to reference fixed objects. Use it for locals if 2360 // there are VLAs or a dynamically realigned SP (and thus the SP isn't 2361 // reliable as a base). Make sure useFPForScavengingIndex() does the 2362 // right thing for the emergency spill slot. 2363 bool UseFP = false; 2364 if (AFI->hasStackFrame() && !isSVE) { 2365 // We shouldn't prefer using the FP to access fixed-sized stack objects when 2366 // there are scalable (SVE) objects in between the FP and the fixed-sized 2367 // objects. 2368 PreferFP &= !SVEStackSize; 2369 2370 // Note: Keeping the following as multiple 'if' statements rather than 2371 // merging to a single expression for readability. 2372 // 2373 // Argument access should always use the FP. 2374 if (isFixed) { 2375 UseFP = hasFP(MF); 2376 } else if (isCSR && RegInfo->hasStackRealignment(MF)) { 2377 // References to the CSR area must use FP if we're re-aligning the stack 2378 // since the dynamically-sized alignment padding is between the SP/BP and 2379 // the CSR area. 2380 assert(hasFP(MF) && "Re-aligned stack must have frame pointer"); 2381 UseFP = true; 2382 } else if (hasFP(MF) && !RegInfo->hasStackRealignment(MF)) { 2383 // If the FPOffset is negative and we're producing a signed immediate, we 2384 // have to keep in mind that the available offset range for negative 2385 // offsets is smaller than for positive ones. If an offset is available 2386 // via the FP and the SP, use whichever is closest. 2387 bool FPOffsetFits = !ForSimm || FPOffset >= -256; 2388 PreferFP |= Offset > -FPOffset && !SVEStackSize; 2389 2390 if (MFI.hasVarSizedObjects()) { 2391 // If we have variable sized objects, we can use either FP or BP, as the 2392 // SP offset is unknown. We can use the base pointer if we have one and 2393 // FP is not preferred. If not, we're stuck with using FP. 2394 bool CanUseBP = RegInfo->hasBasePointer(MF); 2395 if (FPOffsetFits && CanUseBP) // Both are ok. Pick the best. 2396 UseFP = PreferFP; 2397 else if (!CanUseBP) // Can't use BP. Forced to use FP. 2398 UseFP = true; 2399 // else we can use BP and FP, but the offset from FP won't fit. 
2400 // That will make us scavenge registers which we can probably avoid by 2401 // using BP. If it won't fit for BP either, we'll scavenge anyway. 2402 } else if (FPOffset >= 0) { 2403 // Use SP or FP, whichever gives us the best chance of the offset 2404 // being in range for direct access. If the FPOffset is positive, 2405 // that'll always be best, as the SP will be even further away. 2406 UseFP = true; 2407 } else if (MF.hasEHFunclets() && !RegInfo->hasBasePointer(MF)) { 2408 // Funclets access the locals contained in the parent's stack frame 2409 // via the frame pointer, so we have to use the FP in the parent 2410 // function. 2411 (void) Subtarget; 2412 assert( 2413 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()) && 2414 "Funclets should only be present on Win64"); 2415 UseFP = true; 2416 } else { 2417 // We have the choice between FP and (SP or BP). 2418 if (FPOffsetFits && PreferFP) // If FP is the best fit, use it. 2419 UseFP = true; 2420 } 2421 } 2422 } 2423 2424 assert( 2425 ((isFixed || isCSR) || !RegInfo->hasStackRealignment(MF) || !UseFP) && 2426 "In the presence of dynamic stack pointer realignment, " 2427 "non-argument/CSR objects cannot be accessed through the frame pointer"); 2428 2429 if (isSVE) { 2430 StackOffset FPOffset = 2431 StackOffset::get(-AFI->getCalleeSaveBaseToFrameRecordOffset(), ObjectOffset); 2432 StackOffset SPOffset = 2433 SVEStackSize + 2434 StackOffset::get(MFI.getStackSize() - AFI->getCalleeSavedStackSize(), 2435 ObjectOffset); 2436 // Always use the FP for SVE spills if available and beneficial. 2437 if (hasFP(MF) && (SPOffset.getFixed() || 2438 FPOffset.getScalable() < SPOffset.getScalable() || 2439 RegInfo->hasStackRealignment(MF))) { 2440 FrameReg = RegInfo->getFrameRegister(MF); 2441 return FPOffset; 2442 } 2443 2444 FrameReg = RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister() 2445 : (unsigned)AArch64::SP; 2446 return SPOffset; 2447 } 2448 2449 StackOffset ScalableOffset = {}; 2450 if (UseFP && !(isFixed || isCSR)) 2451 ScalableOffset = -SVEStackSize; 2452 if (!UseFP && (isFixed || isCSR)) 2453 ScalableOffset = SVEStackSize; 2454 2455 if (UseFP) { 2456 FrameReg = RegInfo->getFrameRegister(MF); 2457 return StackOffset::getFixed(FPOffset) + ScalableOffset; 2458 } 2459 2460 // Use the base pointer if we have one. 2461 if (RegInfo->hasBasePointer(MF)) 2462 FrameReg = RegInfo->getBaseRegister(); 2463 else { 2464 assert(!MFI.hasVarSizedObjects() && 2465 "Can't use SP when we have var sized objects."); 2466 FrameReg = AArch64::SP; 2467 // If we're using the red zone for this function, the SP won't actually 2468 // be adjusted, so the offsets will be negative. They're also all 2469 // within range of the signed 9-bit immediate instructions. 2470 if (canUseRedZone(MF)) 2471 Offset -= AFI->getLocalStackSize(); 2472 } 2473 2474 return StackOffset::getFixed(Offset) + ScalableOffset; 2475 } 2476 2477 static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) { 2478 // Do not set a kill flag on values that are also marked as live-in. This 2479 // happens with the @llvm-returnaddress intrinsic and with arguments passed in 2480 // callee saved registers. 2481 // Omitting the kill flags is conservatively correct even if the live-in 2482 // is not used after all. 
bool IsLiveIn = MF.getRegInfo().isLiveIn(Reg); 2484 return getKillRegState(!IsLiveIn); 2485 } 2486 2487 static bool produceCompactUnwindFrame(MachineFunction &MF) { 2488 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 2489 AttributeList Attrs = MF.getFunction().getAttributes(); 2490 return Subtarget.isTargetMachO() && 2491 !(Subtarget.getTargetLowering()->supportSwiftError() && 2492 Attrs.hasAttrSomewhere(Attribute::SwiftError)) && 2493 MF.getFunction().getCallingConv() != CallingConv::SwiftTail; 2494 } 2495 2496 static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2, 2497 bool NeedsWinCFI, bool IsFirst, 2498 const TargetRegisterInfo *TRI) { 2499 // If we are generating register pairs for a Windows function that requires 2500 // EH support, then pair consecutive registers only. There are no unwind 2501 // opcodes for saves/restores of non-consecutive register pairs. 2502 // The unwind opcodes are save_regp, save_regp_x, save_fregp, save_fregp_x, 2503 // save_lrpair. 2504 // https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling 2505 2506 if (Reg2 == AArch64::FP) 2507 return true; 2508 if (!NeedsWinCFI) 2509 return false; 2510 if (TRI->getEncodingValue(Reg2) == TRI->getEncodingValue(Reg1) + 1) 2511 return false; 2512 // If pairing a GPR with LR, the pair can be described by the save_lrpair 2513 // opcode. If this is the first register pair, it would end up with a 2514 // predecrement, but there's no save_lrpair_x opcode, so we can only do this 2515 // if LR is paired with something other than the first register. 2516 // The save_lrpair opcode requires the first register to be an odd one. 2517 if (Reg1 >= AArch64::X19 && Reg1 <= AArch64::X27 && 2518 (Reg1 - AArch64::X19) % 2 == 0 && Reg2 == AArch64::LR && !IsFirst) 2519 return false; 2520 return true; 2521 } 2522 2523 /// Returns true if Reg1 and Reg2 cannot be paired using an ldp/stp instruction. 2524 /// WindowsCFI requires that only consecutive registers can be paired. 2525 /// LR and FP need to be allocated together when the frame needs to save 2526 /// the frame-record. This means any other register pairing with LR is invalid. 2527 static bool invalidateRegisterPairing(unsigned Reg1, unsigned Reg2, 2528 bool UsesWinAAPCS, bool NeedsWinCFI, 2529 bool NeedsFrameRecord, bool IsFirst, 2530 const TargetRegisterInfo *TRI) { 2531 if (UsesWinAAPCS) 2532 return invalidateWindowsRegisterPairing(Reg1, Reg2, NeedsWinCFI, IsFirst, 2533 TRI); 2534 2535 // If we need to store the frame record, don't pair any register 2536 // with LR other than FP.
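// For example, a candidate pair (x21, lr) is rejected here so that lr can
// instead be the first register of the (lr, fp) frame-record pair (see the
// asserts in computeCalleeSaveRegisterPairs below).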
2537 if (NeedsFrameRecord) 2538 return Reg2 == AArch64::LR; 2539 2540 return false; 2541 } 2542 2543 namespace { 2544 2545 struct RegPairInfo { 2546 unsigned Reg1 = AArch64::NoRegister; 2547 unsigned Reg2 = AArch64::NoRegister; 2548 int FrameIdx; 2549 int Offset; 2550 enum RegType { GPR, FPR64, FPR128, PPR, ZPR } Type; 2551 2552 RegPairInfo() = default; 2553 2554 bool isPaired() const { return Reg2 != AArch64::NoRegister; } 2555 2556 unsigned getScale() const { 2557 switch (Type) { 2558 case PPR: 2559 return 2; 2560 case GPR: 2561 case FPR64: 2562 return 8; 2563 case ZPR: 2564 case FPR128: 2565 return 16; 2566 } 2567 llvm_unreachable("Unsupported type"); 2568 } 2569 2570 bool isScalable() const { return Type == PPR || Type == ZPR; } 2571 }; 2572 2573 } // end anonymous namespace 2574 2575 static void computeCalleeSaveRegisterPairs( 2576 MachineFunction &MF, ArrayRef<CalleeSavedInfo> CSI, 2577 const TargetRegisterInfo *TRI, SmallVectorImpl<RegPairInfo> &RegPairs, 2578 bool NeedsFrameRecord) { 2579 2580 if (CSI.empty()) 2581 return; 2582 2583 bool IsWindows = isTargetWindows(MF); 2584 bool NeedsWinCFI = needsWinCFI(MF); 2585 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 2586 MachineFrameInfo &MFI = MF.getFrameInfo(); 2587 CallingConv::ID CC = MF.getFunction().getCallingConv(); 2588 unsigned Count = CSI.size(); 2589 (void)CC; 2590 // MachO's compact unwind format relies on all registers being stored in 2591 // pairs. 2592 assert((!produceCompactUnwindFrame(MF) || CC == CallingConv::PreserveMost || 2593 CC == CallingConv::PreserveAll || CC == CallingConv::CXX_FAST_TLS || 2594 CC == CallingConv::Win64 || (Count & 1) == 0) && 2595 "Odd number of callee-saved regs to spill!"); 2596 int ByteOffset = AFI->getCalleeSavedStackSize(); 2597 int StackFillDir = -1; 2598 int RegInc = 1; 2599 unsigned FirstReg = 0; 2600 if (NeedsWinCFI) { 2601 // For WinCFI, fill the stack from the bottom up. 2602 ByteOffset = 0; 2603 StackFillDir = 1; 2604 // As the CSI array is reversed to match PrologEpilogInserter, iterate 2605 // backwards, to pair up registers starting from lower numbered registers. 2606 RegInc = -1; 2607 FirstReg = Count - 1; 2608 } 2609 int ScalableByteOffset = AFI->getSVECalleeSavedStackSize(); 2610 bool NeedGapToAlignStack = AFI->hasCalleeSaveStackFreeSpace(); 2611 2612 // When iterating backwards, the loop condition relies on unsigned wraparound. 2613 for (unsigned i = FirstReg; i < Count; i += RegInc) { 2614 RegPairInfo RPI; 2615 RPI.Reg1 = CSI[i].getReg(); 2616 2617 if (AArch64::GPR64RegClass.contains(RPI.Reg1)) 2618 RPI.Type = RegPairInfo::GPR; 2619 else if (AArch64::FPR64RegClass.contains(RPI.Reg1)) 2620 RPI.Type = RegPairInfo::FPR64; 2621 else if (AArch64::FPR128RegClass.contains(RPI.Reg1)) 2622 RPI.Type = RegPairInfo::FPR128; 2623 else if (AArch64::ZPRRegClass.contains(RPI.Reg1)) 2624 RPI.Type = RegPairInfo::ZPR; 2625 else if (AArch64::PPRRegClass.contains(RPI.Reg1)) 2626 RPI.Type = RegPairInfo::PPR; 2627 else 2628 llvm_unreachable("Unsupported register class."); 2629 2630 // Add the next reg to the pair if it is in the same register class. 
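// For example, x19 followed by x20, or d8 followed by d9, can form a single
// stp/ldp pair (subject to the pairing restrictions above), while SVE Z and P
// registers are never paired because there are no paired spill/fill
// instructions for scalable vectors.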
2631 if (unsigned(i + RegInc) < Count) { 2632 Register NextReg = CSI[i + RegInc].getReg(); 2633 bool IsFirst = i == FirstReg; 2634 switch (RPI.Type) { 2635 case RegPairInfo::GPR: 2636 if (AArch64::GPR64RegClass.contains(NextReg) && 2637 !invalidateRegisterPairing(RPI.Reg1, NextReg, IsWindows, 2638 NeedsWinCFI, NeedsFrameRecord, IsFirst, 2639 TRI)) 2640 RPI.Reg2 = NextReg; 2641 break; 2642 case RegPairInfo::FPR64: 2643 if (AArch64::FPR64RegClass.contains(NextReg) && 2644 !invalidateWindowsRegisterPairing(RPI.Reg1, NextReg, NeedsWinCFI, 2645 IsFirst, TRI)) 2646 RPI.Reg2 = NextReg; 2647 break; 2648 case RegPairInfo::FPR128: 2649 if (AArch64::FPR128RegClass.contains(NextReg)) 2650 RPI.Reg2 = NextReg; 2651 break; 2652 case RegPairInfo::PPR: 2653 case RegPairInfo::ZPR: 2654 break; 2655 } 2656 } 2657 2658 // GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI 2659 // list to come in sorted by frame index so that we can issue the store 2660 // pair instructions directly. Assert if we see anything otherwise. 2661 // 2662 // The order of the registers in the list is controlled by 2663 // getCalleeSavedRegs(), so they will always be in-order, as well. 2664 assert((!RPI.isPaired() || 2665 (CSI[i].getFrameIdx() + RegInc == CSI[i + RegInc].getFrameIdx())) && 2666 "Out of order callee saved regs!"); 2667 2668 assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg2 != AArch64::FP || 2669 RPI.Reg1 == AArch64::LR) && 2670 "FrameRecord must be allocated together with LR"); 2671 2672 // Windows AAPCS has FP and LR reversed. 2673 assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg1 != AArch64::FP || 2674 RPI.Reg2 == AArch64::LR) && 2675 "FrameRecord must be allocated together with LR"); 2676 2677 // MachO's compact unwind format relies on all registers being stored in 2678 // adjacent register pairs. 2679 assert((!produceCompactUnwindFrame(MF) || CC == CallingConv::PreserveMost || 2680 CC == CallingConv::PreserveAll || CC == CallingConv::CXX_FAST_TLS || 2681 CC == CallingConv::Win64 || 2682 (RPI.isPaired() && 2683 ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) || 2684 RPI.Reg1 + 1 == RPI.Reg2))) && 2685 "Callee-save registers not saved as adjacent register pair!"); 2686 2687 RPI.FrameIdx = CSI[i].getFrameIdx(); 2688 if (NeedsWinCFI && 2689 RPI.isPaired()) // RPI.FrameIdx must be the lower index of the pair 2690 RPI.FrameIdx = CSI[i + RegInc].getFrameIdx(); 2691 2692 int Scale = RPI.getScale(); 2693 2694 int OffsetPre = RPI.isScalable() ? ScalableByteOffset : ByteOffset; 2695 assert(OffsetPre % Scale == 0); 2696 2697 if (RPI.isScalable()) 2698 ScalableByteOffset += StackFillDir * Scale; 2699 else 2700 ByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale); 2701 2702 // Swift's async context is directly before FP, so allocate an extra 2703 // 8 bytes for it. 2704 if (NeedsFrameRecord && AFI->hasSwiftAsyncContext() && 2705 RPI.Reg2 == AArch64::FP) 2706 ByteOffset += StackFillDir * 8; 2707 2708 assert(!(RPI.isScalable() && RPI.isPaired()) && 2709 "Paired spill/fill instructions don't exist for SVE vectors"); 2710 2711 // Round up size of non-pair to pair size if we need to pad the 2712 // callee-save area to ensure 16-byte alignment. 2713 if (NeedGapToAlignStack && !NeedsWinCFI && 2714 !RPI.isScalable() && RPI.Type != RegPairInfo::FPR128 && 2715 !RPI.isPaired() && ByteOffset % 16 != 0) { 2716 ByteOffset += 8 * StackFillDir; 2717 assert(MFI.getObjectAlign(RPI.FrameIdx) <= Align(16)); 2718 // A stack frame with a gap looks like this, bottom up: 2719 // d9, d8. 
x21, gap, x20, x19. 2720 // Set extra alignment on the x21 object to create the gap above it. 2721 MFI.setObjectAlignment(RPI.FrameIdx, Align(16)); 2722 NeedGapToAlignStack = false; 2723 } 2724 2725 int OffsetPost = RPI.isScalable() ? ScalableByteOffset : ByteOffset; 2726 assert(OffsetPost % Scale == 0); 2727 // If filling top down (default), we want the offset after incrementing it. 2728 // If filling bottom up (WinCFI) we need the original offset. 2729 int Offset = NeedsWinCFI ? OffsetPre : OffsetPost; 2730 2731 // The FP, LR pair goes 8 bytes into our expanded 24-byte slot so that the 2732 // Swift context can directly precede FP. 2733 if (NeedsFrameRecord && AFI->hasSwiftAsyncContext() && 2734 RPI.Reg2 == AArch64::FP) 2735 Offset += 8; 2736 RPI.Offset = Offset / Scale; 2737 2738 assert(((!RPI.isScalable() && RPI.Offset >= -64 && RPI.Offset <= 63) || 2739 (RPI.isScalable() && RPI.Offset >= -256 && RPI.Offset <= 255)) && 2740 "Offset out of bounds for LDP/STP immediate"); 2741 2742 // Save the offset to the frame record so that the FP register can point to the 2743 // innermost frame record (spilled FP and LR registers). 2744 if (NeedsFrameRecord && ((!IsWindows && RPI.Reg1 == AArch64::LR && 2745 RPI.Reg2 == AArch64::FP) || 2746 (IsWindows && RPI.Reg1 == AArch64::FP && 2747 RPI.Reg2 == AArch64::LR))) 2748 AFI->setCalleeSaveBaseToFrameRecordOffset(Offset); 2749 2750 RegPairs.push_back(RPI); 2751 if (RPI.isPaired()) 2752 i += RegInc; 2753 } 2754 if (NeedsWinCFI) { 2755 // If we need an alignment gap in the stack, align the topmost stack 2756 // object. A stack frame with a gap looks like this, bottom up: 2757 // x19, d8. d9, gap. 2758 // Set extra alignment on the topmost stack object (the first element in 2759 // CSI, which goes top down), to create the gap above it. 2760 if (AFI->hasCalleeSaveStackFreeSpace()) 2761 MFI.setObjectAlignment(CSI[0].getFrameIdx(), Align(16)); 2762 // We iterated bottom up over the registers; flip RegPairs back to top 2763 // down order. 2764 std::reverse(RegPairs.begin(), RegPairs.end()); 2765 } 2766 } 2767 2768 bool AArch64FrameLowering::spillCalleeSavedRegisters( 2769 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, 2770 ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const { 2771 MachineFunction &MF = *MBB.getParent(); 2772 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 2773 bool NeedsWinCFI = needsWinCFI(MF); 2774 DebugLoc DL; 2775 SmallVector<RegPairInfo, 8> RegPairs; 2776 2777 computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs, hasFP(MF)); 2778 2779 const MachineRegisterInfo &MRI = MF.getRegInfo(); 2780 if (homogeneousPrologEpilog(MF)) { 2781 auto MIB = BuildMI(MBB, MI, DL, TII.get(AArch64::HOM_Prolog)) 2782 .setMIFlag(MachineInstr::FrameSetup); 2783 2784 for (auto &RPI : RegPairs) { 2785 MIB.addReg(RPI.Reg1); 2786 MIB.addReg(RPI.Reg2); 2787 2788 // Update register live in. 2789 if (!MRI.isReserved(RPI.Reg1)) 2790 MBB.addLiveIn(RPI.Reg1); 2791 if (!MRI.isReserved(RPI.Reg2)) 2792 MBB.addLiveIn(RPI.Reg2); 2793 } 2794 return true; 2795 } 2796 for (const RegPairInfo &RPI : llvm::reverse(RegPairs)) { 2797 unsigned Reg1 = RPI.Reg1; 2798 unsigned Reg2 = RPI.Reg2; 2799 unsigned StrOpc; 2800 2801 // Issue sequence of spills for cs regs. The first spill may be converted 2802 // to a pre-decrement store later by emitPrologue if the callee-save stack 2803 // area allocation can't be combined with the local stack area allocation.
// For example: 2805 // stp x22, x21, [sp, #0] // addImm(+0) 2806 // stp x20, x19, [sp, #16] // addImm(+2) 2807 // stp fp, lr, [sp, #32] // addImm(+4) 2808 // Rationale: This sequence saves uop updates compared to a sequence of 2809 // pre-increment spills like stp xi,xj,[sp,#-16]! 2810 // Note: Similar rationale and sequence for restores in epilog. 2811 unsigned Size; 2812 Align Alignment; 2813 switch (RPI.Type) { 2814 case RegPairInfo::GPR: 2815 StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui; 2816 Size = 8; 2817 Alignment = Align(8); 2818 break; 2819 case RegPairInfo::FPR64: 2820 StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui; 2821 Size = 8; 2822 Alignment = Align(8); 2823 break; 2824 case RegPairInfo::FPR128: 2825 StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui; 2826 Size = 16; 2827 Alignment = Align(16); 2828 break; 2829 case RegPairInfo::ZPR: 2830 StrOpc = AArch64::STR_ZXI; 2831 Size = 16; 2832 Alignment = Align(16); 2833 break; 2834 case RegPairInfo::PPR: 2835 StrOpc = AArch64::STR_PXI; 2836 Size = 2; 2837 Alignment = Align(2); 2838 break; 2839 } 2840 LLVM_DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI); 2841 if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI); 2842 dbgs() << ") -> fi#(" << RPI.FrameIdx; 2843 if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1; 2844 dbgs() << ")\n"); 2845 2846 assert((!NeedsWinCFI || !(Reg1 == AArch64::LR && Reg2 == AArch64::FP)) && 2847 "Windows unwinding requires a consecutive (FP,LR) pair"); 2848 // Windows unwind codes require consecutive registers if registers are 2849 // paired. Make the switch here, so that the code below will save (x,x+1) 2850 // and not (x+1,x). 2851 unsigned FrameIdxReg1 = RPI.FrameIdx; 2852 unsigned FrameIdxReg2 = RPI.FrameIdx + 1; 2853 if (NeedsWinCFI && RPI.isPaired()) { 2854 std::swap(Reg1, Reg2); 2855 std::swap(FrameIdxReg1, FrameIdxReg2); 2856 } 2857 MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc)); 2858 if (!MRI.isReserved(Reg1)) 2859 MBB.addLiveIn(Reg1); 2860 if (RPI.isPaired()) { 2861 if (!MRI.isReserved(Reg2)) 2862 MBB.addLiveIn(Reg2); 2863 MIB.addReg(Reg2, getPrologueDeath(MF, Reg2)); 2864 MIB.addMemOperand(MF.getMachineMemOperand( 2865 MachinePointerInfo::getFixedStack(MF, FrameIdxReg2), 2866 MachineMemOperand::MOStore, Size, Alignment)); 2867 } 2868 MIB.addReg(Reg1, getPrologueDeath(MF, Reg1)) 2869 .addReg(AArch64::SP) 2870 .addImm(RPI.Offset) // [sp, #offset*scale], 2871 // where factor*scale is implicit 2872 .setMIFlag(MachineInstr::FrameSetup); 2873 MIB.addMemOperand(MF.getMachineMemOperand( 2874 MachinePointerInfo::getFixedStack(MF, FrameIdxReg1), 2875 MachineMemOperand::MOStore, Size, Alignment)); 2876 if (NeedsWinCFI) 2877 InsertSEH(MIB, TII, MachineInstr::FrameSetup); 2878 2879 // Update the StackIDs of the SVE stack slots.
2880 MachineFrameInfo &MFI = MF.getFrameInfo(); 2881 if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR) 2882 MFI.setStackID(RPI.FrameIdx, TargetStackID::ScalableVector); 2883 2884 } 2885 return true; 2886 } 2887 2888 bool AArch64FrameLowering::restoreCalleeSavedRegisters( 2889 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, 2890 MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const { 2891 MachineFunction &MF = *MBB.getParent(); 2892 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 2893 DebugLoc DL; 2894 SmallVector<RegPairInfo, 8> RegPairs; 2895 bool NeedsWinCFI = needsWinCFI(MF); 2896 2897 if (MBBI != MBB.end()) 2898 DL = MBBI->getDebugLoc(); 2899 2900 computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs, hasFP(MF)); 2901 2902 auto EmitMI = [&](const RegPairInfo &RPI) -> MachineBasicBlock::iterator { 2903 unsigned Reg1 = RPI.Reg1; 2904 unsigned Reg2 = RPI.Reg2; 2905 2906 // Issue sequence of restores for cs regs. The last restore may be converted 2907 // to a post-increment load later by emitEpilogue if the callee-save stack 2908 // area allocation can't be combined with the local stack area allocation. 2909 // For example: 2910 // ldp fp, lr, [sp, #32] // addImm(+4) 2911 // ldp x20, x19, [sp, #16] // addImm(+2) 2912 // ldp x22, x21, [sp, #0] // addImm(+0) 2913 // Note: see comment in spillCalleeSavedRegisters() 2914 unsigned LdrOpc; 2915 unsigned Size; 2916 Align Alignment; 2917 switch (RPI.Type) { 2918 case RegPairInfo::GPR: 2919 LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui; 2920 Size = 8; 2921 Alignment = Align(8); 2922 break; 2923 case RegPairInfo::FPR64: 2924 LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui; 2925 Size = 8; 2926 Alignment = Align(8); 2927 break; 2928 case RegPairInfo::FPR128: 2929 LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui; 2930 Size = 16; 2931 Alignment = Align(16); 2932 break; 2933 case RegPairInfo::ZPR: 2934 LdrOpc = AArch64::LDR_ZXI; 2935 Size = 16; 2936 Alignment = Align(16); 2937 break; 2938 case RegPairInfo::PPR: 2939 LdrOpc = AArch64::LDR_PXI; 2940 Size = 2; 2941 Alignment = Align(2); 2942 break; 2943 } 2944 LLVM_DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI); 2945 if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI); 2946 dbgs() << ") -> fi#(" << RPI.FrameIdx; 2947 if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1; 2948 dbgs() << ")\n"); 2949 2950 // Windows unwind codes require consecutive registers if registers are 2951 // paired. Make the switch here, so that the code below will save (x,x+1) 2952 // and not (x+1,x). 
    unsigned FrameIdxReg1 = RPI.FrameIdx;
    unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
    if (NeedsWinCFI && RPI.isPaired()) {
      std::swap(Reg1, Reg2);
      std::swap(FrameIdxReg1, FrameIdxReg2);
    }
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(LdrOpc));
    if (RPI.isPaired()) {
      MIB.addReg(Reg2, getDefRegState(true));
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, FrameIdxReg2),
          MachineMemOperand::MOLoad, Size, Alignment));
    }
    MIB.addReg(Reg1, getDefRegState(true))
        .addReg(AArch64::SP)
        .addImm(RPI.Offset) // [sp, #offset*scale]
                            // where factor*scale is implicit
        .setMIFlag(MachineInstr::FrameDestroy);
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FrameIdxReg1),
        MachineMemOperand::MOLoad, Size, Alignment));
    if (NeedsWinCFI)
      InsertSEH(MIB, TII, MachineInstr::FrameDestroy);

    return MIB->getIterator();
  };

  // SVE objects are always restored in reverse order.
  for (const RegPairInfo &RPI : reverse(RegPairs))
    if (RPI.isScalable())
      EmitMI(RPI);

  if (homogeneousPrologEpilog(MF, &MBB)) {
    auto MIB = BuildMI(MBB, MBBI, DL, TII.get(AArch64::HOM_Epilog))
                   .setMIFlag(MachineInstr::FrameDestroy);
    for (auto &RPI : RegPairs) {
      MIB.addReg(RPI.Reg1, RegState::Define);
      MIB.addReg(RPI.Reg2, RegState::Define);
    }
    return true;
  }

  if (ReverseCSRRestoreSeq) {
    MachineBasicBlock::iterator First = MBB.end();
    for (const RegPairInfo &RPI : reverse(RegPairs)) {
      if (RPI.isScalable())
        continue;
      MachineBasicBlock::iterator It = EmitMI(RPI);
      if (First == MBB.end())
        First = It;
    }
    if (First != MBB.end())
      MBB.splice(MBBI, &MBB, First);
  } else {
    for (const RegPairInfo &RPI : RegPairs) {
      if (RPI.isScalable())
        continue;
      (void)EmitMI(RPI);
    }
  }

  return true;
}

void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
                                                BitVector &SavedRegs,
                                                RegScavenger *RS) const {
  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  unsigned UnspilledCSGPR = AArch64::NoRegister;
  unsigned UnspilledCSGPRPaired = AArch64::NoRegister;

  MachineFrameInfo &MFI = MF.getFrameInfo();
  const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs();

  unsigned BasePointerReg = RegInfo->hasBasePointer(MF)
                                ? RegInfo->getBaseRegister()
                                : (unsigned)AArch64::NoRegister;

  unsigned ExtraCSSpill = 0;
  // Figure out which callee-saved registers to save/restore.
  for (unsigned i = 0; CSRegs[i]; ++i) {
    const unsigned Reg = CSRegs[i];

    // Add the base pointer register to SavedRegs if it is callee-save.
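    // (The prologue re-establishes the base pointer from SP, clobbering its
    // previous value, so if a base pointer is needed it must be spilled like
    // any other callee-saved register.)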
    if (Reg == BasePointerReg)
      SavedRegs.set(Reg);

    bool RegUsed = SavedRegs.test(Reg);
    unsigned PairedReg = AArch64::NoRegister;
    if (AArch64::GPR64RegClass.contains(Reg) ||
        AArch64::FPR64RegClass.contains(Reg) ||
        AArch64::FPR128RegClass.contains(Reg))
      PairedReg = CSRegs[i ^ 1];

    if (!RegUsed) {
      if (AArch64::GPR64RegClass.contains(Reg) &&
          !RegInfo->isReservedReg(MF, Reg)) {
        UnspilledCSGPR = Reg;
        UnspilledCSGPRPaired = PairedReg;
      }
      continue;
    }

    // MachO's compact unwind format relies on all registers being stored in
    // pairs.
    // FIXME: the usual format is actually better if unwinding isn't needed.
    if (producePairRegisters(MF) && PairedReg != AArch64::NoRegister &&
        !SavedRegs.test(PairedReg)) {
      SavedRegs.set(PairedReg);
      if (AArch64::GPR64RegClass.contains(PairedReg) &&
          !RegInfo->isReservedReg(MF, PairedReg))
        ExtraCSSpill = PairedReg;
    }
  }

  if (MF.getFunction().getCallingConv() == CallingConv::Win64 &&
      !Subtarget.isTargetWindows()) {
    // For Windows calling convention on a non-windows OS, where X18 is treated
    // as reserved, back up X18 when entering non-windows code (marked with the
    // Windows calling convention) and restore when returning regardless of
    // whether the individual function uses it - it might call other functions
    // that clobber it.
    SavedRegs.set(AArch64::X18);
  }

  // Calculate the callee-saved stack size.
  unsigned CSStackSize = 0;
  unsigned SVECSStackSize = 0;
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned Reg : SavedRegs.set_bits()) {
    auto RegSize = TRI->getRegSizeInBits(Reg, MRI) / 8;
    if (AArch64::PPRRegClass.contains(Reg) ||
        AArch64::ZPRRegClass.contains(Reg))
      SVECSStackSize += RegSize;
    else
      CSStackSize += RegSize;
  }

  // Save the number of saved regs, so we can easily update CSStackSize later.
  unsigned NumSavedRegs = SavedRegs.count();

  // The frame record needs to be created by saving the appropriate registers.
  uint64_t EstimatedStackSize = MFI.estimateStackSize(MF);
  if (hasFP(MF) ||
      windowsRequiresStackProbe(MF, EstimatedStackSize + CSStackSize + 16)) {
    SavedRegs.set(AArch64::FP);
    SavedRegs.set(AArch64::LR);
  }

  LLVM_DEBUG({
    dbgs() << "*** determineCalleeSaves\nSaved CSRs:";
    for (unsigned Reg : SavedRegs.set_bits())
      dbgs() << ' ' << printReg(Reg, RegInfo);
    dbgs() << "\n";
  });

  // If any callee-saved registers are used, the frame cannot be eliminated.
  int64_t SVEStackSize =
      alignTo(SVECSStackSize + estimateSVEStackObjectOffsets(MFI), 16);
  bool CanEliminateFrame = (SavedRegs.count() == 0) && !SVEStackSize;

  // The CSR spill slots have not been allocated yet, so estimateStackSize
  // won't include them.
  unsigned EstimatedStackSizeLimit = estimateRSStackSizeLimit(MF);

  // We may address some of the stack above the canonical frame address, either
  // for our own arguments or during a call. Include that in calculating
  // whether we have complicated addressing concerns.
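  // For example (offset purely illustrative), an argument passed on the stack
  // may live in a fixed object at +32 above the incoming SP; that access also
  // has to be reachable with an encodable immediate, so it counts toward the
  // "big stack" estimate below.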
  int64_t CalleeStackUsed = 0;
  for (int I = MFI.getObjectIndexBegin(); I != 0; ++I) {
    int64_t FixedOff = MFI.getObjectOffset(I);
    if (FixedOff > CalleeStackUsed)
      CalleeStackUsed = FixedOff;
  }

  // Conservatively always assume BigStack when there are SVE spills.
  bool BigStack = SVEStackSize || (EstimatedStackSize + CSStackSize +
                                   CalleeStackUsed) > EstimatedStackSizeLimit;
  if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
    AFI->setHasStackFrame(true);

  // Estimate if we might need to scavenge a register at some point in order
  // to materialize a stack offset. If so, either spill one additional
  // callee-saved register or reserve a special spill slot to facilitate
  // register scavenging. If we already spilled an extra callee-saved register
  // above to keep the number of spills even, we don't need to do anything else
  // here.
  if (BigStack) {
    if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) {
      LLVM_DEBUG(dbgs() << "Spilling " << printReg(UnspilledCSGPR, RegInfo)
                        << " to get a scratch register.\n");
      SavedRegs.set(UnspilledCSGPR);
      // MachO's compact unwind format relies on all registers being stored in
      // pairs, so if we need to spill one extra for BigStack, then we need to
      // store the pair.
      if (producePairRegisters(MF))
        SavedRegs.set(UnspilledCSGPRPaired);
      ExtraCSSpill = UnspilledCSGPR;
    }

    // If we didn't find an extra callee-saved register to spill, create
    // an emergency spill slot.
    if (!ExtraCSSpill || MF.getRegInfo().isPhysRegUsed(ExtraCSSpill)) {
      const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
      const TargetRegisterClass &RC = AArch64::GPR64RegClass;
      unsigned Size = TRI->getSpillSize(RC);
      Align Alignment = TRI->getSpillAlign(RC);
      int FI = MFI.CreateStackObject(Size, Alignment, false);
      RS->addScavengingFrameIndex(FI);
      LLVM_DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
                        << " as the emergency spill slot.\n");
    }
  }

  // Add the size of the additional 64-bit GPR saves.
  CSStackSize += 8 * (SavedRegs.count() - NumSavedRegs);

  // A Swift asynchronous context extends the frame record with a pointer
  // directly before FP.
  if (hasFP(MF) && AFI->hasSwiftAsyncContext())
    CSStackSize += 8;

  uint64_t AlignedCSStackSize = alignTo(CSStackSize, 16);
  LLVM_DEBUG(dbgs() << "Estimated stack frame size: "
                    << EstimatedStackSize + AlignedCSStackSize << " bytes.\n");

  assert((!MFI.isCalleeSavedInfoValid() ||
          AFI->getCalleeSavedStackSize() == AlignedCSStackSize) &&
         "Should not invalidate callee saved info");

  // Round up to register pair alignment to avoid additional SP adjustment
  // instructions.
  AFI->setCalleeSavedStackSize(AlignedCSStackSize);
  AFI->setCalleeSaveStackHasFreeSpace(AlignedCSStackSize != CSStackSize);
  AFI->setSVECalleeSavedStackSize(alignTo(SVECSStackSize, 16));
}

bool AArch64FrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *RegInfo,
    std::vector<CalleeSavedInfo> &CSI, unsigned &MinCSFrameIndex,
    unsigned &MaxCSFrameIndex) const {
  bool NeedsWinCFI = needsWinCFI(MF);
  // To match the canonical windows frame layout, reverse the list of
  // callee saved registers to get them laid out by PrologEpilogInserter
  // in the right order.
  // (PrologEpilogInserter allocates stack objects top down. Windows canonical
  // prologs store higher numbered registers at the top, thus have the CSI
  // array start from the highest registers.)
  if (NeedsWinCFI)
    std::reverse(CSI.begin(), CSI.end());

  if (CSI.empty())
    return true; // Early exit if no callee saved registers are modified!

  // Now that we know which registers need to be saved and restored, allocate
  // stack slots for them.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto *AFI = MF.getInfo<AArch64FunctionInfo>();

  bool UsesWinAAPCS = isTargetWindows(MF);
  if (UsesWinAAPCS && hasFP(MF) && AFI->hasSwiftAsyncContext()) {
    int FrameIdx = MFI.CreateStackObject(8, Align(16), true);
    AFI->setSwiftAsyncContextFrameIdx(FrameIdx);
    if ((unsigned)FrameIdx < MinCSFrameIndex)
      MinCSFrameIndex = FrameIdx;
    if ((unsigned)FrameIdx > MaxCSFrameIndex)
      MaxCSFrameIndex = FrameIdx;
  }

  for (auto &CS : CSI) {
    Register Reg = CS.getReg();
    const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);

    unsigned Size = RegInfo->getSpillSize(*RC);
    Align Alignment(RegInfo->getSpillAlign(*RC));
    int FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
    CS.setFrameIdx(FrameIdx);

    if ((unsigned)FrameIdx < MinCSFrameIndex)
      MinCSFrameIndex = FrameIdx;
    if ((unsigned)FrameIdx > MaxCSFrameIndex)
      MaxCSFrameIndex = FrameIdx;

    // Grab 8 bytes below FP for the extended asynchronous frame info.
    if (hasFP(MF) && AFI->hasSwiftAsyncContext() && !UsesWinAAPCS &&
        Reg == AArch64::FP) {
      FrameIdx = MFI.CreateStackObject(8, Alignment, true);
      AFI->setSwiftAsyncContextFrameIdx(FrameIdx);
      if ((unsigned)FrameIdx < MinCSFrameIndex)
        MinCSFrameIndex = FrameIdx;
      if ((unsigned)FrameIdx > MaxCSFrameIndex)
        MaxCSFrameIndex = FrameIdx;
    }
  }
  return true;
}

bool AArch64FrameLowering::enableStackSlotScavenging(
    const MachineFunction &MF) const {
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  return AFI->hasCalleeSaveStackFreeSpace();
}

/// Returns true if there are any SVE callee saves.
static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI,
                                      int &Min, int &Max) {
  Min = std::numeric_limits<int>::max();
  Max = std::numeric_limits<int>::min();

  if (!MFI.isCalleeSavedInfoValid())
    return false;

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  for (auto &CS : CSI) {
    if (AArch64::ZPRRegClass.contains(CS.getReg()) ||
        AArch64::PPRRegClass.contains(CS.getReg())) {
      assert((Max == std::numeric_limits<int>::min() ||
              Max + 1 == CS.getFrameIdx()) &&
             "SVE CalleeSaves are not consecutive");

      Min = std::min(Min, CS.getFrameIdx());
      Max = std::max(Max, CS.getFrameIdx());
    }
  }
  return Min != std::numeric_limits<int>::max();
}

// Process all the SVE stack objects and determine offsets for each
// object. If AssignOffsets is true, the offsets get assigned.
// Fills in the first and last callee-saved frame indices into
// Min/MaxCSFrameIndex, respectively.
// Returns the size of the stack.
static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
                                              int &MinCSFrameIndex,
                                              int &MaxCSFrameIndex,
                                              bool AssignOffsets) {
#ifndef NDEBUG
  // First process all fixed stack objects.
  for (int I = MFI.getObjectIndexBegin(); I != 0; ++I)
    assert(MFI.getStackID(I) != TargetStackID::ScalableVector &&
           "SVE vectors should never be passed on the stack by value, only by "
           "reference.");
#endif

  auto Assign = [&MFI](int FI, int64_t Offset) {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FI << ") at SP[" << Offset << "]\n");
    MFI.setObjectOffset(FI, Offset);
  };

  int64_t Offset = 0;

  // Then process all callee saved slots.
  if (getSVECalleeSaveSlotRange(MFI, MinCSFrameIndex, MaxCSFrameIndex)) {
    // Assign offsets to the callee save slots.
    for (int I = MinCSFrameIndex; I <= MaxCSFrameIndex; ++I) {
      Offset += MFI.getObjectSize(I);
      Offset = alignTo(Offset, MFI.getObjectAlign(I));
      if (AssignOffsets)
        Assign(I, -Offset);
    }
  }

  // Ensure that the callee-save area is aligned to 16 bytes.
  Offset = alignTo(Offset, Align(16U));

  // Create a buffer of SVE objects to allocate and sort it.
  SmallVector<int, 8> ObjectsToAllocate;
  // If we have a stack protector, and we've previously decided that we have
  // SVE objects on the stack and thus need it to go in the SVE stack area,
  // then it needs to go first.
  int StackProtectorFI = -1;
  if (MFI.hasStackProtectorIndex()) {
    StackProtectorFI = MFI.getStackProtectorIndex();
    if (MFI.getStackID(StackProtectorFI) == TargetStackID::ScalableVector)
      ObjectsToAllocate.push_back(StackProtectorFI);
  }
  for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
    unsigned StackID = MFI.getStackID(I);
    if (StackID != TargetStackID::ScalableVector)
      continue;
    if (I == StackProtectorFI)
      continue;
    if (MaxCSFrameIndex >= I && I >= MinCSFrameIndex)
      continue;
    if (MFI.isDeadObjectIndex(I))
      continue;

    ObjectsToAllocate.push_back(I);
  }

  // Allocate all SVE locals and spills.
  for (unsigned FI : ObjectsToAllocate) {
    Align Alignment = MFI.getObjectAlign(FI);
    // FIXME: Given that the length of SVE vectors is not necessarily a power
    // of two, we'd need to align every object dynamically at runtime if the
    // alignment is larger than 16. This is not yet supported.
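    // (For example, with a 16-byte vector length an odd "mul vl" offset is
    // only 16-byte aligned, so a hypothetical 32-byte-aligned SVE object could
    // not be placed correctly without a runtime adjustment.)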
    if (Alignment > Align(16))
      report_fatal_error(
          "Alignment of scalable vectors > 16 bytes is not yet supported");

    Offset = alignTo(Offset + MFI.getObjectSize(FI), Alignment);
    if (AssignOffsets)
      Assign(FI, -Offset);
  }

  return Offset;
}

int64_t AArch64FrameLowering::estimateSVEStackObjectOffsets(
    MachineFrameInfo &MFI) const {
  int MinCSFrameIndex, MaxCSFrameIndex;
  return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex,
                                        false);
}

int64_t AArch64FrameLowering::assignSVEStackObjectOffsets(
    MachineFrameInfo &MFI, int &MinCSFrameIndex, int &MaxCSFrameIndex) const {
  return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex,
                                        true);
}

void AArch64FrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF, RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  assert(getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown &&
         "Upwards growing stack unsupported");

  int MinCSFrameIndex, MaxCSFrameIndex;
  int64_t SVEStackSize =
      assignSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex);

  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  AFI->setStackSizeSVE(alignTo(SVEStackSize, 16U));
  AFI->setMinMaxSVECSFrameIndex(MinCSFrameIndex, MaxCSFrameIndex);

  // If this function isn't doing Win64-style C++ EH, we don't need to do
  // anything.
  if (!MF.hasEHFunclets())
    return;
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo();

  MachineBasicBlock &MBB = MF.front();
  auto MBBI = MBB.begin();
  while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
    ++MBBI;

  // Create an UnwindHelp object.
  // The UnwindHelp object is allocated at the start of the fixed object area.
  int64_t FixedObject =
      getFixedObjectSize(MF, AFI, /*IsWin64*/ true, /*IsFunclet*/ false);
  int UnwindHelpFI = MFI.CreateFixedObject(/*Size*/ 8,
                                           /*SPOffset*/ -FixedObject,
                                           /*IsImmutable=*/false);
  EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;

  // We need to store -2 into the UnwindHelp object at the start of the
  // function.
  DebugLoc DL;
  RS->enterBasicBlockEnd(MBB);
  RS->backward(std::prev(MBBI));
  Register DstReg = RS->FindUnusedReg(&AArch64::GPR64commonRegClass);
  assert(DstReg && "There must be a free register after frame setup");
  BuildMI(MBB, MBBI, DL, TII.get(AArch64::MOVi64imm), DstReg).addImm(-2);
  BuildMI(MBB, MBBI, DL, TII.get(AArch64::STURXi))
      .addReg(DstReg, getKillRegState(true))
      .addFrameIndex(UnwindHelpFI)
      .addImm(0);
}

namespace {
struct TagStoreInstr {
  MachineInstr *MI;
  int64_t Offset, Size;
  explicit TagStoreInstr(MachineInstr *MI, int64_t Offset, int64_t Size)
      : MI(MI), Offset(Offset), Size(Size) {}
};

class TagStoreEdit {
  MachineFunction *MF;
  MachineBasicBlock *MBB;
  MachineRegisterInfo *MRI;
  // Tag store instructions that are being replaced.
  SmallVector<TagStoreInstr, 8> TagStores;
  // Combined memref arguments of the above instructions.
  SmallVector<MachineMemOperand *, 8> CombinedMemRefs;

  // Replace allocation tags in [FrameReg + FrameRegOffset, FrameReg +
  // FrameRegOffset + Size) with the address tag of SP.
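  // For example (values purely illustrative), two adjacent 16-byte tag stores
  // covering [SP, SP+32) would be represented here with FrameRegOffset = 0 and
  // Size = 32, and could be re-emitted as a single ST2G.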
  Register FrameReg;
  StackOffset FrameRegOffset;
  int64_t Size;
  // If not std::nullopt, move FrameReg to (FrameReg + FrameRegUpdate) at the
  // end.
  std::optional<int64_t> FrameRegUpdate;
  // MIFlags for any FrameReg updating instructions.
  unsigned FrameRegUpdateFlags;

  // Use zeroing instruction variants.
  bool ZeroData;
  DebugLoc DL;

  void emitUnrolled(MachineBasicBlock::iterator InsertI);
  void emitLoop(MachineBasicBlock::iterator InsertI);

public:
  TagStoreEdit(MachineBasicBlock *MBB, bool ZeroData)
      : MBB(MBB), ZeroData(ZeroData) {
    MF = MBB->getParent();
    MRI = &MF->getRegInfo();
  }
  // Add an instruction to be replaced. Instructions must be added in
  // ascending order of Offset, and have to be adjacent.
  void addInstruction(TagStoreInstr I) {
    assert((TagStores.empty() ||
            TagStores.back().Offset + TagStores.back().Size == I.Offset) &&
           "Non-adjacent tag store instructions.");
    TagStores.push_back(I);
  }
  void clear() { TagStores.clear(); }
  // Emit equivalent code at the given location, and erase the current set of
  // instructions. May skip if the replacement is not profitable. May
  // invalidate the input iterator and replace it with a valid one.
  void emitCode(MachineBasicBlock::iterator &InsertI,
                const AArch64FrameLowering *TFI, bool TryMergeSPUpdate);
};

void TagStoreEdit::emitUnrolled(MachineBasicBlock::iterator InsertI) {
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();

  const int64_t kMinOffset = -256 * 16;
  const int64_t kMaxOffset = 255 * 16;

  Register BaseReg = FrameReg;
  int64_t BaseRegOffsetBytes = FrameRegOffset.getFixed();
  if (BaseRegOffsetBytes < kMinOffset ||
      BaseRegOffsetBytes + (Size - Size % 32) > kMaxOffset ||
      // BaseReg can be FP, which is not necessarily aligned to 16 bytes. In
      // that case, BaseRegOffsetBytes will not be aligned to 16 bytes either,
      // which is required for the offset of ST2G.
      BaseRegOffsetBytes % 16 != 0) {
    Register ScratchReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
    emitFrameOffset(*MBB, InsertI, DL, ScratchReg, BaseReg,
                    StackOffset::getFixed(BaseRegOffsetBytes), TII);
    BaseReg = ScratchReg;
    BaseRegOffsetBytes = 0;
  }

  MachineInstr *LastI = nullptr;
  while (Size) {
    int64_t InstrSize = (Size > 16) ? 32 : 16;
    unsigned Opcode =
        InstrSize == 16
            ? (ZeroData ? AArch64::STZGi : AArch64::STGi)
            : (ZeroData ? AArch64::STZ2Gi : AArch64::ST2Gi);
    assert(BaseRegOffsetBytes % 16 == 0);
    MachineInstr *I = BuildMI(*MBB, InsertI, DL, TII->get(Opcode))
                          .addReg(AArch64::SP)
                          .addReg(BaseReg)
                          .addImm(BaseRegOffsetBytes / 16)
                          .setMemRefs(CombinedMemRefs);
    // A store to [BaseReg, #0] should go last for an opportunity to fold the
    // final SP adjustment in the epilogue.
    if (BaseRegOffsetBytes == 0)
      LastI = I;
    BaseRegOffsetBytes += InstrSize;
    Size -= InstrSize;
  }

  if (LastI)
    MBB->splice(InsertI, MBB, LastI);
}

void TagStoreEdit::emitLoop(MachineBasicBlock::iterator InsertI) {
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();

  Register BaseReg = FrameRegUpdate
                         ? FrameReg
                         : MRI->createVirtualRegister(&AArch64::GPR64RegClass);
  Register SizeReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);

  emitFrameOffset(*MBB, InsertI, DL, BaseReg, FrameReg, FrameRegOffset, TII);

  int64_t LoopSize = Size;
  // If the loop size is not a multiple of 32, split off one 16-byte store at
  // the end to fold the BaseReg update into.
  if (FrameRegUpdate && *FrameRegUpdate)
    LoopSize -= LoopSize % 32;
  MachineInstr *LoopI = BuildMI(*MBB, InsertI, DL,
                                TII->get(ZeroData ? AArch64::STZGloop_wback
                                                  : AArch64::STGloop_wback))
                            .addDef(SizeReg)
                            .addDef(BaseReg)
                            .addImm(LoopSize)
                            .addReg(BaseReg)
                            .setMemRefs(CombinedMemRefs);
  if (FrameRegUpdate)
    LoopI->setFlags(FrameRegUpdateFlags);

  int64_t ExtraBaseRegUpdate =
      FrameRegUpdate ? (*FrameRegUpdate - FrameRegOffset.getFixed() - Size) : 0;
  if (LoopSize < Size) {
    assert(FrameRegUpdate);
    assert(Size - LoopSize == 16);
    // Tag 16 more bytes at BaseReg and update BaseReg.
    BuildMI(*MBB, InsertI, DL,
            TII->get(ZeroData ? AArch64::STZGPostIndex : AArch64::STGPostIndex))
        .addDef(BaseReg)
        .addReg(BaseReg)
        .addReg(BaseReg)
        .addImm(1 + ExtraBaseRegUpdate / 16)
        .setMemRefs(CombinedMemRefs)
        .setMIFlags(FrameRegUpdateFlags);
  } else if (ExtraBaseRegUpdate) {
    // Update BaseReg.
    BuildMI(*MBB, InsertI, DL,
            TII->get(ExtraBaseRegUpdate > 0 ? AArch64::ADDXri
                                            : AArch64::SUBXri))
        .addDef(BaseReg)
        .addReg(BaseReg)
        .addImm(std::abs(ExtraBaseRegUpdate))
        .addImm(0)
        .setMIFlags(FrameRegUpdateFlags);
  }
}

// Check if *II is a register update that can be merged into the STGloop that
// ends at (Reg + Size). *TotalOffset is set to the required adjustment to Reg
// after the end of the loop.
bool canMergeRegUpdate(MachineBasicBlock::iterator II, unsigned Reg,
                       int64_t Size, int64_t *TotalOffset) {
  MachineInstr &MI = *II;
  if ((MI.getOpcode() == AArch64::ADDXri ||
       MI.getOpcode() == AArch64::SUBXri) &&
      MI.getOperand(0).getReg() == Reg && MI.getOperand(1).getReg() == Reg) {
    unsigned Shift = AArch64_AM::getShiftValue(MI.getOperand(3).getImm());
    int64_t Offset = MI.getOperand(2).getImm() << Shift;
    if (MI.getOpcode() == AArch64::SUBXri)
      Offset = -Offset;
    int64_t AbsPostOffset = std::abs(Offset - Size);
    const int64_t kMaxOffset =
        0xFFF; // Max encoding for unshifted ADDXri / SUBXri
    if (AbsPostOffset <= kMaxOffset && AbsPostOffset % 16 == 0) {
      *TotalOffset = Offset;
      return true;
    }
  }
  return false;
}

void mergeMemRefs(const SmallVectorImpl<TagStoreInstr> &TSE,
                  SmallVectorImpl<MachineMemOperand *> &MemRefs) {
  MemRefs.clear();
  for (auto &TS : TSE) {
    MachineInstr *MI = TS.MI;
    // An instruction without memory operands may access anything. Be
    // conservative and return an empty list.
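    // (In the MI layer an empty memref list is treated as "may access any
    // memory", so propagating it keeps the merged instruction at least as
    // conservative as its inputs.)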
    if (MI->memoperands_empty()) {
      MemRefs.clear();
      return;
    }
    MemRefs.append(MI->memoperands_begin(), MI->memoperands_end());
  }
}

void TagStoreEdit::emitCode(MachineBasicBlock::iterator &InsertI,
                            const AArch64FrameLowering *TFI,
                            bool TryMergeSPUpdate) {
  if (TagStores.empty())
    return;
  TagStoreInstr &FirstTagStore = TagStores[0];
  TagStoreInstr &LastTagStore = TagStores[TagStores.size() - 1];
  Size = LastTagStore.Offset - FirstTagStore.Offset + LastTagStore.Size;
  DL = TagStores[0].MI->getDebugLoc();

  Register Reg;
  FrameRegOffset = TFI->resolveFrameOffsetReference(
      *MF, FirstTagStore.Offset, false /*isFixed*/, false /*isSVE*/, Reg,
      /*PreferFP=*/false, /*ForSimm=*/true);
  FrameReg = Reg;
  FrameRegUpdate = std::nullopt;

  mergeMemRefs(TagStores, CombinedMemRefs);

  LLVM_DEBUG({
    dbgs() << "Replacing adjacent STG instructions:\n";
    for (const auto &Instr : TagStores)
      dbgs() << "  " << *Instr.MI;
  });

  // Size threshold where a loop becomes shorter than a linear sequence of
  // tagging instructions.
  const int kSetTagLoopThreshold = 176;
  if (Size < kSetTagLoopThreshold) {
    if (TagStores.size() < 2)
      return;
    emitUnrolled(InsertI);
  } else {
    MachineInstr *UpdateInstr = nullptr;
    int64_t TotalOffset = 0;
    if (TryMergeSPUpdate) {
      // See if we can merge the base register update into the STGloop.
      // This is done in AArch64LoadStoreOptimizer for "normal" stores,
      // but STGloop is way too unusual for that, and also it only
      // realistically happens in function epilogue. Also, STGloop is expanded
      // before that pass.
      if (InsertI != MBB->end() &&
          canMergeRegUpdate(InsertI, FrameReg, FrameRegOffset.getFixed() + Size,
                            &TotalOffset)) {
        UpdateInstr = &*InsertI++;
        LLVM_DEBUG(dbgs() << "Folding SP update into loop:\n  "
                          << *UpdateInstr);
      }
    }

    if (!UpdateInstr && TagStores.size() < 2)
      return;

    if (UpdateInstr) {
      FrameRegUpdate = TotalOffset;
      FrameRegUpdateFlags = UpdateInstr->getFlags();
    }
    emitLoop(InsertI);
    if (UpdateInstr)
      UpdateInstr->eraseFromParent();
  }

  for (auto &TS : TagStores)
    TS.MI->eraseFromParent();
}

bool isMergeableStackTaggingInstruction(MachineInstr &MI, int64_t &Offset,
                                        int64_t &Size, bool &ZeroData) {
  MachineFunction &MF = *MI.getParent()->getParent();
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  unsigned Opcode = MI.getOpcode();
  ZeroData = (Opcode == AArch64::STZGloop || Opcode == AArch64::STZGi ||
              Opcode == AArch64::STZ2Gi);

  if (Opcode == AArch64::STGloop || Opcode == AArch64::STZGloop) {
    if (!MI.getOperand(0).isDead() || !MI.getOperand(1).isDead())
      return false;
    if (!MI.getOperand(2).isImm() || !MI.getOperand(3).isFI())
      return false;
    Offset = MFI.getObjectOffset(MI.getOperand(3).getIndex());
    Size = MI.getOperand(2).getImm();
    return true;
  }

  if (Opcode == AArch64::STGi || Opcode == AArch64::STZGi)
    Size = 16;
  else if (Opcode == AArch64::ST2Gi || Opcode == AArch64::STZ2Gi)
    Size = 32;
  else
    return false;

  if (MI.getOperand(0).getReg() != AArch64::SP || !MI.getOperand(1).isFI())
    return false;

  Offset = MFI.getObjectOffset(MI.getOperand(1).getIndex()) +
           16 * MI.getOperand(2).getImm();
  return true;
}

// Detect a run of memory tagging instructions for adjacent stack frame slots,
// and replace them with a shorter instruction sequence:
// * replace STG + STG with ST2G
// * replace STGloop + STGloop with STGloop
// This code needs to run when stack slot offsets are already known, but before
// FrameIndex operands in STG instructions are eliminated.
MachineBasicBlock::iterator tryMergeAdjacentSTG(MachineBasicBlock::iterator II,
                                                const AArch64FrameLowering *TFI,
                                                RegScavenger *RS) {
  bool FirstZeroData;
  int64_t Size, Offset;
  MachineInstr &MI = *II;
  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator NextI = ++II;
  if (&MI == &MBB->instr_back())
    return II;
  if (!isMergeableStackTaggingInstruction(MI, Offset, Size, FirstZeroData))
    return II;

  SmallVector<TagStoreInstr, 4> Instrs;
  Instrs.emplace_back(&MI, Offset, Size);

  constexpr int kScanLimit = 10;
  int Count = 0;
  for (MachineBasicBlock::iterator E = MBB->end();
       NextI != E && Count < kScanLimit; ++NextI) {
    MachineInstr &MI = *NextI;
    bool ZeroData;
    int64_t Size, Offset;
    // Collect instructions that update memory tags with a FrameIndex operand
    // and (when applicable) constant size, and whose output registers are dead
    // (the latter is almost always the case in practice). Since these
    // instructions effectively have no inputs or outputs, we are free to skip
    // any non-aliasing instructions in between without tracking used
    // registers.
    if (isMergeableStackTaggingInstruction(MI, Offset, Size, ZeroData)) {
      if (ZeroData != FirstZeroData)
        break;
      Instrs.emplace_back(&MI, Offset, Size);
      continue;
    }

    // Only count non-transient, non-tagging instructions toward the scan
    // limit.
    if (!MI.isTransient())
      ++Count;

    // Just in case, stop before the epilogue code starts.
    if (MI.getFlag(MachineInstr::FrameSetup) ||
        MI.getFlag(MachineInstr::FrameDestroy))
      break;

    // Reject anything that may alias the collected instructions.
    if (MI.mayLoadOrStore() || MI.hasUnmodeledSideEffects())
      break;
  }

  // New code will be inserted after the last tagging instruction we've found.
  MachineBasicBlock::iterator InsertI = Instrs.back().MI;
  InsertI++;

  llvm::stable_sort(Instrs,
                    [](const TagStoreInstr &Left, const TagStoreInstr &Right) {
                      return Left.Offset < Right.Offset;
                    });

  // Make sure that we don't have any overlapping stores.
  int64_t CurOffset = Instrs[0].Offset;
  for (auto &Instr : Instrs) {
    if (CurOffset > Instr.Offset)
      return NextI;
    CurOffset = Instr.Offset + Instr.Size;
  }

  // Find contiguous runs of tagged memory and emit shorter instruction
  // sequences for them when possible.
  TagStoreEdit TSE(MBB, FirstZeroData);
  std::optional<int64_t> EndOffset;
  for (auto &Instr : Instrs) {
    if (EndOffset && *EndOffset != Instr.Offset) {
      // Found a gap.
      TSE.emitCode(InsertI, TFI, /*TryMergeSPUpdate=*/false);
      TSE.clear();
    }

    TSE.addInstruction(Instr);
    EndOffset = Instr.Offset + Instr.Size;
  }

  const MachineFunction *MF = MBB->getParent();
  // Multiple FP/SP updates in a loop cannot be described by CFI instructions.
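  // Hence only try folding the trailing SP update into the tag-store loop when
  // asynchronous unwind info is not required for this function.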
  TSE.emitCode(
      InsertI, TFI, /*TryMergeSPUpdate=*/
      !MF->getInfo<AArch64FunctionInfo>()->needsAsyncDwarfUnwindInfo(*MF));

  return InsertI;
}
} // namespace

void AArch64FrameLowering::processFunctionBeforeFrameIndicesReplaced(
    MachineFunction &MF, RegScavenger *RS = nullptr) const {
  if (StackTaggingMergeSetTag)
    for (auto &BB : MF)
      for (MachineBasicBlock::iterator II = BB.begin(); II != BB.end();)
        II = tryMergeAdjacentSTG(II, this, RS);
}

/// For Win64 AArch64 EH, the offset to the Unwind object is from the SP
/// before the update. This is easily retrieved as it is exactly the offset
/// that is set in processFunctionBeforeFrameFinalized.
StackOffset AArch64FrameLowering::getFrameIndexReferencePreferSP(
    const MachineFunction &MF, int FI, Register &FrameReg,
    bool IgnoreSPUpdates) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (IgnoreSPUpdates) {
    LLVM_DEBUG(dbgs() << "Offset from the SP for " << FI << " is "
                      << MFI.getObjectOffset(FI) << "\n");
    FrameReg = AArch64::SP;
    return StackOffset::getFixed(MFI.getObjectOffset(FI));
  }

  // Go to common code if we cannot provide sp + offset.
  if (MFI.hasVarSizedObjects() ||
      MF.getInfo<AArch64FunctionInfo>()->getStackSizeSVE() ||
      MF.getSubtarget().getRegisterInfo()->hasStackRealignment(MF))
    return getFrameIndexReference(MF, FI, FrameReg);

  FrameReg = AArch64::SP;
  return getStackOffset(MF, MFI.getObjectOffset(FI));
}

/// The parent frame offset (aka dispFrame) is only used on X86_64 to retrieve
/// the parent's frame pointer.
unsigned AArch64FrameLowering::getWinEHParentFrameOffset(
    const MachineFunction &MF) const {
  return 0;
}

/// Funclets only need to account for space for the callee saved registers,
/// as the locals are accounted for in the parent's stack frame.
unsigned AArch64FrameLowering::getWinEHFuncletFrameSize(
    const MachineFunction &MF) const {
  // This is the size of the pushed CSRs.
  unsigned CSSize =
      MF.getInfo<AArch64FunctionInfo>()->getCalleeSavedStackSize();
  // This is the amount of stack a funclet needs to allocate.
  return alignTo(CSSize + MF.getFrameInfo().getMaxCallFrameSize(),
                 getStackAlign());
}

namespace {
struct FrameObject {
  bool IsValid = false;
  // Index of the object in MFI.
  int ObjectIndex = 0;
  // Group ID this object belongs to.
  int GroupIndex = -1;
  // This object should be placed first (closest to SP).
  bool ObjectFirst = false;
  // This object's group (which always contains the object with
  // ObjectFirst==true) should be placed first.
  bool GroupFirst = false;
};

class GroupBuilder {
  SmallVector<int, 8> CurrentMembers;
  int NextGroupIndex = 0;
  std::vector<FrameObject> &Objects;

public:
  GroupBuilder(std::vector<FrameObject> &Objects) : Objects(Objects) {}
  void AddMember(int Index) { CurrentMembers.push_back(Index); }
  void EndCurrentGroup() {
    if (CurrentMembers.size() > 1) {
      // Create a new group with the current member list. This might remove
      // them from their pre-existing groups. That's OK, dealing with
      // overlapping groups is too hard and unlikely to make a difference.
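      // For example (indices purely illustrative), if fi#1 and fi#2 are tagged
      // back to back in the same block, they form one group here and are kept
      // adjacent by the sort in FrameObjectCompare below.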
      LLVM_DEBUG(dbgs() << "group:");
      for (int Index : CurrentMembers) {
        Objects[Index].GroupIndex = NextGroupIndex;
        LLVM_DEBUG(dbgs() << " " << Index);
      }
      LLVM_DEBUG(dbgs() << "\n");
      NextGroupIndex++;
    }
    CurrentMembers.clear();
  }
};

bool FrameObjectCompare(const FrameObject &A, const FrameObject &B) {
  // Objects at a lower index are closer to FP; objects at a higher index are
  // closer to SP.
  //
  // For consistency in our comparison, all invalid objects are placed
  // at the end. This also allows us to stop walking when we hit the
  // first invalid item after it's all sorted.
  //
  // The "first" object goes first (closest to SP), followed by the members of
  // the "first" group.
  //
  // The rest are sorted by the group index to keep the groups together.
  // Higher numbered groups are more likely to be around longer (i.e. untagged
  // in the function epilogue and not at some earlier point). Place them closer
  // to SP.
  //
  // If all else is equal, sort by the object index to keep the objects in the
  // original order.
  return std::make_tuple(!A.IsValid, A.ObjectFirst, A.GroupFirst, A.GroupIndex,
                         A.ObjectIndex) <
         std::make_tuple(!B.IsValid, B.ObjectFirst, B.GroupFirst, B.GroupIndex,
                         B.ObjectIndex);
}
} // namespace

void AArch64FrameLowering::orderFrameObjects(
    const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
  if (!OrderFrameObjects || ObjectsToAllocate.empty())
    return;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  std::vector<FrameObject> FrameObjects(MFI.getObjectIndexEnd());
  for (auto &Obj : ObjectsToAllocate) {
    FrameObjects[Obj].IsValid = true;
    FrameObjects[Obj].ObjectIndex = Obj;
  }

  // Identify stack slots that are tagged at the same time.
  GroupBuilder GB(FrameObjects);
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      if (MI.isDebugInstr())
        continue;
      int OpIndex;
      switch (MI.getOpcode()) {
      case AArch64::STGloop:
      case AArch64::STZGloop:
        OpIndex = 3;
        break;
      case AArch64::STGi:
      case AArch64::STZGi:
      case AArch64::ST2Gi:
      case AArch64::STZ2Gi:
        OpIndex = 1;
        break;
      default:
        OpIndex = -1;
      }

      int TaggedFI = -1;
      if (OpIndex >= 0) {
        const MachineOperand &MO = MI.getOperand(OpIndex);
        if (MO.isFI()) {
          int FI = MO.getIndex();
          if (FI >= 0 && FI < MFI.getObjectIndexEnd() &&
              FrameObjects[FI].IsValid)
            TaggedFI = FI;
        }
      }

      // If this is a stack tagging instruction for a slot that is not part of
      // a group yet, either start a new group or add it to the current one.
      if (TaggedFI >= 0)
        GB.AddMember(TaggedFI);
      else
        GB.EndCurrentGroup();
    }
    // Groups should never span multiple basic blocks.
    GB.EndCurrentGroup();
  }

  // If the function's tagged base pointer is pinned to a stack slot, we want
  // to put that slot first when possible. This will likely place it at SP + 0,
  // and save one instruction when generating the base pointer because IRG does
  // not allow an immediate offset.
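  // For example (register chosen for illustration), with the pinned slot at
  // SP + 0 the tagged base pointer can be materialized directly as
  //    irg  x19, sp
  // whereas any other offset would first need an "add x19, sp, #offset".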
  const AArch64FunctionInfo &AFI = *MF.getInfo<AArch64FunctionInfo>();
  std::optional<int> TBPI = AFI.getTaggedBasePointerIndex();
  if (TBPI) {
    FrameObjects[*TBPI].ObjectFirst = true;
    FrameObjects[*TBPI].GroupFirst = true;
    int FirstGroupIndex = FrameObjects[*TBPI].GroupIndex;
    if (FirstGroupIndex >= 0)
      for (FrameObject &Object : FrameObjects)
        if (Object.GroupIndex == FirstGroupIndex)
          Object.GroupFirst = true;
  }

  llvm::stable_sort(FrameObjects, FrameObjectCompare);

  int i = 0;
  for (auto &Obj : FrameObjects) {
    // All invalid items are sorted at the end, so it's safe to stop.
    if (!Obj.IsValid)
      break;
    ObjectsToAllocate[i++] = Obj.ObjectIndex;
  }

  LLVM_DEBUG({
    dbgs() << "Final frame order:\n";
    for (auto &Obj : FrameObjects) {
      if (!Obj.IsValid)
        break;
      dbgs() << "  " << Obj.ObjectIndex << ": group " << Obj.GroupIndex;
      if (Obj.ObjectFirst)
        dbgs() << ", first";
      if (Obj.GroupFirst)
        dbgs() << ", group-first";
      dbgs() << "\n";
    }
  });
}