//===--- AArch64CallLowering.cpp - Call lowering --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AArch64CallLowering.h"
#include "AArch64ISelLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/MachineValueType.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>

#define DEBUG_TYPE "aarch64-call-lowering"

using namespace llvm;

AArch64CallLowering::AArch64CallLowering(const AArch64TargetLowering &TLI)
    : CallLowering(&TLI) {}

namespace {
struct IncomingArgHandler : public CallLowering::ValueHandler {
  IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                     CCAssignFn *AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn), StackUsed(0) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    auto AddrReg = MIRBuilder.buildFrameIndex(LLT::pointer(0, 64), FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);
    switch (VA.getLocInfo()) {
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    }
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size,
        inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the BL).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;

  bool isIncomingArgumentHandler() const override { return true; }

  uint64_t StackUsed;
};

struct FormalArgHandler : public IncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                   CCAssignFn *AssignFn)
      : IncomingArgHandler(MIRBuilder, MRI, AssignFn) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

struct CallReturnHandler : public IncomingArgHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : IncomingArgHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

struct OutgoingArgHandler : public CallLowering::ValueHandler {
  OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                     MachineInstrBuilder MIB, CCAssignFn *AssignFn,
                     CCAssignFn *AssignFnVarArg, bool IsTailCall = false,
                     int FPDiff = 0)
      : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
        AssignFnVarArg(AssignFnVarArg), IsTailCall(IsTailCall), FPDiff(FPDiff),
        StackSize(0), SPReg(0) {}

  bool isIncomingArgumentHandler() const override { return false; }

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    MachineFunction &MF = MIRBuilder.getMF();
    LLT p0 = LLT::pointer(0, 64);
    LLT s64 = LLT::scalar(64);

    if (IsTailCall) {
      Offset += FPDiff;
      int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
      auto FIReg = MIRBuilder.buildFrameIndex(p0, FI);
      MPO = MachinePointerInfo::getFixedStack(MF, FI);
      return FIReg.getReg(0);
    }

    if (!SPReg)
      SPReg = MIRBuilder.buildCopy(p0, Register(AArch64::SP)).getReg(0);

    auto OffsetReg = MIRBuilder.buildConstant(s64, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    MIB.addUse(PhysReg, RegState::Implicit);
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size,
                                       inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildStore(ValVReg, Addr, *MMO);
  }

  void assignValueToAddress(const CallLowering::ArgInfo &Arg, Register Addr,
                            uint64_t Size, MachinePointerInfo &MPO,
                            CCValAssign &VA) override {
    unsigned MaxSize = Size * 8;
    // For varargs, we always want to extend them to 8 bytes, in which case
    // we disable setting a max.
    if (!Arg.IsFixed)
      MaxSize = 0;

    Register ValVReg = VA.getLocInfo() != CCValAssign::LocInfo::FPExt
                           ? extendRegister(Arg.Regs[0], VA, MaxSize)
                           : Arg.Regs[0];

    // If we extended we might need to adjust the MMO's Size.
    const LLT RegTy = MRI.getType(ValVReg);
    if (RegTy.getSizeInBytes() > Size)
      Size = RegTy.getSizeInBytes();

    assignValueToAddress(ValVReg, Addr, Size, MPO, VA);
  }

  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    bool Res;
    if (Info.IsFixed)
      Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
    else
      Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);

    StackSize = State.getNextStackOffset();
    return Res;
  }

  MachineInstrBuilder MIB;
  CCAssignFn *AssignFnVarArg;
  bool IsTailCall;

  /// For tail calls, the byte offset of the call's argument area from the
  /// callee's. Unused elsewhere.
  int FPDiff;
  uint64_t StackSize;

  // Cache the SP register vreg if we need it more than once in this call site.
  Register SPReg;
};
} // namespace

static bool doesCalleeRestoreStack(CallingConv::ID CallConv, bool TailCallOpt) {
  return CallConv == CallingConv::Fast && TailCallOpt;
}

void AArch64CallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
    const DataLayout &DL, MachineRegisterInfo &MRI,
    CallingConv::ID CallConv) const {
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);

  if (SplitVTs.size() == 0)
    return;

  if (SplitVTs.size() == 1) {
    // No splitting to do, but we want to replace the original type (e.g. [1 x
    // double] -> double).
    SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
                           OrigArg.Flags[0], OrigArg.IsFixed);
    return;
  }

  // Create one ArgInfo for each virtual register in the original ArgInfo.
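  // E.g. an aggregate such as {i64, double} arrives as two virtual registers
  // and produces two ArgInfos below, one per member, each carrying the
  // original argument's flags.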
  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");

  bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
      OrigArg.Ty, CallConv, false);
  for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
    Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
    SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags[0],
                           OrigArg.IsFixed);
    if (NeedsRegBlock)
      SplitArgs.back().Flags[0].setInConsecutiveRegs();
  }

  SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
}

bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                      const Value *Val,
                                      ArrayRef<Register> VRegs,
                                      Register SwiftErrorVReg) const {
  auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
  assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
         "Return value without a vreg");

  bool Success = true;
  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();

    MachineRegisterInfo &MRI = MF.getRegInfo();
    const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
    CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
    auto &DL = F.getParent()->getDataLayout();
    LLVMContext &Ctx = Val->getType()->getContext();

    SmallVector<EVT, 4> SplitEVTs;
    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
    assert(VRegs.size() == SplitEVTs.size() &&
           "For each split Type there should be exactly one VReg.");

    SmallVector<ArgInfo, 8> SplitArgs;
    CallingConv::ID CC = F.getCallingConv();

    for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
      if (TLI.getNumRegistersForCallingConv(Ctx, CC, SplitEVTs[i]) > 1) {
        LLVM_DEBUG(dbgs()
                   << "Can't handle extended arg types which need split");
        return false;
      }

      Register CurVReg = VRegs[i];
      ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx)};
      setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);

      // i1 is a special case because SDAG i1 true is naturally zero extended
      // when widened using ANYEXT. We need to do it explicitly here.
      if (MRI.getType(CurVReg).getSizeInBits() == 1) {
        CurVReg = MIRBuilder.buildZExt(LLT::scalar(8), CurVReg).getReg(0);
      } else {
        // Some types will need extending as specified by the CC.
        MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, SplitEVTs[i]);
        if (EVT(NewVT) != SplitEVTs[i]) {
          unsigned ExtendOp = TargetOpcode::G_ANYEXT;
          if (F.getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                             Attribute::SExt))
            ExtendOp = TargetOpcode::G_SEXT;
          else if (F.getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                                  Attribute::ZExt))
            ExtendOp = TargetOpcode::G_ZEXT;

          LLT NewLLT(NewVT);
          LLT OldLLT(MVT::getVT(CurArgInfo.Ty));
          CurArgInfo.Ty = EVT(NewVT).getTypeForEVT(Ctx);
          // Instead of an extend, we might have a vector type which needs
          // padding with more elements, e.g. <2 x half> -> <4 x half>.
          if (NewVT.isVector()) {
            if (OldLLT.isVector()) {
              if (NewLLT.getNumElements() > OldLLT.getNumElements()) {
                // We don't handle VA types which are not exactly twice the
                // size, but can easily be done in future.
                if (NewLLT.getNumElements() != OldLLT.getNumElements() * 2) {
                  LLVM_DEBUG(dbgs()
                             << "Outgoing vector ret has too many elts");
                  return false;
                }
                auto Undef = MIRBuilder.buildUndef({OldLLT});
                CurVReg =
                    MIRBuilder.buildMerge({NewLLT}, {CurVReg, Undef}).getReg(0);
              } else {
                // Just do a vector extend.
                CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
                              .getReg(0);
              }
            } else if (NewLLT.getNumElements() == 2) {
              // We need to pad a <1 x S> type to <2 x S>. Since we don't have
              // <1 x S> vector types in GISel we use a build_vector instead
              // of a vector merge/concat.
              auto Undef = MIRBuilder.buildUndef({OldLLT});
              CurVReg =
                  MIRBuilder
                      .buildBuildVector({NewLLT}, {CurVReg, Undef.getReg(0)})
                      .getReg(0);
            } else {
              LLVM_DEBUG(dbgs() << "Could not handle ret ty");
              return false;
            }
          } else {
            // A scalar extend.
            CurVReg =
                MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg}).getReg(0);
          }
        }
      }
      if (CurVReg != CurArgInfo.Regs[0]) {
        CurArgInfo.Regs[0] = CurVReg;
        // Reset the arg flags after modifying CurVReg.
        setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
      }
      splitToValueTypes(CurArgInfo, SplitArgs, DL, MRI, CC);
    }

    OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFn, AssignFn);
    Success = handleAssignments(MIRBuilder, SplitArgs, Handler);
  }

  if (SwiftErrorVReg) {
    MIB.addUse(AArch64::X21, RegState::Implicit);
    MIRBuilder.buildCopy(AArch64::X21, SwiftErrorVReg);
  }

  MIRBuilder.insertInstr(MIB);
  return Success;
}

/// Helper function to compute forwarded registers for musttail calls. Computes
/// the forwarded registers, sets MBB liveness, and emits COPY instructions
/// that can be used to save + restore registers later.
static void handleMustTailForwardedRegisters(MachineIRBuilder &MIRBuilder,
                                             CCAssignFn *AssignFn) {
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineFunction &MF = MIRBuilder.getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasMustTailInVarArgFunc())
    return;

  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  const Function &F = MF.getFunction();
  assert(F.isVarArg() && "Expected F to be vararg?");

  // Compute the set of forwarded registers. The rest are scratch.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), /*IsVarArg=*/true, MF, ArgLocs,
                 F.getContext());
  SmallVector<MVT, 2> RegParmTypes;
  RegParmTypes.push_back(MVT::i64);
  RegParmTypes.push_back(MVT::f128);

  // Later on, we can use this vector to restore the registers if necessary.
  SmallVectorImpl<ForwardedRegister> &Forwards =
      FuncInfo->getForwardedMustTailRegParms();
  CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, AssignFn);

  // Conservatively forward X8, since it might be used for an aggregate
  // return.
  if (!CCInfo.isAllocated(AArch64::X8)) {
    unsigned X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
    Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64));
  }

  // Add the forwards to the MachineBasicBlock and MachineFunction.
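  // Each forwarded physical register becomes a live-in of the entry block and
  // is copied into the virtual register recorded in Forwards; lowerTailCall
  // copies these values back into the physical registers at a musttail call.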
  for (const auto &F : Forwards) {
    MBB.addLiveIn(F.PReg);
    MIRBuilder.buildCopy(Register(F.VReg), Register(F.PReg));
  }
}

bool AArch64CallLowering::fallBackToDAGISel(const Function &F) const {
  if (isa<ScalableVectorType>(F.getReturnType()))
    return true;
  return llvm::any_of(F.args(), [](const Argument &A) {
    return isa<ScalableVectorType>(A.getType());
  });
}

bool AArch64CallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getParent()->getDataLayout();

  SmallVector<ArgInfo, 8> SplitArgs;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()).isZero())
      continue;

    ArgInfo OrigArg{VRegs[i], Arg.getType()};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);

    splitToValueTypes(OrigArg, SplitArgs, DL, MRI, F.getCallingConv());
    ++i;
  }

  if (!MBB.empty())
    MIRBuilder.setInstr(*MBB.begin());

  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  CCAssignFn *AssignFn =
      TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);

  FormalArgHandler Handler(MIRBuilder, MRI, AssignFn);
  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
    return false;

  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  uint64_t StackOffset = Handler.StackUsed;
  if (F.isVarArg()) {
    auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
    if (!Subtarget.isTargetDarwin()) {
      // FIXME: we need to reimplement saveVarArgsRegisters from
      // AArch64ISelLowering.
      return false;
    }

    // We currently pass all varargs at 8-byte alignment, or 4 in ILP32.
    StackOffset =
        alignTo(Handler.StackUsed, Subtarget.isTargetILP32() ? 4 : 8);

    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    FuncInfo->setVarArgsStackIndex(
        MFI.CreateFixedObject(4, StackOffset, true));
  }

  if (doesCalleeRestoreStack(F.getCallingConv(),
                             MF.getTarget().Options.GuaranteedTailCallOpt)) {
    // We have a non-standard ABI, so why not make full use of the stack that
    // we're going to pop? It must be aligned to 16 B in any case.
    StackOffset = alignTo(StackOffset, 16);

    // If we're expected to restore the stack (e.g. fastcc), then we'll be
    // adding a multiple of 16.
    FuncInfo->setArgumentStackToRestore(StackOffset);

    // Our own callers will guarantee that the space is free by giving an
    // aligned value to CALLSEQ_START.
  }

  // When we tail call, we need to check if the callee's arguments
  // will fit on the caller's stack. So, whenever we lower formal arguments,
  // we should keep track of this information, since we might lower a tail call
  // in this function later.
  FuncInfo->setBytesInStackArgArea(StackOffset);

  auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  if (Subtarget.hasCustomCallingConv())
    Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);

  handleMustTailForwardedRegisters(MIRBuilder, AssignFn);

  // Move back to the end of the basic block.
  MIRBuilder.setMBB(MBB);

  return true;
}

/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC) {
  return CC == CallingConv::Fast;
}

/// Return true if we might ever do TCO for calls with this calling convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::C:
  case CallingConv::PreserveMost:
  case CallingConv::Swift:
    return true;
  default:
    return canGuaranteeTCO(CC);
  }
}

/// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn
/// for CC.
static std::pair<CCAssignFn *, CCAssignFn *>
getAssignFnsForCC(CallingConv::ID CC, const AArch64TargetLowering &TLI) {
  return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
}

bool AArch64CallLowering::doCallerAndCalleePassArgsTheSameWay(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &InArgs) const {
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  // If the calling conventions match, then everything must be the same.
  if (CalleeCC == CallerCC)
    return true;

  // Check if the caller and callee will handle arguments in the same way.
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  CCAssignFn *CalleeAssignFnFixed;
  CCAssignFn *CalleeAssignFnVarArg;
  std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
      getAssignFnsForCC(CalleeCC, TLI);

  CCAssignFn *CallerAssignFnFixed;
  CCAssignFn *CallerAssignFnVarArg;
  std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
      getAssignFnsForCC(CallerCC, TLI);

  if (!resultsCompatible(Info, MF, InArgs, *CalleeAssignFnFixed,
                         *CalleeAssignFnVarArg, *CallerAssignFnFixed,
                         *CallerAssignFnVarArg))
    return false;

  // Make sure that the caller and callee preserve all of the same registers.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
  if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv()) {
    TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
    TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
  }

  return TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved);
}

bool AArch64CallLowering::areCalleeOutgoingArgsTailCallable(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &OutArgs) const {
  // If there are no outgoing arguments, then we are done.
  if (OutArgs.empty())
    return true;

  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();

  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  // We have outgoing arguments. Make sure that we can tail call with them.
  SmallVector<CCValAssign, 16> OutLocs;
  CCState OutInfo(CalleeCC, false, MF, OutLocs, CallerF.getContext());

  if (!analyzeArgInfo(OutInfo, OutArgs, *AssignFnFixed, *AssignFnVarArg)) {
    LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
    return false;
  }

  // Make sure that they can fit on the caller's stack.
  const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  if (OutInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) {
    LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
    return false;
  }

  // Verify that the parameters in callee-saved registers match.
  // TODO: Port this over to CallLowering as general code once swiftself is
  // supported.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned i = 0; i < OutLocs.size(); ++i) {
    auto &ArgLoc = OutLocs[i];
    // If it's not a register, it's fine.
    if (!ArgLoc.isRegLoc()) {
      if (Info.IsVarArg) {
        // Be conservative and disallow variadic memory operands to match
        // SDAG's behaviour.
        // FIXME: If the caller's calling convention is C, then we can
        // potentially use its argument area. However, for cases like fastcc,
        // we can't do anything.
        LLVM_DEBUG(
            dbgs()
            << "... Cannot tail call vararg function with stack arguments\n");
        return false;
      }
      continue;
    }

    Register Reg = ArgLoc.getLocReg();

    // Only look at callee-saved registers.
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
      continue;

    LLVM_DEBUG(
        dbgs()
        << "... Call has an argument passed in a callee-saved register.\n");

    // Check if it was copied from.
    ArgInfo &OutInfo = OutArgs[i];

    if (OutInfo.Regs.size() > 1) {
      LLVM_DEBUG(
          dbgs() << "... Cannot handle arguments in multiple registers.\n");
      return false;
    }

    // Check if we copy the register, walking through copies from virtual
    // registers. Note that getDefIgnoringCopies does not ignore copies from
    // physical registers.
    MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
    if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
      LLVM_DEBUG(
          dbgs()
          << "... Parameter was not copied into a VReg, cannot tail call.\n");
      return false;
    }

    // Got a copy. Verify that it's the same as the register we want.
    Register CopyRHS = RegDef->getOperand(1).getReg();
    if (CopyRHS != Reg) {
      LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
                           "VReg, cannot tail call.\n");
      return false;
    }
  }

  return true;
}

bool AArch64CallLowering::isEligibleForTailCallOptimization(
    MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &InArgs,
    SmallVectorImpl<ArgInfo> &OutArgs) const {

  // Must pass all target-independent checks in order to tail call optimize.
  if (!Info.IsTailCall)
    return false;

  CallingConv::ID CalleeCC = Info.CallConv;
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &CallerF = MF.getFunction();

  LLVM_DEBUG(dbgs() << "Attempting to lower call as tail call\n");

  if (Info.SwiftErrorVReg) {
    // TODO: We should handle this.
    // Note that this is also handled by the check for no outgoing arguments.
    // Proactively disabling this though, because the swifterror handling in
    // lowerCall inserts a COPY *after* the location of the call.
    LLVM_DEBUG(dbgs() << "... Cannot handle tail calls with swifterror yet.\n");
    return false;
  }

  if (!mayTailCallThisCC(CalleeCC)) {
    LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
    return false;
  }

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call. Working around this *is* possible
  // (see X86).
  //
  // FIXME: In AArch64ISelLowering, this isn't worked around. Can/should we try
  // it?
  //
  // On Windows, "inreg" attributes signify non-aggregate indirect returns.
  // In this case, it is necessary to save/restore X0 in the callee. Tail
  // call opt interferes with this. So we disable tail call opt when the
  // caller has an argument with "inreg" attribute.
  //
  // FIXME: Check whether the callee also has an "inreg" argument.
  //
  // When the caller has a swifterror argument, we don't want to tail call
  // because we would have to move into the swifterror register before the
  // tail call.
  if (any_of(CallerF.args(), [](const Argument &A) {
        return A.hasByValAttr() || A.hasInRegAttr() || A.hasSwiftErrorAttr();
      })) {
    LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval, "
                         "inreg, or swifterror arguments\n");
    return false;
  }

  // Externally-defined functions with weak linkage should not be
  // tail-called on AArch64 when the OS does not support dynamic
  // pre-emption of symbols, as the AAELF spec requires normal calls
  // to undefined weak functions to be replaced with a NOP or jump to the
  // next instruction. The behaviour of branch instructions in this
  // situation (as used for tail calls) is implementation-defined, so we
  // cannot rely on the linker replacing the tail call with a return.
  if (Info.Callee.isGlobal()) {
    const GlobalValue *GV = Info.Callee.getGlobal();
    const Triple &TT = MF.getTarget().getTargetTriple();
    if (GV->hasExternalWeakLinkage() &&
        (!TT.isOSWindows() || TT.isOSBinFormatELF() ||
         TT.isOSBinFormatMachO())) {
      LLVM_DEBUG(dbgs() << "... Cannot tail call externally-defined function "
                           "with weak linkage for this OS.\n");
      return false;
    }
  }

  // If we have -tailcallopt, then we're done.
  if (MF.getTarget().Options.GuaranteedTailCallOpt)
    return canGuaranteeTCO(CalleeCC) && CalleeCC == CallerF.getCallingConv();

  // We don't have -tailcallopt, so we're allowed to change the ABI (sibcall).
  // Try to find cases where we can do that.

  // I want anyone implementing a new calling convention to think long and hard
  // about this assert.
  assert((!Info.IsVarArg || CalleeCC == CallingConv::C) &&
         "Unexpected variadic calling convention");

  // Verify that the incoming and outgoing arguments from the callee are
  // safe to tail call.
  if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
    LLVM_DEBUG(
        dbgs()
        << "... Caller and callee have incompatible calling conventions.\n");
    return false;
  }

  if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
    return false;

  LLVM_DEBUG(dbgs() << "... Call is eligible for tail call optimization.\n");
  return true;
}

static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
                              bool IsTailCall) {
  if (!IsTailCall)
    return IsIndirect ? getBLRCallOpcode(CallerF) : (unsigned)AArch64::BL;

  if (!IsIndirect)
    return AArch64::TCRETURNdi;

  // When BTI is enabled, we need to use TCRETURNriBTI to make sure that we use
  // x16 or x17.
  if (CallerF.getFunction().hasFnAttribute("branch-target-enforcement"))
    return AArch64::TCRETURNriBTI;

  return AArch64::TCRETURNri;
}

bool AArch64CallLowering::lowerTailCall(
    MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &OutArgs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();

  // True when we're tail calling, but without -tailcallopt.
  bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt;

  // TODO: Right now, regbankselect doesn't know how to handle the rtcGPR64
  // register class. Until we can do that, we should fall back here.
  if (F.hasFnAttribute("branch-target-enforcement")) {
    LLVM_DEBUG(
        dbgs() << "Cannot lower indirect tail calls with BTI enabled yet.\n");
    return false;
  }

  // Find out which ABI gets to decide where things go.
  CallingConv::ID CalleeCC = Info.CallConv;
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  MachineInstrBuilder CallSeqStart;
  if (!IsSibCall)
    CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);

  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true);
  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  MIB.add(Info.Callee);

  // Byte offset for the tail call. When we are sibcalling, this will always
  // be 0.
  MIB.addImm(0);

  // Tell the call which registers are clobbered.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
  if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
  MIB.addRegMask(Mask);

  if (TRI->isAnyArgRegReserved(MF))
    TRI->emitReservedArgRegCallError(MF);

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because the
  // caller will deallocate the entire stack and the callee still expects its
  // arguments to begin at SP+0.
  int FPDiff = 0;

  // This will be 0 for sibcalls, potentially nonzero for tail calls produced
  // by -tailcallopt. For sibcalls, the memory operands for the call are
  // already available in the caller's incoming argument space.
  unsigned NumBytes = 0;
  if (!IsSibCall) {
    // We aren't sibcalling, so we need to compute FPDiff. We need to do this
    // before handling assignments, because FPDiff must be known for memory
    // arguments.
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
    SmallVector<CCValAssign, 16> OutLocs;
    CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());
    analyzeArgInfo(OutInfo, OutArgs, *AssignFnFixed, *AssignFnVarArg);

    // The callee will pop the argument stack as a tail call. Thus, we must
    // keep it 16-byte aligned.
    NumBytes = alignTo(OutInfo.getNextStackOffset(), 16);

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
    // actually shrink the stack.
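    // For example, NumReusableBytes = 32 and NumBytes = 16 gives FPDiff = +16,
    // while NumReusableBytes = 16 and NumBytes = 48 gives FPDiff = -32.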
    FPDiff = NumReusableBytes - NumBytes;

    // The stack pointer must be 16-byte aligned at all times it's used for a
    // memory operation, which in practice means at *all* times and in
    // particular across call boundaries. Therefore our own arguments started
    // at a 16-byte aligned SP and the delta applied for the tail call should
    // satisfy the same constraint.
    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
  }

  const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();

  // Do the actual argument marshalling.
  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
                             AssignFnVarArg, true, FPDiff);
  if (!handleAssignments(MIRBuilder, OutArgs, Handler))
    return false;

  if (Info.IsVarArg && Info.IsMustTailCall) {
    // Now we know what's being passed to the function. Add uses to the call
    // for the forwarded registers that we *aren't* passing as parameters.
    // This will preserve the copies we built earlier.
    for (const auto &F : Forwards) {
      Register ForwardedReg = F.PReg;
      // If the register is already passed, or aliases a register which is
      // already being passed, then skip it.
      if (any_of(MIB->uses(), [&ForwardedReg, &TRI](const MachineOperand &Use) {
            if (!Use.isReg())
              return false;
            return TRI->regsOverlap(Use.getReg(), ForwardedReg);
          }))
        continue;

      // We aren't passing it already, so we should add it to the call.
      MIRBuilder.buildCopy(ForwardedReg, Register(F.VReg));
      MIB.addReg(ForwardedReg, RegState::Implicit);
    }
  }

  // If we have -tailcallopt, we need to adjust the stack. We'll do the call
  // sequence start and end here.
  if (!IsSibCall) {
    MIB->getOperand(1).setImm(FPDiff);
    CallSeqStart.addImm(NumBytes).addImm(0);
    // End the call sequence *before* emitting the call. Normally, we would
    // tidy the frame up after the call. However, here, we've laid out the
    // parameters so that when SP is reset, they will be in the correct
    // location.
    MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP).addImm(NumBytes).addImm(0);
  }

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific instruction,
  // it must have a register class matching the constraint of that instruction.
  if (Info.Callee.isReg())
    MIB->getOperand(0).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
        *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Info.Callee,
        0));

  MF.getFrameInfo().setHasTailCall();
  Info.LoweredTailCall = true;
  return true;
}

bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                    CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getParent()->getDataLayout();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();

  SmallVector<ArgInfo, 8> OutArgs;
  for (auto &OrigArg : Info.OrigArgs) {
    splitToValueTypes(OrigArg, OutArgs, DL, MRI, Info.CallConv);
    // AAPCS requires the caller to zero-extend i1 to 8 bits.
    if (OrigArg.Ty->isIntegerTy(1))
      OutArgs.back().Flags[0].setZExt();
  }

  SmallVector<ArgInfo, 8> InArgs;
  if (!Info.OrigRet.Ty->isVoidTy())
    splitToValueTypes(Info.OrigRet, InArgs, DL, MRI, F.getCallingConv());

  // If we can lower as a tail call, do that instead.
  bool CanTailCallOpt =
      isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);

  // We must emit a tail call if we have musttail.
  if (Info.IsMustTailCall && !CanTailCallOpt) {
    // There are types of incoming/outgoing arguments we can't handle yet, so
    // it doesn't make sense to actually die here like in ISelLowering.
    // Instead, fall back to SelectionDAG and let it try to handle this.
    LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
    return false;
  }

  if (CanTailCallOpt)
    return lowerTailCall(MIRBuilder, Info, OutArgs);

  // Find out which ABI gets to decide where things go.
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) =
      getAssignFnsForCC(Info.CallConv, TLI);

  MachineInstrBuilder CallSeqStart;
  CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.
  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), false);

  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  MIB.add(Info.Callee);

  // Tell the call which registers are clobbered.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, Info.CallConv);
  if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
  MIB.addRegMask(Mask);

  if (TRI->isAnyArgRegReserved(MF))
    TRI->emitReservedArgRegCallError(MF);

  // Do the actual argument marshalling.
  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
                             AssignFnVarArg, false);
  if (!handleAssignments(MIRBuilder, OutArgs, Handler))
    return false;

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.
  if (Info.Callee.isReg())
    MIB->getOperand(0).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
        *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Info.Callee,
        0));

  // Finally we can copy the returned value back into its virtual-register. In
  // symmetry with the arguments, the physical register must be an
  // implicit-define of the call instruction.
  if (!Info.OrigRet.Ty->isVoidTy()) {
    CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv);
    CallReturnHandler Handler(MIRBuilder, MRI, MIB, RetAssignFn);
    if (!handleAssignments(MIRBuilder, InArgs, Handler))
      return false;
  }

  if (Info.SwiftErrorVReg) {
    MIB.addDef(AArch64::X21, RegState::Implicit);
    MIRBuilder.buildCopy(Info.SwiftErrorVReg, Register(AArch64::X21));
  }

  uint64_t CalleePopBytes =
      doesCalleeRestoreStack(Info.CallConv,
                             MF.getTarget().Options.GuaranteedTailCallOpt)
          ? alignTo(Handler.StackSize, 16)
          : 0;

  CallSeqStart.addImm(Handler.StackSize).addImm(0);
  MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
      .addImm(Handler.StackSize)
      .addImm(CalleePopBytes);

  return true;
}