xref: /freebsd/contrib/llvm-project/clang/lib/CodeGen/Targets/AArch64.cpp (revision 1db9f3b21e39176dd5b67cf8ac378633b172463e)
1 //===- AArch64.cpp --------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ABIInfoImpl.h"
10 #include "TargetInfo.h"
11 
12 using namespace clang;
13 using namespace clang::CodeGen;
14 
15 //===----------------------------------------------------------------------===//
16 // AArch64 ABI Implementation
17 //===----------------------------------------------------------------------===//
18 
19 namespace {
20 
21 class AArch64ABIInfo : public ABIInfo {
22   AArch64ABIKind Kind;
23 
24 public:
25   AArch64ABIInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
26       : ABIInfo(CGT), Kind(Kind) {}
27 
28 private:
29   AArch64ABIKind getABIKind() const { return Kind; }
30   bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; }
31 
32   ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
33   ABIArgInfo classifyArgumentType(QualType Ty, bool IsVariadic,
34                                   unsigned CallingConvention) const;
35   ABIArgInfo coerceIllegalVector(QualType Ty) const;
36   bool isHomogeneousAggregateBaseType(QualType Ty) const override;
37   bool isHomogeneousAggregateSmallEnough(const Type *Ty,
38                                          uint64_t Members) const override;
39   bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;
40 
41   bool isIllegalVectorType(QualType Ty) const;
42 
43   void computeInfo(CGFunctionInfo &FI) const override {
44     if (!::classifyReturnType(getCXXABI(), FI, *this))
45       FI.getReturnInfo() =
46           classifyReturnType(FI.getReturnType(), FI.isVariadic());
47 
48     for (auto &it : FI.arguments())
49       it.info = classifyArgumentType(it.type, FI.isVariadic(),
50                                      FI.getCallingConvention());
51   }
52 
53   Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
54                           CodeGenFunction &CGF) const;
55 
56   Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
57                          CodeGenFunction &CGF) const;
58 
59   Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
60                     QualType Ty) const override {
61     llvm::Type *BaseTy = CGF.ConvertType(Ty);
62     if (isa<llvm::ScalableVectorType>(BaseTy))
63       llvm::report_fatal_error("Passing SVE types to variadic functions is "
64                                "currently not supported");
65 
66     return Kind == AArch64ABIKind::Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
67            : isDarwinPCS()               ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
68                                          : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
69   }
70 
71   Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
72                       QualType Ty) const override;
73 
74   bool allowBFloatArgsAndRet() const override {
75     return getTarget().hasBFloat16Type();
76   }
77 };
78 
79 class AArch64SwiftABIInfo : public SwiftABIInfo {
80 public:
81   explicit AArch64SwiftABIInfo(CodeGenTypes &CGT)
82       : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}
83 
84   bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
85                          unsigned NumElts) const override;
86 };
87 
88 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
89 public:
90   AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
91       : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {
92     SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGT);
93   }
94 
95   StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
96     return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
97   }
98 
99   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
100     return 31;
101   }
102 
103   bool doesReturnSlotInterfereWithArgs() const override { return false; }
104 
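  // setTargetAttributes (below) translates a parsed "branch-protection"
  // target attribute into the IR function attributes the AArch64 backend
  // reads back. For example (illustrative, not exhaustive):
  //   __attribute__((target("branch-protection=pac-ret+leaf+bti"))) void f();
  // should roughly end up with
  //   "sign-return-address"="all", "sign-return-address-key"="a_key",
  //   "branch-target-enforcement"="true", "branch-protection-pauth-lr"="false"
  // on the generated llvm::Function.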
105   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
106                            CodeGen::CodeGenModule &CGM) const override {
107     const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
108     if (!FD)
109       return;
110 
111     const auto *TA = FD->getAttr<TargetAttr>();
112     if (TA == nullptr)
113       return;
114 
115     ParsedTargetAttr Attr =
116         CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
117     if (Attr.BranchProtection.empty())
118       return;
119 
120     TargetInfo::BranchProtectionInfo BPI;
121     StringRef Error;
122     (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
123                                                    Attr.CPU, BPI, Error);
124     assert(Error.empty());
125 
126     auto *Fn = cast<llvm::Function>(GV);
127     static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
128     Fn->addFnAttr("sign-return-address",
                      SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
129 
130     if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
131       Fn->addFnAttr("sign-return-address-key",
132                     BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey
133                         ? "a_key"
134                         : "b_key");
135     }
136 
137     Fn->addFnAttr("branch-target-enforcement",
138                   BPI.BranchTargetEnforcement ? "true" : "false");
139     Fn->addFnAttr("branch-protection-pauth-lr",
140                   BPI.BranchProtectionPAuthLR ? "true" : "false");
141   }
142 
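  // With the LS64 extension, a struct whose only member is an [8 x i64] array,
  // e.g. an ACLE data512_t-style type such as
  //   struct data512 { unsigned long long val[8]; };
  // may be used directly as an inline-asm operand rather than being forced
  // through memory; anything else falls back to the generic handling below.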
143   bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
144                                 llvm::Type *Ty) const override {
145     if (CGF.getTarget().hasFeature("ls64")) {
146       auto *ST = dyn_cast<llvm::StructType>(Ty);
147       if (ST && ST->getNumElements() == 1) {
148         auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
149         if (AT && AT->getNumElements() == 8 &&
150             AT->getElementType()->isIntegerTy(64))
151           return true;
152       }
153     }
154     return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty);
155   }
156 };
157 
158 class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
159 public:
160   WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind K)
161       : AArch64TargetCodeGenInfo(CGT, K) {}
162 
163   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
164                            CodeGen::CodeGenModule &CGM) const override;
165 
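  // These hooks emit MSVC-style linker directives. For example (illustrative):
  //   #pragma comment(lib, "kernel32")          -> "/DEFAULTLIB:kernel32.lib"
  //   #pragma detect_mismatch("_LIB_VER", "9")  -> "/FAILIFMISMATCH:\"_LIB_VER=9\""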
166   void getDependentLibraryOption(llvm::StringRef Lib,
167                                  llvm::SmallString<24> &Opt) const override {
168     Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
169   }
170 
171   void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
172                                llvm::SmallString<32> &Opt) const override {
173     Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
174   }
175 };
176 
177 void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
178     const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
179   AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
180   if (GV->isDeclaration())
181     return;
182   addStackProbeTargetAttributes(D, GV, CGM);
183 }
184 } // namespace
185 
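// A rough sketch of the mapping performed below (illustrative):
//   - fixed-length SVE predicate       -> <vscale x 16 x i1>
//   - fixed-length SVE data vector     -> the matching scalable vector, e.g. a
//     fixed-length vector of 'int'     -> <vscale x 4 x i32>
//   - other illegal vectors <= 32 bits -> i32 (i16 on Android/OHOS when <= 16)
//   - 64-bit / 128-bit illegal vectors -> <2 x i32> / <4 x i32>
//   - anything else                    -> passed indirectly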
186 ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const {
187   assert(Ty->isVectorType() && "expected vector type!");
188 
189   const auto *VT = Ty->castAs<VectorType>();
190   if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
191     assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
192     assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
193                BuiltinType::UChar &&
194            "unexpected builtin type for SVE predicate!");
195     return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
196         llvm::Type::getInt1Ty(getVMContext()), 16));
197   }
198 
199   if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
200     assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
201 
202     const auto *BT = VT->getElementType()->castAs<BuiltinType>();
203     llvm::ScalableVectorType *ResType = nullptr;
204     switch (BT->getKind()) {
205     default:
206       llvm_unreachable("unexpected builtin type for SVE vector!");
207     case BuiltinType::SChar:
208     case BuiltinType::UChar:
209       ResType = llvm::ScalableVectorType::get(
210           llvm::Type::getInt8Ty(getVMContext()), 16);
211       break;
212     case BuiltinType::Short:
213     case BuiltinType::UShort:
214       ResType = llvm::ScalableVectorType::get(
215           llvm::Type::getInt16Ty(getVMContext()), 8);
216       break;
217     case BuiltinType::Int:
218     case BuiltinType::UInt:
219       ResType = llvm::ScalableVectorType::get(
220           llvm::Type::getInt32Ty(getVMContext()), 4);
221       break;
222     case BuiltinType::Long:
223     case BuiltinType::ULong:
224       ResType = llvm::ScalableVectorType::get(
225           llvm::Type::getInt64Ty(getVMContext()), 2);
226       break;
227     case BuiltinType::Half:
228       ResType = llvm::ScalableVectorType::get(
229           llvm::Type::getHalfTy(getVMContext()), 8);
230       break;
231     case BuiltinType::Float:
232       ResType = llvm::ScalableVectorType::get(
233           llvm::Type::getFloatTy(getVMContext()), 4);
234       break;
235     case BuiltinType::Double:
236       ResType = llvm::ScalableVectorType::get(
237           llvm::Type::getDoubleTy(getVMContext()), 2);
238       break;
239     case BuiltinType::BFloat16:
240       ResType = llvm::ScalableVectorType::get(
241           llvm::Type::getBFloatTy(getVMContext()), 8);
242       break;
243     }
244     return ABIArgInfo::getDirect(ResType);
245   }
246 
247   uint64_t Size = getContext().getTypeSize(Ty);
248   // Android promotes <2 x i8> to i16, not i32
249   if ((isAndroid() || isOHOSFamily()) && (Size <= 16)) {
250     llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
251     return ABIArgInfo::getDirect(ResType);
252   }
253   if (Size <= 32) {
254     llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
255     return ABIArgInfo::getDirect(ResType);
256   }
257   if (Size == 64) {
258     auto *ResType =
259         llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
260     return ABIArgInfo::getDirect(ResType);
261   }
262   if (Size == 128) {
263     auto *ResType =
264         llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
265     return ABIArgInfo::getDirect(ResType);
266   }
267   return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
268 }
269 
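// A few representative argument classifications under AAPCS64 (illustrative,
// not exhaustive):
//   struct S1 { float x, y, z, w; };   // HFA of 4 floats -> [4 x float]
//   struct S2 { int a; char b; };      // 8 bytes         -> i64
//   struct S3 { long long a, b, c; };  // 24 bytes (> 16) -> indirect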
270 ABIArgInfo
271 AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadic,
272                                      unsigned CallingConvention) const {
273   Ty = useFirstFieldIfTransparentUnion(Ty);
274 
275   // Handle illegal vector types here.
276   if (isIllegalVectorType(Ty))
277     return coerceIllegalVector(Ty);
278 
279   if (!isAggregateTypeForABI(Ty)) {
280     // Treat an enum type as its underlying type.
281     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
282       Ty = EnumTy->getDecl()->getIntegerType();
283 
284     if (const auto *EIT = Ty->getAs<BitIntType>())
285       if (EIT->getNumBits() > 128)
286         return getNaturalAlignIndirect(Ty);
287 
288     return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
289                 ? ABIArgInfo::getExtend(Ty)
290                 : ABIArgInfo::getDirect());
291   }
292 
293   // Structures with either a non-trivial destructor or a non-trivial
294   // copy constructor are always indirect.
295   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
296     return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
297                                      CGCXXABI::RAA_DirectInMemory);
298   }
299 
300   // Empty records are always ignored on Darwin, but actually passed in C++ mode
301   // elsewhere for GNU compatibility.
302   uint64_t Size = getContext().getTypeSize(Ty);
303   bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
304   if (IsEmpty || Size == 0) {
305     if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
306       return ABIArgInfo::getIgnore();
307 
308     // GNU C mode. The only argument that gets ignored is an empty one with size
309     // 0.
310     if (IsEmpty && Size == 0)
311       return ABIArgInfo::getIgnore();
312     return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
313   }
314 
315   // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
316   const Type *Base = nullptr;
317   uint64_t Members = 0;
318   bool IsWin64 = Kind == AArch64ABIKind::Win64 ||
319                  CallingConvention == llvm::CallingConv::Win64;
320   bool IsWinVariadic = IsWin64 && IsVariadic;
321   // In variadic functions on Windows, all composite types are treated alike,
322   // no special handling of HFAs/HVAs.
323   if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
324     if (Kind != AArch64ABIKind::AAPCS)
325       return ABIArgInfo::getDirect(
326           llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
327 
328     // For HFAs/HVAs, cap the argument alignment to 16, otherwise
329     // set it to 8 according to the AAPCS64 document.
330     unsigned Align =
331         getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
332     Align = (Align >= 16) ? 16 : 8;
333     return ABIArgInfo::getDirect(
334         llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
335         nullptr, true, Align);
336   }
337 
338   // Aggregates <= 16 bytes are passed directly in registers or on the stack.
339   if (Size <= 128) {
340     // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
341     // same size and alignment.
342     if (getTarget().isRenderScriptTarget()) {
343       return coerceToIntArray(Ty, getContext(), getVMContext());
344     }
345     unsigned Alignment;
346     if (Kind == AArch64ABIKind::AAPCS) {
347       Alignment = getContext().getTypeUnadjustedAlign(Ty);
348       Alignment = Alignment < 128 ? 64 : 128;
349     } else {
350       Alignment =
351           std::max(getContext().getTypeAlign(Ty),
352                    (unsigned)getTarget().getPointerWidth(LangAS::Default));
353     }
354     Size = llvm::alignTo(Size, Alignment);
355 
356     // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
357     // For aggregates with 16-byte alignment, we use i128.
358     llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
359     return ABIArgInfo::getDirect(
360         Size == Alignment ? BaseTy
361                           : llvm::ArrayType::get(BaseTy, Size / Alignment));
362   }
363 
364   return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
365 }
366 
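// Representative return-type classifications on little-endian AAPCS64
// (illustrative):
//   struct { int a, b; }           // 8 bytes            -> i64
//   struct { long long a, b; }     // 16 bytes, align 8  -> [2 x i64]
//   struct { __int128 a; }         // 16 bytes, align 16 -> i128
//   struct { long long a, b, c; }  // 24 bytes           -> indirect (sret)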
367 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
368                                               bool IsVariadic) const {
369   if (RetTy->isVoidType())
370     return ABIArgInfo::getIgnore();
371 
372   if (const auto *VT = RetTy->getAs<VectorType>()) {
373     if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
374         VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
375       return coerceIllegalVector(RetTy);
376   }
377 
378   // Large vector types should be returned via memory.
379   if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
380     return getNaturalAlignIndirect(RetTy);
381 
382   if (!isAggregateTypeForABI(RetTy)) {
383     // Treat an enum type as its underlying type.
384     if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
385       RetTy = EnumTy->getDecl()->getIntegerType();
386 
387     if (const auto *EIT = RetTy->getAs<BitIntType>())
388       if (EIT->getNumBits() > 128)
389         return getNaturalAlignIndirect(RetTy);
390 
391     return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
392                 ? ABIArgInfo::getExtend(RetTy)
393                 : ABIArgInfo::getDirect());
394   }
395 
396   uint64_t Size = getContext().getTypeSize(RetTy);
397   if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
398     return ABIArgInfo::getIgnore();
399 
400   const Type *Base = nullptr;
401   uint64_t Members = 0;
402   if (isHomogeneousAggregate(RetTy, Base, Members) &&
403       !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
404         IsVariadic))
405     // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
406     return ABIArgInfo::getDirect();
407 
408   // Aggregates <= 16 bytes are returned directly in registers or on the stack.
409   if (Size <= 128) {
410     // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
411     // same size and alignment.
412     if (getTarget().isRenderScriptTarget()) {
413       return coerceToIntArray(RetTy, getContext(), getVMContext());
414     }
415 
416     if (Size <= 64 && getDataLayout().isLittleEndian()) {
417       // Composite types are returned in lower bits of a 64-bit register for LE,
418       // and in higher bits for BE. However, integer types are always returned
419       // in lower bits for both LE and BE, and they are not rounded up to
420       // 64-bits. We can skip rounding up of composite types for LE, but not for
421       // BE, otherwise composite types will be indistinguishable from integer
422       // types.
423       return ABIArgInfo::getDirect(
424           llvm::IntegerType::get(getVMContext(), Size));
425     }
426 
427     unsigned Alignment = getContext().getTypeAlign(RetTy);
428     Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
429 
430     // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
431     // For aggregates with 16-byte alignment, we use i128.
432     if (Alignment < 128 && Size == 128) {
433       llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
434       return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
435     }
436     return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
437   }
438 
439   return getNaturalAlignIndirect(RetTy);
440 }
441 
442 /// isIllegalVectorType - check whether the vector type is legal for AArch64.
443 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
444   if (const VectorType *VT = Ty->getAs<VectorType>()) {
445     // Check whether VT is a fixed-length SVE vector. These types are
446     // represented as scalable vectors in function args/return and must be
447     // coerced from fixed vectors.
448     if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
449         VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
450       return true;
451 
452     // Check whether VT is legal.
453     unsigned NumElements = VT->getNumElements();
454     uint64_t Size = getContext().getTypeSize(VT);
455     // NumElements should be power of 2.
456     if (!llvm::isPowerOf2_32(NumElements))
457       return true;
458 
459     // arm64_32 has to be compatible with the ARM logic here, which allows huge
460     // vectors for some reason.
461     llvm::Triple Triple = getTarget().getTriple();
462     if (Triple.getArch() == llvm::Triple::aarch64_32 &&
463         Triple.isOSBinFormatMachO())
464       return Size <= 32;
465 
466     return Size != 64 && (Size != 128 || NumElements == 1);
467   }
468   return false;
469 }
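// For example, under the checks above a 64-bit generic vector such as
//   typedef int v2si __attribute__((vector_size(8)));
// is legal, while a 3-element vector (non-power-of-2 element count) or a
// 256-bit generic vector is illegal and gets coerced by coerceIllegalVector.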
470 
471 bool AArch64SwiftABIInfo::isLegalVectorType(CharUnits VectorSize,
472                                             llvm::Type *EltTy,
473                                             unsigned NumElts) const {
474   if (!llvm::isPowerOf2_32(NumElts))
475     return false;
476   if (VectorSize.getQuantity() != 8 &&
477       (VectorSize.getQuantity() != 16 || NumElts == 1))
478     return false;
479   return true;
480 }
481 
482 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
483   // Homogeneous aggregates for AAPCS64 must have base types of a floating
484   // point type or a short-vector type. This is the same as the 32-bit ABI,
485   // but with the difference that any floating-point type is allowed,
486   // including __fp16.
487   if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
488     if (BT->isFloatingPoint())
489       return true;
490   } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
491     unsigned VecSize = getContext().getTypeSize(VT);
492     if (VecSize == 64 || VecSize == 128)
493       return true;
494   }
495   return false;
496 }
497 
498 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
499                                                        uint64_t Members) const {
500   return Members <= 4;
501 }
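// Taken together with isHomogeneousAggregateBaseType above: an aggregate is an
// HFA/HVA when every member shares one floating-point or 64/128-bit vector
// base type and there are at most four members, e.g. struct { double d[4]; }
// qualifies while struct { float f[5]; } does not.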
502 
503 bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
504     const {
505   // AAPCS64 says that the rule for whether something is a homogeneous
506   // aggregate is applied to the output of the data layout decision. So
507   // anything that doesn't affect the data layout also does not affect
508   // homogeneity. In particular, zero-length bitfields don't stop a struct
509   // being homogeneous.
510   return true;
511 }
512 
513 Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
514                                        CodeGenFunction &CGF) const {
515   ABIArgInfo AI = classifyArgumentType(Ty, /*IsVariadic=*/true,
516                                        CGF.CurFnInfo->getCallingConvention());
517   // Empty records are ignored for parameter passing purposes.
518   if (AI.isIgnore()) {
519     uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
520     CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
521     VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);
522     auto *Load = CGF.Builder.CreateLoad(VAListAddr);
523     return Address(Load, CGF.ConvertTypeForMem(Ty), SlotSize);
524   }
525 
526   bool IsIndirect = AI.isIndirect();
527 
528   llvm::Type *BaseTy = CGF.ConvertType(Ty);
529   if (IsIndirect)
530     BaseTy = llvm::PointerType::getUnqual(BaseTy);
531   else if (AI.getCoerceToType())
532     BaseTy = AI.getCoerceToType();
533 
534   unsigned NumRegs = 1;
535   if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
536     BaseTy = ArrTy->getElementType();
537     NumRegs = ArrTy->getNumElements();
538   }
539   bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
540 
541   // The AArch64 va_list type and handling is specified in the Procedure Call
542   // Standard, section B.4:
543   //
544   // struct {
545   //   void *__stack;
546   //   void *__gr_top;
547   //   void *__vr_top;
548   //   int __gr_offs;
549   //   int __vr_offs;
550   // };
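  //
  // __gr_offs and __vr_offs hold the (negative) byte offset from __gr_top /
  // __vr_top of the next saved general-purpose or FP/SIMD register argument;
  // once they become non-negative the corresponding registers are exhausted
  // and further arguments are read from __stack.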
551 
552   llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
553   llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
554   llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
555   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
556 
557   CharUnits TySize = getContext().getTypeSizeInChars(Ty);
558   CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);
559 
560   Address reg_offs_p = Address::invalid();
561   llvm::Value *reg_offs = nullptr;
562   int reg_top_index;
563   int RegSize = IsIndirect ? 8 : TySize.getQuantity();
564   if (!IsFPR) {
565     // 3 is the field number of __gr_offs
566     reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
567     reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
568     reg_top_index = 1; // field number for __gr_top
569     RegSize = llvm::alignTo(RegSize, 8);
570   } else {
571     // 4 is the field number of __vr_offs.
572     reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
573     reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
574     reg_top_index = 2; // field number for __vr_top
575     RegSize = 16 * NumRegs;
576   }
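  // For example (illustrative): a plain 'int' takes the __gr_offs/__gr_top
  // path with RegSize rounded up to 8, while an HFA of two doubles (coerced to
  // [2 x double]) takes the __vr_offs/__vr_top path with RegSize = 16 * 2,
  // since each member notionally occupies a full 16-byte q register slot.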
577 
578   //=======================================
579   // Find out where argument was passed
580   //=======================================
581 
582   // If reg_offs >= 0 we're already using the stack for this type of
583   // argument. We don't want to keep updating reg_offs (in case it overflows,
584   // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
585   // whatever they get).
586   llvm::Value *UsingStack = nullptr;
587   UsingStack = CGF.Builder.CreateICmpSGE(
588       reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
589 
590   CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
591 
592   // Otherwise, at least some kind of argument could go in these registers, the
593   // question is whether this particular type is too big.
594   CGF.EmitBlock(MaybeRegBlock);
595 
596   // Integer arguments may need to correct register alignment (for example a
597   // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
598   // align __gr_offs to calculate the potential address.
599   if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
600     int Align = TyAlign.getQuantity();
601 
602     reg_offs = CGF.Builder.CreateAdd(
603         reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
604         "align_regoffs");
605     reg_offs = CGF.Builder.CreateAnd(
606         reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
607         "aligned_regoffs");
608   }
609 
610   // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
611   // The fact that this is done unconditionally reflects the fact that
612   // allocating an argument to the stack also uses up all the remaining
613   // registers of the appropriate kind.
614   llvm::Value *NewOffset = nullptr;
615   NewOffset = CGF.Builder.CreateAdd(
616       reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
617   CGF.Builder.CreateStore(NewOffset, reg_offs_p);
618 
619   // Now we're in a position to decide whether this argument really was in
620   // registers or not.
621   llvm::Value *InRegs = nullptr;
622   InRegs = CGF.Builder.CreateICmpSLE(
623       NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
624 
625   CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
626 
627   //=======================================
628   // Argument was in registers
629   //=======================================
630 
631   // Now we emit the code for if the argument was originally passed in
632   // registers. First start the appropriate block:
633   CGF.EmitBlock(InRegBlock);
634 
635   llvm::Value *reg_top = nullptr;
636   Address reg_top_p =
637       CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
638   reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
639   Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs),
640                    CGF.Int8Ty, CharUnits::fromQuantity(IsFPR ? 16 : 8));
641   Address RegAddr = Address::invalid();
642   llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty), *ElementTy = MemTy;
643 
644   if (IsIndirect) {
645     // If it's been passed indirectly (actually a struct), whatever we find from
646     // stored registers or on the stack will actually be a struct **.
647     MemTy = llvm::PointerType::getUnqual(MemTy);
648   }
649 
650   const Type *Base = nullptr;
651   uint64_t NumMembers = 0;
652   bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
653   if (IsHFA && NumMembers > 1) {
654     // Homogeneous aggregates passed in registers will have their elements split
655     // and stored 16-bytes apart regardless of size (they're notionally in qN,
656     // qN+1, ...). We reload and store into a temporary local variable
657     // contiguously.
658     assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
659     auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
660     llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
661     llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
662     Address Tmp = CGF.CreateTempAlloca(HFATy,
663                                        std::max(TyAlign, BaseTyInfo.Align));
664 
665     // On big-endian platforms, the value will be right-aligned in its slot.
666     int Offset = 0;
667     if (CGF.CGM.getDataLayout().isBigEndian() &&
668         BaseTyInfo.Width.getQuantity() < 16)
669       Offset = 16 - BaseTyInfo.Width.getQuantity();
670 
671     for (unsigned i = 0; i < NumMembers; ++i) {
672       CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
673       Address LoadAddr =
674         CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
675       LoadAddr = LoadAddr.withElementType(BaseTy);
676 
677       Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);
678 
679       llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
680       CGF.Builder.CreateStore(Elem, StoreAddr);
681     }
682 
683     RegAddr = Tmp.withElementType(MemTy);
684   } else {
685     // Otherwise the object is contiguous in memory.
686 
687     // It might be right-aligned in its slot.
688     CharUnits SlotSize = BaseAddr.getAlignment();
689     if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
690         (IsHFA || !isAggregateTypeForABI(Ty)) &&
691         TySize < SlotSize) {
692       CharUnits Offset = SlotSize - TySize;
693       BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
694     }
695 
696     RegAddr = BaseAddr.withElementType(MemTy);
697   }
698 
699   CGF.EmitBranch(ContBlock);
700 
701   //=======================================
702   // Argument was on the stack
703   //=======================================
704   CGF.EmitBlock(OnStackBlock);
705 
706   Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
707   llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
708 
709   // Again, stack arguments may need realignment. In this case both integer and
710   // floating-point ones might be affected.
711   if (!IsIndirect && TyAlign.getQuantity() > 8) {
712     int Align = TyAlign.getQuantity();
713 
714     OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
715 
716     OnStackPtr = CGF.Builder.CreateAdd(
717         OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
718         "align_stack");
719     OnStackPtr = CGF.Builder.CreateAnd(
720         OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
721         "align_stack");
722 
723     OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
724   }
725   Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
726                                 std::max(CharUnits::fromQuantity(8), TyAlign));
727 
728   // All stack slots are multiples of 8 bytes.
729   CharUnits StackSlotSize = CharUnits::fromQuantity(8);
730   CharUnits StackSize;
731   if (IsIndirect)
732     StackSize = StackSlotSize;
733   else
734     StackSize = TySize.alignTo(StackSlotSize);
735 
736   llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
737   llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
738       CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");
739 
740   // Write the new value of __stack for the next call to va_arg
741   CGF.Builder.CreateStore(NewStack, stack_p);
742 
743   if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
744       TySize < StackSlotSize) {
745     CharUnits Offset = StackSlotSize - TySize;
746     OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
747   }
748 
749   OnStackAddr = OnStackAddr.withElementType(MemTy);
750 
751   CGF.EmitBranch(ContBlock);
752 
753   //=======================================
754   // Tidy up
755   //=======================================
756   CGF.EmitBlock(ContBlock);
757 
758   Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
759                                  OnStackBlock, "vaargs.addr");
760 
761   if (IsIndirect)
762     return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), ElementTy,
763                    TyAlign);
764 
765   return ResAddr;
766 }
767 
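// On Darwin the va_list is a simple char * cursor, so most scalars can use the
// plain LLVM va_arg lowering. For example (illustrative), a 'double' is loaded
// straight from its 8-byte slot, while a 32-byte non-HFA struct is passed
// indirectly and its slot holds only a pointer to the actual object.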
768 Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
769                                         CodeGenFunction &CGF) const {
770   // The backend's lowering doesn't support va_arg for aggregates or
771   // illegal vector types.  Lower VAArg here for these cases and use
772   // the LLVM va_arg instruction for everything else.
773   if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
774     return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
775 
776   uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
777   CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
778 
779   // Empty records are ignored for parameter passing purposes.
780   if (isEmptyRecord(getContext(), Ty, true))
781     return Address(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"),
782                    CGF.ConvertTypeForMem(Ty), SlotSize);
783 
784   // The size of the actual thing passed, which might end up just
785   // being a pointer for indirect types.
786   auto TyInfo = getContext().getTypeInfoInChars(Ty);
787 
788   // Arguments bigger than 16 bytes which aren't homogeneous
789   // aggregates should be passed indirectly.
790   bool IsIndirect = false;
791   if (TyInfo.Width.getQuantity() > 16) {
792     const Type *Base = nullptr;
793     uint64_t Members = 0;
794     IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
795   }
796 
797   return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
798                           TyInfo, SlotSize, /*AllowHigherAlign*/ true);
799 }
800 
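// Windows on AArch64 also uses a char * va_list with 8-byte slots and no extra
// alignment. For example (illustrative), a 24-byte struct is passed by
// reference (the slot holds a pointer), while a 16-byte struct is read
// directly from two consecutive 8-byte slots.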
801 Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
802                                     QualType Ty) const {
803   bool IsIndirect = false;
804 
805   // Composites larger than 16 bytes are passed by reference.
806   if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) > 128)
807     IsIndirect = true;
808 
809   return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
810                           CGF.getContext().getTypeInfoInChars(Ty),
811                           CharUnits::fromQuantity(8),
812                           /*allowHigherAlign*/ false);
813 }
814 
815 std::unique_ptr<TargetCodeGenInfo>
816 CodeGen::createAArch64TargetCodeGenInfo(CodeGenModule &CGM,
817                                         AArch64ABIKind Kind) {
818   return std::make_unique<AArch64TargetCodeGenInfo>(CGM.getTypes(), Kind);
819 }
820 
821 std::unique_ptr<TargetCodeGenInfo>
822 CodeGen::createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM,
823                                                AArch64ABIKind K) {
824   return std::make_unique<WindowsAArch64TargetCodeGenInfo>(CGM.getTypes(), K);
825 }
826