//===- AArch64.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// AArch64 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class AArch64ABIInfo : public ABIInfo {
  AArch64ABIKind Kind;

public:
  AArch64ABIInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : ABIInfo(CGT), Kind(Kind) {}

private:
  AArch64ABIKind getABIKind() const { return Kind; }
  bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; }

  ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic,
                                  unsigned CallingConvention) const;
  ABIArgInfo coerceIllegalVector(QualType Ty) const;
  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;
  bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;

  bool isIllegalVectorType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!::classifyReturnType(getCXXABI(), FI, *this))
      FI.getReturnInfo() =
          classifyReturnType(FI.getReturnType(), FI.isVariadic());

    for (auto &it : FI.arguments())
      it.info = classifyArgumentType(it.type, FI.isVariadic(),
                                     FI.getCallingConvention());
  }

  Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                          CodeGenFunction &CGF) const;

  Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    llvm::Type *BaseTy = CGF.ConvertType(Ty);
    if (isa<llvm::ScalableVectorType>(BaseTy))
      llvm::report_fatal_error("Passing SVE types to variadic functions is "
                               "currently not supported");

    return Kind == AArch64ABIKind::Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
           : isDarwinPCS()               ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
                                         : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
  }

  Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                      QualType Ty) const override;

  bool allowBFloatArgsAndRet() const override {
    return getTarget().hasBFloat16Type();
  }
};

class AArch64SwiftABIInfo : public SwiftABIInfo {
public:
  explicit AArch64SwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}

  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};

class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {
    SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGT);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
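    // In the AArch64 DWARF register numbering the stack pointer (SP) is
    // register 31.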
100     return 31;
101   }
102 
103   bool doesReturnSlotInterfereWithArgs() const override { return false; }
104 
105   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
106                            CodeGen::CodeGenModule &CGM) const override {
107     const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
108     if (!FD)
109       return;
110 
111     const auto *TA = FD->getAttr<TargetAttr>();
112     if (TA == nullptr)
113       return;
114 
115     ParsedTargetAttr Attr =
116         CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
117     if (Attr.BranchProtection.empty())
118       return;
119 
120     TargetInfo::BranchProtectionInfo BPI;
121     StringRef Error;
122     (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
123                                                    Attr.CPU, BPI, Error);
124     assert(Error.empty());
125 
126     auto *Fn = cast<llvm::Function>(GV);
127     static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
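    // Indexed by LangOptions::SignReturnAddressScopeKind: None, NonLeaf, All.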
    Fn->addFnAttr("sign-return-address",
                  SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);

    if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
      Fn->addFnAttr("sign-return-address-key",
                    BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey
                        ? "a_key"
                        : "b_key");
    }

    Fn->addFnAttr("branch-target-enforcement",
                  BPI.BranchTargetEnforcement ? "true" : "false");
  }

  bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
                                llvm::Type *Ty) const override {
    if (CGF.getTarget().hasFeature("ls64")) {
      auto *ST = dyn_cast<llvm::StructType>(Ty);
      if (ST && ST->getNumElements() == 1) {
        auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
        if (AT && AT->getNumElements() == 8 &&
            AT->getElementType()->isIntegerTy(64))
          return true;
      }
    }
    return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty);
  }
};

class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
public:
  WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind K)
      : AArch64TargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
}

ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const {
  assert(Ty->isVectorType() && "expected vector type!");

  const auto *VT = Ty->castAs<VectorType>();
  if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) {
    assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
        llvm::Type::getInt1Ty(getVMContext()), 16));
  }

  if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) {
    assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");

    const auto *BT = VT->getElementType()->castAs<BuiltinType>();
    llvm::ScalableVectorType *ResType = nullptr;
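    // Map the fixed-length element type to the scalable vector whose element
    // count fills one 128-bit SVE granule (16 x i8, 8 x i16, 4 x i32, ...).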
    switch (BT->getKind()) {
    default:
      llvm_unreachable("unexpected builtin type for SVE vector!");
    case BuiltinType::SChar:
    case BuiltinType::UChar:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getInt8Ty(getVMContext()), 16);
      break;
    case BuiltinType::Short:
    case BuiltinType::UShort:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getInt16Ty(getVMContext()), 8);
      break;
    case BuiltinType::Int:
    case BuiltinType::UInt:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);
      break;
    case BuiltinType::Long:
    case BuiltinType::ULong:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getInt64Ty(getVMContext()), 2);
      break;
    case BuiltinType::Half:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getHalfTy(getVMContext()), 8);
      break;
    case BuiltinType::Float:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getFloatTy(getVMContext()), 4);
      break;
    case BuiltinType::Double:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getDoubleTy(getVMContext()), 2);
      break;
    case BuiltinType::BFloat16:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getBFloatTy(getVMContext()), 8);
      break;
    }
    return ABIArgInfo::getDirect(ResType);
  }

  uint64_t Size = getContext().getTypeSize(Ty);
  // Android promotes <2 x i8> to i16, not i32
  if ((isAndroid() || isOHOSFamily()) && (Size <= 16)) {
    llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64) {
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 128) {
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
    return ABIArgInfo::getDirect(ResType);
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo
AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadic,
                                     unsigned CallingConvention) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty);

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(Ty);

    return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
                ? ABIArgInfo::getExtend(Ty)
                : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                     CGCXXABI::RAA_DirectInMemory);
  }

  // Empty records are always ignored on Darwin, but actually passed in C++ mode
  // elsewhere for GNU compatibility.
  uint64_t Size = getContext().getTypeSize(Ty);
  bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
  if (IsEmpty || Size == 0) {
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();

    // GNU C mode. The only argument that gets ignored is an empty one with size
    // 0.
    if (IsEmpty && Size == 0)
      return ABIArgInfo::getIgnore();
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
  }

  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
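  // For example, "struct { float x, y, z, w; }" is an HFA with Base = float
  // and Members = 4; it is passed in four consecutive SIMD/FP registers.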
  const Type *Base = nullptr;
  uint64_t Members = 0;
  bool IsWin64 = Kind == AArch64ABIKind::Win64 ||
                 CallingConvention == llvm::CallingConv::Win64;
  bool IsWinVariadic = IsWin64 && IsVariadic;
  // In variadic functions on Windows, all composite types are treated alike,
  // no special handling of HFAs/HVAs.
  if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
    if (Kind != AArch64ABIKind::AAPCS)
      return ABIArgInfo::getDirect(
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));

    // For alignment adjusted HFAs, cap the argument alignment to 16, leave it
    // default otherwise.
    unsigned Align =
        getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
    Align = (Align > BaseAlign && Align >= 16) ? 16 : 0;
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
        nullptr, true, Align);
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce aggregates <= 16 bytes to an integer array of
    // the same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(Ty, getContext(), getVMContext());
    }
    unsigned Alignment;
    if (Kind == AArch64ABIKind::AAPCS) {
      Alignment = getContext().getTypeUnadjustedAlign(Ty);
      Alignment = Alignment < 128 ? 64 : 128;
    } else {
      Alignment =
          std::max(getContext().getTypeAlign(Ty),
                   (unsigned)getTarget().getPointerWidth(LangAS::Default));
    }
    Size = llvm::alignTo(Size, Alignment);

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
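    // For example, a 16-byte aggregate with 8-byte alignment is coerced to
    // [2 x i64], while a 16-byte aggregate containing a 16-byte-aligned
    // member is coerced to a single i128.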
    llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
    return ABIArgInfo::getDirect(
        Size == Alignment ? BaseTy
                          : llvm::ArrayType::get(BaseTy, Size / Alignment));
  }

  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
                                              bool IsVariadic) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const auto *VT = RetTy->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
        VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
      return coerceIllegalVector(RetTy);
  }

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return getNaturalAlignIndirect(RetTy);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(RetTy);

    return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
                ? ABIArgInfo::getExtend(RetTy)
                : ABIArgInfo::getDirect());
  }

  uint64_t Size = getContext().getTypeSize(RetTy);
  if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members) &&
      !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
        IsVariadic))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce aggregates <= 16 bytes to an integer array of
    // the same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }

    if (Size <= 64 && getDataLayout().isLittleEndian()) {
      // Composite types are returned in lower bits of a 64-bit register for LE,
      // and in higher bits for BE. However, integer types are always returned
      // in lower bits for both LE and BE, and they are not rounded up to
      // 64-bits. We can skip rounding up of composite types for LE, but not for
      // BE, otherwise composite types will be indistinguishable from integer
      // types.
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));
    }

    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return getNaturalAlignIndirect(RetTy);
}

/// isIllegalVectorType - check whether the vector type is illegal for AArch64.
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Check whether VT is a fixed-length SVE vector. These types are
    // represented as scalable vectors in function args/return and must be
    // coerced from fixed vectors.
    if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
        VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
      return true;

    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be power of 2.
    if (!llvm::isPowerOf2_32(NumElements))
      return true;

    // arm64_32 has to be compatible with the ARM logic here, which allows huge
    // vectors for some reason.
    llvm::Triple Triple = getTarget().getTriple();
    if (Triple.getArch() == llvm::Triple::aarch64_32 &&
        Triple.isOSBinFormatMachO())
      return Size <= 32;

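    // Otherwise a legal short vector is exactly 64 bits, or 128 bits with
    // more than one element; anything else must be coerced or passed
    // indirectly.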
    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}

bool AArch64SwiftABIInfo::isLegalVectorType(CharUnits VectorSize,
                                            llvm::Type *EltTy,
                                            unsigned NumElts) const {
  if (!llvm::isPowerOf2_32(NumElts))
    return false;
  if (VectorSize.getQuantity() != 8 &&
      (VectorSize.getQuantity() != 16 || NumElts == 1))
    return false;
  return true;
}

bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS64 must have base types of a floating
  // point type or a short-vector type. This is the same as the 32-bit ABI,
  // but with the difference that any floating-point type is allowed,
  // including __fp16.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint())
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
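  // AAPCS64 limits a homogeneous aggregate to at most four members, so that
  // it fits in four consecutive SIMD/FP registers.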
  return Members <= 4;
}

bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
    const {
  // AAPCS64 says that the rule for whether something is a homogeneous
  // aggregate is applied to the output of the data layout decision. So
  // anything that doesn't affect the data layout also does not affect
  // homogeneity. In particular, zero-length bitfields don't stop a struct
  // being homogeneous.
  return true;
}

Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  ABIArgInfo AI = classifyArgumentType(Ty, /*IsVariadic=*/true,
                                       CGF.CurFnInfo->getCallingConvention());
  // Empty records are ignored for parameter passing purposes.
  if (AI.isIgnore()) {
    uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
    CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
    VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);
    auto *Load = CGF.Builder.CreateLoad(VAListAddr);
    return Address(Load, CGF.ConvertTypeForMem(Ty), SlotSize);
  }

  bool IsIndirect = AI.isIndirect();

  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy);
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();

  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  // struct {
  //   void *__stack;
  //   void *__gr_top;
  //   void *__vr_top;
  //   int __gr_offs;
  //   int __vr_offs;
  // };
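  //
  // __gr_offs and __vr_offs are negative byte offsets from __gr_top and
  // __vr_top to the next saved GP or FP/SIMD register argument; once an
  // offset reaches zero or above, that register save area is exhausted and
  // further arguments of that class are taken from __stack.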

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);

  Address reg_offs_p = Address::invalid();
  llvm::Value *reg_offs = nullptr;
  int reg_top_index;
  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
  if (!IsFPR) {
    // 3 is the field number of __gr_offs
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // field number for __gr_top
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // 4 is the field number of __vr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // field number for __vr_top
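    // Each FP/SIMD register is saved in a 16-byte slot (the size of a
    // q register), regardless of the size of the base type.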
    RegSize = 16 * NumRegs;
  }

  //=======================================
  // Find out where argument was passed
  //=======================================

  // If reg_offs >= 0 we're already using the stack for this type of
  // argument. We don't want to keep updating reg_offs (in case it overflows,
  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
  // whatever they get).
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers, the
  // question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to correct register alignment (for example a
  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
  // align __gr_offs to calculate the potential address.
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
  // The fact that this is done unconditionally reflects the fact that
  // allocating an argument to the stack also uses up all the remaining
  // registers of the appropriate kind.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  //=======================================
  // Argument was in registers
  //=======================================

  // Now we emit the code for if the argument was originally passed in
  // registers. First start the appropriate block:
  CGF.EmitBlock(InRegBlock);

  llvm::Value *reg_top = nullptr;
  Address reg_top_p =
      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
  Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs),
                   CGF.Int8Ty, CharUnits::fromQuantity(IsFPR ? 16 : 8));
  Address RegAddr = Address::invalid();
  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty), *ElementTy = MemTy;

  if (IsIndirect) {
    // If it's been passed indirectly (actually a struct), whatever we find from
    // stored registers or on the stack will actually be a struct **.
    MemTy = llvm::PointerType::getUnqual(MemTy);
  }

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers will have their elements split
    // and stored 16-bytes apart regardless of size (they're notionally in qN,
    // qN+1, ...). We reload and store into a temporary local variable
    // contiguously.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    Address Tmp = CGF.CreateTempAlloca(HFATy,
                                       std::max(TyAlign, BaseTyInfo.Align));

    // On big-endian platforms, the value will be right-aligned in its slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.Width.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.Width.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
      Address LoadAddr =
        CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
      LoadAddr = LoadAddr.withElementType(BaseTy);

      Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }

    RegAddr = Tmp.withElementType(MemTy);
  } else {
    // Otherwise the object is contiguous in memory.

    // It might be right-aligned in its slot.
    CharUnits SlotSize = BaseAddr.getAlignment();
    if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
        (IsHFA || !isAggregateTypeForABI(Ty)) &&
        TySize < SlotSize) {
      CharUnits Offset = SlotSize - TySize;
      BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
    }

    RegAddr = BaseAddr.withElementType(MemTy);
  }

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);

  Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
  llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Again, stack arguments may need realignment. In this case both integer and
  // floating-point ones might be affected.
  if (!IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);

    OnStackPtr = CGF.Builder.CreateAdd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
        "align_stack");
    OnStackPtr = CGF.Builder.CreateAnd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
        "align_stack");

    OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
  }
  Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
                                std::max(CharUnits::fromQuantity(8), TyAlign));

  // All stack slots are multiples of 8 bytes.
  CharUnits StackSlotSize = CharUnits::fromQuantity(8);
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TySize.alignTo(StackSlotSize);

  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
  llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
      CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg
  CGF.Builder.CreateStore(NewStack, stack_p);

  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      TySize < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TySize;
    OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
  }

  OnStackAddr = OnStackAddr.withElementType(MemTy);

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Tidy up
  //=======================================
  CGF.EmitBlock(ContBlock);

  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
                                 OnStackBlock, "vaargs.addr");

  if (IsIndirect)
    return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), ElementTy,
                   TyAlign);

  return ResAddr;
}

Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                                        CodeGenFunction &CGF) const {
  // The backend's lowering doesn't support va_arg for aggregates or
  // illegal vector types.  Lower VAArg here for these cases and use
  // the LLVM va_arg instruction for everything else.
  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
    return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());

  uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
  CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true))
    return Address(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"),
                   CGF.ConvertTypeForMem(Ty), SlotSize);

  // The size of the actual thing passed, which might end up just
  // being a pointer for indirect types.
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 16 bytes which aren't homogeneous
  // aggregates should be passed indirectly.
  bool IsIndirect = false;
  if (TyInfo.Width.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          TyInfo, SlotSize, /*AllowHigherAlign*/ true);
}

Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty) const {
  bool IsIndirect = false;

  // Composites larger than 16 bytes are passed by reference.
  if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) > 128)
    IsIndirect = true;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                        AArch64ABIKind Kind) {
  return std::make_unique<AArch64TargetCodeGenInfo>(CGM.getTypes(), Kind);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                               AArch64ABIKind K) {
  return std::make_unique<WindowsAArch64TargetCodeGenInfo>(CGM.getTypes(), K);
}