//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  case CC_SwiftAsync: return llvm::CallingConv::SwiftTail;
  }
}
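// As an illustration of this mapping: a declaration such as
//
//   void __attribute__((fastcall)) f(int x);   // 32-bit x86
//
// reaches here with CC_X86FastCall and is emitted as an LLVM function using
// the x86_fastcallcc convention.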

/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification. Either or both of RD and MD may be null. A null RD indicates
/// that there is no meaningful 'this' type, and a null MD can occur when
/// calling a method pointer.
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
                                         const CXXMethodDecl *MD) {
  QualType RecTy;
  if (RD)
    RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  else
    RecTy = Context.VoidTy;

  if (MD)
    RecTy = Context.getAddrSpaceQualType(
        RecTy, MD->getMethodQualifiers().getAddressSpace());
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}
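// For example, given 'struct S { void f() const; };', DeriveThisType(S, S::f)
// yields 'S *' rather than 'const S *': the CVR qualifiers are dropped, though
// an address-space qualifier on the method would still be applied.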

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
         llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                        const FunctionProtoType *proto,
                                        unsigned prefixArgs,
                                        unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // A pass_object_size param is passed as two arguments; give the implicit
    // size argument a default (empty) parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}
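// As a sketch of the pass_object_size handling above: a prototype such as
//
//   void f(void *p __attribute__((pass_object_size(0))));
//
// is arranged with two arguments (the pointer followed by an implicit
// size_t), so both the type list and the param-info list grow by one entry.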

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
                                               bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<AArch64VectorPcsAttr>())
    return CC_AArch64VectorCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}
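// For instance, an Objective-C method marked __attribute__((pcs("aapcs")))
// selects CC_AAPCS here, while a method with no convention attribute falls
// through to the default CC_C.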

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
///  so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(DeriveThisType(RD, MD));

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function.  The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQualType FT = GetFormalType(MD).getAs<Type>();
  setCUDAKernelCallingConvention(FT, CGM, MD);
  auto prototype = FT.getAs<FunctionProtoType>();

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}
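// For example, given
//
//   struct A { A(int); };
//   struct B : virtual A { using A::A; };
//
// the base-object variant of B's inherited constructor does not construct the
// virtual A base (the most-derived object does), so on ABIs with constructor
// variants it needs no forwarded parameters.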

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(DeriveThisType(MD->getParent(), MD));

  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgCounts AddedArgs =
      TheCXXABI.buildStructorSignature(GD, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix args are inserted after the first param (the 'this' entry).
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}
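// Note on the result type above: on ABIs where HasThisReturn holds (e.g.
// constructors in the ARM and Microsoft C++ ABIs), the structor returns the
// 'this' pointer rather than void.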

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = PassProtoArgs
                              ? RequiredArgs::forPrototypePlus(
                                    FPT, TotalPrefixArgs + ExtraSuffixArgs)
                              : RequiredArgs::All;

  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));
  setCUDAKernelCallingConvention(FTy, CGM, FD);

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type.  The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}
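// A message send such as [obj setX:v] is therefore arranged as
// (self, _cmd, v): the receiver and selector slots always come first,
// followed by the declared parameters.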

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))
    return arrangeCXXStructorDeclaration(GD);

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs.  Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only methods have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(DeriveThisType(RD, CD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                .isNoProtoCallVariadic(args,
                                       cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments.  The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}
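// For a call like printf("%d", 42) against 'int printf(const char *, ...)',
// the resulting info carries both argument types but a RequiredArgs of 1:
// only the format string is required, and the trailing arguments are treated
// as variadic.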

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
                                 /*instanceMethod*/ false, /*chainCall*/ false,
                                 argTypes, proto->getExtInfo(), paramInfos,
                                 RequiredArgs::forPrototypePlus(proto, 1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/ {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                              ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with less args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
    getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type.  This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(llvm::all_of(argTypes,
                      [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info.  We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift || info.getCC() == CC_SwiftAsync) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
  assert(!required.allowsOptionalArgs() ||
         required.getNumRequiredArgs() <= argTypes.size());

  void *buffer =
    operator new(totalSizeToAlloc<ArgInfo,             ExtParameterInfo>(
                                  argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->CmseNSCall = info.getCmseNSCall();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->NoCfCheck = info.getNoCfCheck();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
}  // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return std::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can occur here only in degenerate cases - all the fields are
      // the same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return std::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return std::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return std::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}
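// As an example, 'struct { int a[2]; _Complex float c; }' has expansion size
// 4: one leaf per array element plus the real and imaginary parts of the
// complex member.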

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
    BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr = CGF.Builder.CreateConstGEP2_32(
        BaseAddr.getElementType(), BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                         llvm::Function::arg_iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(
        *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
          LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
          ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress(*this);
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = &*AI++;
    auto imagValue = &*AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
    // primitive store.
    assert(isa<NoExpansion>(Exp.get()));
    if (LV.isBitField())
      EmitStoreThroughLValue(RValue::get(&*AI++), LV);
    else
      EmitStoreOfScalar(&*AI++, LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    forConstantArrayExpansion(
        *this, CAExp, Addr, [&](Address EltAddr) {
          CallArg EltArg = CallArg(
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
              CAExp->EltTy);
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      CallArg FldArg =
          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    auto RV = Arg.getKnownRValue();
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign,
                                           const Twine &Name = "tmp") {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of, try to gep into the struct to get
/// at its inner goodness.  Dive as deep as possible without entering an
/// element with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
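// For instance, with a source type of { { i32 } } and DstSize == 4, this
// dives through both struct levels and returns the address of the inner i32.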

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers.  This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
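// Worked example: coercing an i64 holding 0x0011223344556677 to i32 yields
// 0x00112233 on a big-endian target (the high bits are shifted down before
// truncating) and 0x44556677 on a little-endian one, matching what storing
// the i64 and reloading the same address as i32 would produce.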



/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
                                             DstSize.getFixedSize(), CGF);
    SrcTy = Src.getElementType();
  }

  llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (!SrcSize.isScalable() && !DstSize.isScalable() &&
      SrcSize.getFixedSize() >= DstSize.getFixedSize()) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
1264     Src = CGF.Builder.CreateBitCast(Src,
1265                                     Ty->getPointerTo(Src.getAddressSpace()));
1266     return CGF.Builder.CreateLoad(Src);
1267   }
1268 
1269   // If coercing a fixed vector to a scalable vector for ABI compatibility, and
1270   // the types match, use the llvm.experimental.vector.insert intrinsic to
1271   // perform the conversion.
1272   if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
1273     if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
1274       if (ScalableDst->getElementType() == FixedSrc->getElementType()) {
1275         auto *Load = CGF.Builder.CreateLoad(Src);
1276         auto *UndefVec = llvm::UndefValue::get(ScalableDst);
1277         auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
1278         return CGF.Builder.CreateInsertVector(ScalableDst, UndefVec, Load, Zero,
1279                                               "castScalableSve");
1280       }
1281     }
1282   }
1283 
1284   // Otherwise do coercion through memory. This is stupid, but simple.
1285   Address Tmp =
1286       CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
1287   CGF.Builder.CreateMemCpy(
1288       Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
1289       Src.getAlignment().getAsAlign(),
1290       llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize()));
1291   return CGF.Builder.CreateLoad(Tmp);
1292 }
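// Sketch of the common fast path above (hypothetical types): loading a
// struct { float x, y; } whose ABI coerce-to type is i64 satisfies
// SrcSize >= DstSize, so the source pointer is simply bitcast and loaded:
//
//   %0 = bitcast %struct.Point* %src to i64*
//   %1 = load i64, i64* %0
//
// Only mismatched or scalable sizes fall back to the memcpy-through-a-
// temporary path at the end of the function.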
1293 
1294 // Function to store a first-class aggregate into memory.  We prefer to
1295 // store the elements rather than the aggregate to be more friendly to
1296 // fast-isel.
1297 // FIXME: Do we need to recurse here?
1298 void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
1299                                          bool DestIsVolatile) {
1300   // Prefer scalar stores to first-class aggregate stores.
1301   if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
1302     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1303       Address EltPtr = Builder.CreateStructGEP(Dest, i);
1304       llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
1305       Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1306     }
1307   } else {
1308     Builder.CreateStore(Val, Dest, DestIsVolatile);
1309   }
1310 }
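// Hypothetical example of the flattening above: storing a value of IR type
// { i32, i32 } emits two scalar stores through struct GEPs instead of one
// first-class aggregate store:
//
//   %e0 = extractvalue { i32, i32 } %val, 0
//   %p0 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %dst, i32 0, i32 0
//   store i32 %e0, i32* %p0
//
// and likewise for element 1.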
1311 
1312 /// CreateCoercedStore - Create a store to \arg Dst from \arg Src,
1313 /// where the source and destination may have different types.  The
1314 /// destination's alignment is carried by \arg Dst itself.
1315 ///
1316 /// This safely handles the case when the src type is larger than the
1317 /// destination type; the upper bits of the src will be lost.
1318 static void CreateCoercedStore(llvm::Value *Src,
1319                                Address Dst,
1320                                bool DstIsVolatile,
1321                                CodeGenFunction &CGF) {
1322   llvm::Type *SrcTy = Src->getType();
1323   llvm::Type *DstTy = Dst.getElementType();
1324   if (SrcTy == DstTy) {
1325     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1326     return;
1327   }
1328 
1329   llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1330 
1331   if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1332     Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
1333                                              SrcSize.getFixedSize(), CGF);
1334     DstTy = Dst.getElementType();
1335   }
1336 
1337   llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
1338   llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
1339   if (SrcPtrTy && DstPtrTy &&
1340       SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
1341     Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
1342     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1343     return;
1344   }
1345 
1346   // If the source and destination are integer or pointer types, just do an
1347   // extension or truncation to the desired type.
1348   if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1349       (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1350     Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1351     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1352     return;
1353   }
1354 
1355   llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1356 
1357   // If store is legal, just bitcast the src pointer.
1358   if (isa<llvm::ScalableVectorType>(SrcTy) ||
1359       isa<llvm::ScalableVectorType>(DstTy) ||
1360       SrcSize.getFixedSize() <= DstSize.getFixedSize()) {
1361     Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1362     CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
1363   } else {
1364     // Otherwise do coercion through memory. This is stupid, but
1365     // simple.
1366 
1367     // Generally SrcSize is never greater than DstSize, since this means we are
1368     // losing bits. However, this can happen in cases where the structure has
1369     // additional padding, for example due to a user specified alignment.
1370     //
1371     // FIXME: Assert that we aren't truncating non-padding bits when we
1372     // have access to that information.
1373     Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1374     CGF.Builder.CreateStore(Src, Tmp);
1375     CGF.Builder.CreateMemCpy(
1376         Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
1377         Tmp.getAlignment().getAsAlign(),
1378         llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize()));
1379   }
1380 }
1381 
1382 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1383                                    const ABIArgInfo &info) {
1384   if (unsigned offset = info.getDirectOffset()) {
1385     addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1386     addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1387                                              CharUnits::fromQuantity(offset));
1388     addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1389   }
1390   return addr;
1391 }
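// Sketch (assuming a direct offset of 8 bytes): the address is advanced
// through i8* so the byte GEP is exact, then re-cast to the coerce-to type:
//
//   %raw = bitcast %T* %addr to i8*
//   %off = getelementptr inbounds i8, i8* %raw, i64 8
//   %dst = bitcast i8* %off to <coerce-to>*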
1392 
1393 namespace {
1394 
1395 /// Encapsulates information about the way function arguments from
1396 /// CGFunctionInfo should be passed to the actual LLVM IR function.
1397 class ClangToLLVMArgMapping {
1398   static const unsigned InvalidIndex = ~0U;
1399   unsigned InallocaArgNo;
1400   unsigned SRetArgNo;
1401   unsigned TotalIRArgs;
1402 
1403   /// The LLVM IR function arguments corresponding to a single Clang argument.
1404   struct IRArgs {
1405     unsigned PaddingArgIndex;
1406     // Argument is expanded to IR arguments at positions
1407     // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1408     unsigned FirstArgIndex;
1409     unsigned NumberOfArgs;
1410 
1411     IRArgs()
1412         : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1413           NumberOfArgs(0) {}
1414   };
1415 
1416   SmallVector<IRArgs, 8> ArgInfo;
1417 
1418 public:
1419   ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1420                         bool OnlyRequiredArgs = false)
1421       : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1422         ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1423     construct(Context, FI, OnlyRequiredArgs);
1424   }
1425 
1426   bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1427   unsigned getInallocaArgNo() const {
1428     assert(hasInallocaArg());
1429     return InallocaArgNo;
1430   }
1431 
1432   bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1433   unsigned getSRetArgNo() const {
1434     assert(hasSRetArg());
1435     return SRetArgNo;
1436   }
1437 
1438   unsigned totalIRArgs() const { return TotalIRArgs; }
1439 
1440   bool hasPaddingArg(unsigned ArgNo) const {
1441     assert(ArgNo < ArgInfo.size());
1442     return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1443   }
1444   unsigned getPaddingArgNo(unsigned ArgNo) const {
1445     assert(hasPaddingArg(ArgNo));
1446     return ArgInfo[ArgNo].PaddingArgIndex;
1447   }
1448 
1449   /// Returns the index of the first IR argument corresponding to ArgNo,
1450   /// and the number of such IR arguments.
1451   std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1452     assert(ArgNo < ArgInfo.size());
1453     return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1454                           ArgInfo[ArgNo].NumberOfArgs);
1455   }
1456 
1457 private:
1458   void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1459                  bool OnlyRequiredArgs);
1460 };
1461 
1462 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1463                                       const CGFunctionInfo &FI,
1464                                       bool OnlyRequiredArgs) {
1465   unsigned IRArgNo = 0;
1466   bool SwapThisWithSRet = false;
1467   const ABIArgInfo &RetAI = FI.getReturnInfo();
1468 
1469   if (RetAI.getKind() == ABIArgInfo::Indirect) {
1470     SwapThisWithSRet = RetAI.isSRetAfterThis();
1471     SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1472   }
1473 
1474   unsigned ArgNo = 0;
1475   unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1476   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1477        ++I, ++ArgNo) {
1478     assert(I != FI.arg_end());
1479     QualType ArgType = I->type;
1480     const ABIArgInfo &AI = I->info;
1481     // Collect data about IR arguments corresponding to Clang argument ArgNo.
1482     auto &IRArgs = ArgInfo[ArgNo];
1483 
1484     if (AI.getPaddingType())
1485       IRArgs.PaddingArgIndex = IRArgNo++;
1486 
1487     switch (AI.getKind()) {
1488     case ABIArgInfo::Extend:
1489     case ABIArgInfo::Direct: {
1490       // FIXME: handle sseregparm someday...
1491       llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1492       if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1493         IRArgs.NumberOfArgs = STy->getNumElements();
1494       } else {
1495         IRArgs.NumberOfArgs = 1;
1496       }
1497       break;
1498     }
1499     case ABIArgInfo::Indirect:
1500     case ABIArgInfo::IndirectAliased:
1501       IRArgs.NumberOfArgs = 1;
1502       break;
1503     case ABIArgInfo::Ignore:
1504     case ABIArgInfo::InAlloca:
1505       // 'ignore' and 'inalloca' don't have matching LLVM parameters.
1506       IRArgs.NumberOfArgs = 0;
1507       break;
1508     case ABIArgInfo::CoerceAndExpand:
1509       IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1510       break;
1511     case ABIArgInfo::Expand:
1512       IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1513       break;
1514     }
1515 
1516     if (IRArgs.NumberOfArgs > 0) {
1517       IRArgs.FirstArgIndex = IRArgNo;
1518       IRArgNo += IRArgs.NumberOfArgs;
1519     }
1520 
1521     // Skip over the sret parameter when it comes second.  We already handled it
1522     // above.
1523     if (IRArgNo == 1 && SwapThisWithSRet)
1524       IRArgNo++;
1525   }
1526   assert(ArgNo == ArgInfo.size());
1527 
1528   if (FI.usesInAlloca())
1529     InallocaArgNo = IRArgNo++;
1530 
1531   TotalIRArgs = IRArgNo;
1532 }
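// Worked example (hypothetical signature): for
//   struct S { int a, b; };
//   S f(S s, int x);
// on a target that returns S indirectly and flattens it into two i32 IR
// arguments, the mapping is: IR arg 0 = sret pointer, IR args 1-2 = s, IR
// arg 3 = x; totalIRArgs() == 4 and getIRArgs(0) == {1, 2}.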
1533 }  // namespace
1534 
1535 /***/
1536 
1537 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1538   const auto &RI = FI.getReturnInfo();
1539   return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1540 }
1541 
1542 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1543   return ReturnTypeUsesSRet(FI) &&
1544          getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1545 }
1546 
1547 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1548   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1549     switch (BT->getKind()) {
1550     default:
1551       return false;
1552     case BuiltinType::Float:
1553       return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1554     case BuiltinType::Double:
1555       return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1556     case BuiltinType::LongDouble:
1557       return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1558     }
1559   }
1560 
1561   return false;
1562 }
1563 
1564 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1565   if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1566     if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1567       if (BT->getKind() == BuiltinType::LongDouble)
1568         return getTarget().useObjCFP2RetForComplexLongDouble();
1569     }
1570   }
1571 
1572   return false;
1573 }
1574 
1575 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1576   const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1577   return GetFunctionType(FI);
1578 }
1579 
1580 llvm::FunctionType *
1581 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1582 
1583   bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1584   (void)Inserted;
1585   assert(Inserted && "Recursively being processed?");
1586 
1587   llvm::Type *resultType = nullptr;
1588   const ABIArgInfo &retAI = FI.getReturnInfo();
1589   switch (retAI.getKind()) {
1590   case ABIArgInfo::Expand:
1591   case ABIArgInfo::IndirectAliased:
1592     llvm_unreachable("Invalid ABI kind for return argument");
1593 
1594   case ABIArgInfo::Extend:
1595   case ABIArgInfo::Direct:
1596     resultType = retAI.getCoerceToType();
1597     break;
1598 
1599   case ABIArgInfo::InAlloca:
1600     if (retAI.getInAllocaSRet()) {
1601       // sret things on win32 aren't void; they return the sret pointer.
1602       QualType ret = FI.getReturnType();
1603       llvm::Type *ty = ConvertType(ret);
1604       unsigned addressSpace = Context.getTargetAddressSpace(ret);
1605       resultType = llvm::PointerType::get(ty, addressSpace);
1606     } else {
1607       resultType = llvm::Type::getVoidTy(getLLVMContext());
1608     }
1609     break;
1610 
1611   case ABIArgInfo::Indirect:
1612   case ABIArgInfo::Ignore:
1613     resultType = llvm::Type::getVoidTy(getLLVMContext());
1614     break;
1615 
1616   case ABIArgInfo::CoerceAndExpand:
1617     resultType = retAI.getUnpaddedCoerceAndExpandType();
1618     break;
1619   }
1620 
1621   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1622   SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1623 
1624   // Add type for sret argument.
1625   if (IRFunctionArgs.hasSRetArg()) {
1626     QualType Ret = FI.getReturnType();
1627     llvm::Type *Ty = ConvertType(Ret);
1628     unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1629     ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1630         llvm::PointerType::get(Ty, AddressSpace);
1631   }
1632 
1633   // Add type for inalloca argument.
1634   if (IRFunctionArgs.hasInallocaArg()) {
1635     auto ArgStruct = FI.getArgStruct();
1636     assert(ArgStruct);
1637     ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1638   }
1639 
1640   // Add in all of the required arguments.
1641   unsigned ArgNo = 0;
1642   CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1643                                      ie = it + FI.getNumRequiredArgs();
1644   for (; it != ie; ++it, ++ArgNo) {
1645     const ABIArgInfo &ArgInfo = it->info;
1646 
1647     // Insert a padding type to ensure proper alignment.
1648     if (IRFunctionArgs.hasPaddingArg(ArgNo))
1649       ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1650           ArgInfo.getPaddingType();
1651 
1652     unsigned FirstIRArg, NumIRArgs;
1653     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1654 
1655     switch (ArgInfo.getKind()) {
1656     case ABIArgInfo::Ignore:
1657     case ABIArgInfo::InAlloca:
1658       assert(NumIRArgs == 0);
1659       break;
1660 
1661     case ABIArgInfo::Indirect: {
1662       assert(NumIRArgs == 1);
1663       // Indirect arguments are always on the stack, which is in the alloca
1664       // address space.
1664       llvm::Type *LTy = ConvertTypeForMem(it->type);
1665       ArgTypes[FirstIRArg] = LTy->getPointerTo(
1666           CGM.getDataLayout().getAllocaAddrSpace());
1667       break;
1668     }
1669     case ABIArgInfo::IndirectAliased: {
1670       assert(NumIRArgs == 1);
1671       llvm::Type *LTy = ConvertTypeForMem(it->type);
1672       ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace());
1673       break;
1674     }
1675     case ABIArgInfo::Extend:
1676     case ABIArgInfo::Direct: {
1677       // Fast-isel and the optimizer generally like scalar values better than
1678       // FCAs, so we flatten them if this is safe to do for this argument.
1679       llvm::Type *argType = ArgInfo.getCoerceToType();
1680       llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1681       if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1682         assert(NumIRArgs == st->getNumElements());
1683         for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1684           ArgTypes[FirstIRArg + i] = st->getElementType(i);
1685       } else {
1686         assert(NumIRArgs == 1);
1687         ArgTypes[FirstIRArg] = argType;
1688       }
1689       break;
1690     }
1691 
1692     case ABIArgInfo::CoerceAndExpand: {
1693       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1694       for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1695         *ArgTypesIter++ = EltTy;
1696       }
1697       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1698       break;
1699     }
1700 
1701     case ABIArgInfo::Expand:
1702       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1703       getExpandedTypes(it->type, ArgTypesIter);
1704       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1705       break;
1706     }
1707   }
1708 
1709   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1710   assert(Erased && "Not in set?");
1711 
1712   return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1713 }
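// Illustration (hypothetical declaration, x86-64-style lowering assumed):
//   struct Big { char buf[32]; };
//   Big g(int n, ...);
// becomes an IR function type with the sret pointer first and the variadic
// marker preserved:
//   void (%struct.Big*, i32, ...)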
1714 
1715 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1716   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1717   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1718 
1719   if (!isFuncTypeConvertible(FPT))
1720     return llvm::StructType::get(getLLVMContext());
1721 
1722   return GetFunctionType(GD);
1723 }
1724 
1725 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1726                                                llvm::AttrBuilder &FuncAttrs,
1727                                                const FunctionProtoType *FPT) {
1728   if (!FPT)
1729     return;
1730 
1731   if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1732       FPT->isNothrow())
1733     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1734 }
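// For example, a prototype such as `void h() noexcept;` has a resolved,
// non-throwing exception spec, so it picks up llvm::Attribute::NoUnwind here.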
1735 
1736 bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context,
1737                                           QualType ReturnType) {
1738   // We can't just discard the return value for a record type with a
1739   // complex destructor or a non-trivially copyable type.
1740   if (const RecordType *RT =
1741           ReturnType.getCanonicalType()->getAs<RecordType>()) {
1742     if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1743       return ClassDecl->hasTrivialDestructor();
1744   }
1745   return ReturnType.isTriviallyCopyableType(Context);
1746 }
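// E.g. a call returning std::string may not have its result silently dropped
// (non-trivial destructor), whereas one returning int or a trivially
// copyable struct may.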
1747 
1748 void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
1749                                                  bool HasOptnone,
1750                                                  bool AttrOnCallSite,
1751                                                llvm::AttrBuilder &FuncAttrs) {
1752   // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1753   if (!HasOptnone) {
1754     if (CodeGenOpts.OptimizeSize)
1755       FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1756     if (CodeGenOpts.OptimizeSize == 2)
1757       FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1758   }
1759 
1760   if (CodeGenOpts.DisableRedZone)
1761     FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1762   if (CodeGenOpts.IndirectTlsSegRefs)
1763     FuncAttrs.addAttribute("indirect-tls-seg-refs");
1764   if (CodeGenOpts.NoImplicitFloat)
1765     FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1766 
1767   if (AttrOnCallSite) {
1768     // Attributes that should go on the call site only.
1769     if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name))
1770       FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1771     if (!CodeGenOpts.TrapFuncName.empty())
1772       FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1773   } else {
1774     StringRef FpKind;
1775     switch (CodeGenOpts.getFramePointer()) {
1776     case CodeGenOptions::FramePointerKind::None:
1777       FpKind = "none";
1778       break;
1779     case CodeGenOptions::FramePointerKind::NonLeaf:
1780       FpKind = "non-leaf";
1781       break;
1782     case CodeGenOptions::FramePointerKind::All:
1783       FpKind = "all";
1784       break;
1785     }
1786     FuncAttrs.addAttribute("frame-pointer", FpKind);
1787 
1788     if (CodeGenOpts.LessPreciseFPMAD)
1789       FuncAttrs.addAttribute("less-precise-fpmad", "true");
1790 
1791     if (CodeGenOpts.NullPointerIsValid)
1792       FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
1793 
1794     if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE())
1795       FuncAttrs.addAttribute("denormal-fp-math",
1796                              CodeGenOpts.FPDenormalMode.str());
1797     if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) {
1798       FuncAttrs.addAttribute(
1799           "denormal-fp-math-f32",
1800           CodeGenOpts.FP32DenormalMode.str());
1801     }
1802 
1803     if (LangOpts.getFPExceptionMode() == LangOptions::FPE_Ignore)
1804       FuncAttrs.addAttribute("no-trapping-math", "true");
1805 
1806     // Strict (compliant) code is the default, so only add this attribute to
1807     // indicate that we are trying to workaround a problem case.
1808     if (!CodeGenOpts.StrictFloatCastOverflow)
1809       FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1810 
1811     // TODO: Are these all needed?
1812     // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1813     if (LangOpts.NoHonorInfs)
1814       FuncAttrs.addAttribute("no-infs-fp-math", "true");
1815     if (LangOpts.NoHonorNaNs)
1816       FuncAttrs.addAttribute("no-nans-fp-math", "true");
1817     if (LangOpts.UnsafeFPMath)
1818       FuncAttrs.addAttribute("unsafe-fp-math", "true");
1819     if (CodeGenOpts.SoftFloat)
1820       FuncAttrs.addAttribute("use-soft-float", "true");
1821     FuncAttrs.addAttribute("stack-protector-buffer-size",
1822                            llvm::utostr(CodeGenOpts.SSPBufferSize));
1823     if (LangOpts.NoSignedZero)
1824       FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");
1825 
1826     // TODO: Reciprocal estimate codegen options should apply to instructions?
1827     const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1828     if (!Recips.empty())
1829       FuncAttrs.addAttribute("reciprocal-estimates",
1830                              llvm::join(Recips, ","));
1831 
1832     if (!CodeGenOpts.PreferVectorWidth.empty() &&
1833         CodeGenOpts.PreferVectorWidth != "none")
1834       FuncAttrs.addAttribute("prefer-vector-width",
1835                              CodeGenOpts.PreferVectorWidth);
1836 
1837     if (CodeGenOpts.StackRealignment)
1838       FuncAttrs.addAttribute("stackrealign");
1839     if (CodeGenOpts.Backchain)
1840       FuncAttrs.addAttribute("backchain");
1841     if (CodeGenOpts.EnableSegmentedStacks)
1842       FuncAttrs.addAttribute("split-stack");
1843 
1844     if (CodeGenOpts.SpeculativeLoadHardening)
1845       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1846   }
1847 
1848   if (getLangOpts().assumeFunctionsAreConvergent()) {
1849     // Conservatively, mark all functions and calls in CUDA and OpenCL as
1850     // convergent (meaning, they may call an intrinsically convergent op, such
1851     // as __syncthreads() / barrier(), and so can't have certain optimizations
1852     // applied around them).  LLVM will remove this attribute where it safely
1853     // can.
1854     FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1855   }
1856 
1857   if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1858     // Exceptions aren't supported in CUDA device code.
1859     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1860   }
1861 
1862   for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1863     StringRef Var, Value;
1864     std::tie(Var, Value) = Attr.split('=');
1865     FuncAttrs.addAttribute(Var, Value);
1866   }
1867 }
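// For instance, compiling with -fno-omit-frame-pointer normally selects
// FramePointerKind::All above, so non-call-site attribute sets carry
// "frame-pointer"="all".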
1868 
1869 void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
1870   llvm::AttrBuilder FuncAttrs;
1871   getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
1872                                /* AttrOnCallSite = */ false, FuncAttrs);
1873   // TODO: call GetCPUAndFeaturesAttributes?
1874   F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1875 }
1876 
1877 void CodeGenModule::addDefaultFunctionDefinitionAttributes(
1878                                                    llvm::AttrBuilder &attrs) {
1879   getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
1880                                /*for call*/ false, attrs);
1881   GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
1882 }
1883 
1884 static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
1885                                    const LangOptions &LangOpts,
1886                                    const NoBuiltinAttr *NBA = nullptr) {
1887   auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
1888     SmallString<32> AttributeName;
1889     AttributeName += "no-builtin-";
1890     AttributeName += BuiltinName;
1891     FuncAttrs.addAttribute(AttributeName);
1892   };
1893 
1894   // First, handle the language options passed through -fno-builtin.
1895   if (LangOpts.NoBuiltin) {
1896     // -fno-builtin disables them all.
1897     FuncAttrs.addAttribute("no-builtins");
1898     return;
1899   }
1900 
1901   // Then, add attributes for builtins specified through -fno-builtin-<name>.
1902   llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);
1903 
1904   // Now, let's check the __attribute__((no_builtin("..."))) attribute added
1905   // to the source.
1906   if (!NBA)
1907     return;
1908 
1909   // If there is a wildcard in the builtin names specified through the
1910   // attribute, disable them all.
1911   if (llvm::is_contained(NBA->builtinNames(), "*")) {
1912     FuncAttrs.addAttribute("no-builtins");
1913     return;
1914   }
1915 
1916   // And last, add the rest of the builtin names.
1917   llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
1918 }
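// Examples of the resulting attributes: -fno-builtin-memcpy produces
// "no-builtin-memcpy"; __attribute__((no_builtin("*"))) or plain -fno-builtin
// collapses everything to a single "no-builtins".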
1919 
1920 static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types,
1921                              const llvm::DataLayout &DL, const ABIArgInfo &AI,
1922                              bool CheckCoerce = true) {
1923   llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
1924   if (AI.getKind() == ABIArgInfo::Indirect)
1925     return true;
1926   if (AI.getKind() == ABIArgInfo::Extend)
1927     return true;
1928   if (!DL.typeSizeEqualsStoreSize(Ty))
1929     // TODO: This will result in a modest number of values not marked noundef
1930     // when they could be. We care about values that *invisibly* contain undef
1931     // bits from the perspective of LLVM IR.
1932     return false;
1933   if (CheckCoerce && AI.canHaveCoerceToType()) {
1934     llvm::Type *CoerceTy = AI.getCoerceToType();
1935     if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
1936                                   DL.getTypeSizeInBits(Ty)))
1937       // If we're coercing to a type with a greater size than the canonical one,
1938       // we're introducing new undef bits.
1939       // Coercing to a type of smaller or equal size is ok, as we know that
1940       // there's no internal padding (typeSizeEqualsStoreSize).
1941       return false;
1942   }
1943   if (QTy->isExtIntType())
1944     return true;
1945   if (QTy->isReferenceType())
1946     return true;
1947   if (QTy->isNullPtrType())
1948     return false;
1949   if (QTy->isMemberPointerType())
1950     // TODO: Some member pointers are `noundef`, but it depends on the ABI. For
1951     // now, never mark them.
1952     return false;
1953   if (QTy->isScalarType()) {
1954     if (const ComplexType *Complex = dyn_cast<ComplexType>(QTy))
1955       return DetermineNoUndef(Complex->getElementType(), Types, DL, AI, false);
1956     return true;
1957   }
1958   if (const VectorType *Vector = dyn_cast<VectorType>(QTy))
1959     return DetermineNoUndef(Vector->getElementType(), Types, DL, AI, false);
1960   if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
1961     return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false);
1962   if (const ArrayType *Array = dyn_cast<ArrayType>(QTy))
1963     return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false);
1964 
1965   // TODO: Some structs may be `noundef`, in specific situations.
1966   return false;
1967 }
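// Summary by example: plain scalars such as `int`, reference types, and
// _ExtInt types qualify for noundef; nullptr_t, member pointers, and (for
// now) all struct types do not, and vectors, matrices, and arrays qualify
// only if their element type does.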
1968 
1969 /// Construct the IR attribute list of a function or call.
1970 ///
1971 /// When adding an attribute, please consider where it should be handled:
1972 ///
1973 ///   - getDefaultFunctionAttributes is for attributes that are essentially
1974 ///     part of the global target configuration (but perhaps can be
1975 ///     overridden on a per-function basis).  Adding attributes there
1976 ///     will cause them to also be set in frontends that build on Clang's
1977 ///     target-configuration logic, as well as for code defined in library
1978 ///     modules such as CUDA's libdevice.
1979 ///
1980 ///   - ConstructAttributeList builds on top of getDefaultFunctionAttributes
1981 ///     and adds declaration-specific, convention-specific, and
1982 ///     frontend-specific logic.  The last is of particular importance:
1983 ///     attributes that restrict how the frontend generates code must be
1984 ///     added here rather than getDefaultFunctionAttributes.
1985 ///
1986 void CodeGenModule::ConstructAttributeList(StringRef Name,
1987                                            const CGFunctionInfo &FI,
1988                                            CGCalleeInfo CalleeInfo,
1989                                            llvm::AttributeList &AttrList,
1990                                            unsigned &CallingConv,
1991                                            bool AttrOnCallSite, bool IsThunk) {
1992   llvm::AttrBuilder FuncAttrs;
1993   llvm::AttrBuilder RetAttrs;
1994 
1995   // Collect function IR attributes from the CC lowering.
1996   // We'll collect the parameter and result attributes later.
1997   CallingConv = FI.getEffectiveCallingConvention();
1998   if (FI.isNoReturn())
1999     FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2000   if (FI.isCmseNSCall())
2001     FuncAttrs.addAttribute("cmse_nonsecure_call");
2002 
2003   // Collect function IR attributes from the callee prototype if we have one.
2004   AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
2005                                      CalleeInfo.getCalleeFunctionProtoType());
2006 
2007   const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
2008 
2009   bool HasOptnone = false;
2010   // The NoBuiltinAttr attached to the target FunctionDecl.
2011   const NoBuiltinAttr *NBA = nullptr;
2012 
2013   // Collect function IR attributes based on declaration-specific
2014   // information.
2015   // FIXME: handle sseregparm someday...
2016   if (TargetDecl) {
2017     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
2018       FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2019     if (TargetDecl->hasAttr<NoThrowAttr>())
2020       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2021     if (TargetDecl->hasAttr<NoReturnAttr>())
2022       FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2023     if (TargetDecl->hasAttr<ColdAttr>())
2024       FuncAttrs.addAttribute(llvm::Attribute::Cold);
2025     if (TargetDecl->hasAttr<HotAttr>())
2026       FuncAttrs.addAttribute(llvm::Attribute::Hot);
2027     if (TargetDecl->hasAttr<NoDuplicateAttr>())
2028       FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
2029     if (TargetDecl->hasAttr<ConvergentAttr>())
2030       FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2031 
2032     if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2033       AddAttributesFromFunctionProtoType(
2034           getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
2035       if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2036         // A sane operator new returns a non-aliasing pointer.
2037         auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2038         if (getCodeGenOpts().AssumeSaneOperatorNew &&
2039             (Kind == OO_New || Kind == OO_Array_New))
2040           RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2041       }
2042       const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
2043       const bool IsVirtualCall = MD && MD->isVirtual();
2044       // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
2045       // virtual function. These attributes are not inherited by overrides.
2046       if (!(AttrOnCallSite && IsVirtualCall)) {
2047         if (Fn->isNoReturn())
2048           FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2049         NBA = Fn->getAttr<NoBuiltinAttr>();
2050       }
2051       // Only place nomerge attribute on call sites, never functions. This
2052       // allows it to work on indirect virtual function calls.
2053       if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
2054         FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
2055 
2056       // Add known guaranteed alignment for allocation functions.
2057       if (unsigned BuiltinID = Fn->getBuiltinID()) {
2058         switch (BuiltinID) {
2059         case Builtin::BIaligned_alloc:
2060         case Builtin::BIcalloc:
2061         case Builtin::BImalloc:
2062         case Builtin::BImemalign:
2063         case Builtin::BIrealloc:
2064         case Builtin::BIstrdup:
2065         case Builtin::BIstrndup:
2066           RetAttrs.addAlignmentAttr(Context.getTargetInfo().getNewAlign() /
2067                                     Context.getTargetInfo().getCharWidth());
2068           break;
2069         default:
2070           break;
2071         }
2072       }
2073     }
2074 
2075     // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
2076     if (TargetDecl->hasAttr<ConstAttr>()) {
2077       FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
2078       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2079       // gcc specifies that 'const' functions have greater restrictions than
2080       // 'pure' functions, so they also cannot have infinite loops.
2081       FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2082     } else if (TargetDecl->hasAttr<PureAttr>()) {
2083       FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
2084       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2085       // gcc specifies that 'pure' functions cannot have infinite loops.
2086       FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2087     } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
2088       FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
2089       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2090     }
2091     if (TargetDecl->hasAttr<RestrictAttr>())
2092       RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2093     if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
2094         !CodeGenOpts.NullPointerIsValid)
2095       RetAttrs.addAttribute(llvm::Attribute::NonNull);
2096     if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2097       FuncAttrs.addAttribute("no_caller_saved_registers");
2098     if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
2099       FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2100     if (TargetDecl->hasAttr<LeafAttr>())
2101       FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2102 
2103     HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
2104     if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
2105       Optional<unsigned> NumElemsParam;
2106       if (AllocSize->getNumElemsParam().isValid())
2107         NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2108       FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2109                                  NumElemsParam);
2110     }
2111 
2112     if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
2113       if (getLangOpts().OpenCLVersion <= 120) {
2114         // In OpenCL v1.2, work groups are always uniform.
2115         FuncAttrs.addAttribute("uniform-work-group-size", "true");
2116       } else {
2117         // In OpenCL v2.0, work groups may or may not be uniform.
2118         // The '-cl-uniform-work-group-size' compile option gives the
2119         // compiler a hint that the global work-size is a multiple of
2120         // the work-group size specified to clEnqueueNDRangeKernel
2121         // (i.e. work groups are uniform).
2122         FuncAttrs.addAttribute("uniform-work-group-size",
2123                                llvm::toStringRef(CodeGenOpts.UniformWGSize));
2124       }
2125     }
2126 
2127     std::string AssumptionValueStr;
2128     for (AssumptionAttr *AssumptionA :
2129          TargetDecl->specific_attrs<AssumptionAttr>()) {
2130       std::string AS = AssumptionA->getAssumption().str();
2131       if (!AS.empty() && !AssumptionValueStr.empty())
2132         AssumptionValueStr += ",";
2133       AssumptionValueStr += AS;
2134     }
2135 
2136     if (!AssumptionValueStr.empty())
2137       FuncAttrs.addAttribute(llvm::AssumptionAttrKey, AssumptionValueStr);
2138   }
2139 
2140   // Attach "no-builtins" attributes to:
2141   // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
2142   // * definitions: "no-builtins" or "no-builtin-<name>" only.
2143   // The attributes can come from:
2144   // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
2145   // * FunctionDecl attributes: __attribute__((no_builtin(...)))
2146   addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);
2147 
2148   // Collect function IR attributes based on global settings.
2149   getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2150 
2151   // Override some default IR attributes based on declaration-specific
2152   // information.
2153   if (TargetDecl) {
2154     if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
2155       FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2156     if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
2157       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2158     if (TargetDecl->hasAttr<NoSplitStackAttr>())
2159       FuncAttrs.removeAttribute("split-stack");
2160 
2161     // Add NonLazyBind attribute to function declarations when -fno-plt
2162     // is used.
2163     // FIXME: what if we just haven't processed the function definition
2164     // yet, or if it's an external definition like C99 inline?
2165     if (CodeGenOpts.NoPLT) {
2166       if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2167         if (!Fn->isDefined() && !AttrOnCallSite) {
2168           FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2169         }
2170       }
2171     }
2172   }
2173 
2174   // Add "sample-profile-suffix-elision-policy" attribute for internal linkage
2175   // functions with -funique-internal-linkage-names.
2176   if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
2177     if (isa<FunctionDecl>(TargetDecl)) {
2178       if (this->getFunctionLinkage(CalleeInfo.getCalleeDecl()) ==
2179           llvm::GlobalValue::InternalLinkage)
2180         FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",
2181                                "selected");
2182     }
2183   }
2184 
2185   // Collect non-call-site function IR attributes from declaration-specific
2186   // information.
2187   if (!AttrOnCallSite) {
2188     if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
2189       FuncAttrs.addAttribute("cmse_nonsecure_entry");
2190 
2191     // Decide whether tail calls should be disabled.
2192     auto shouldDisableTailCalls = [&] {
2193       // Should this be honored in getDefaultFunctionAttributes?
2194       if (CodeGenOpts.DisableTailCalls)
2195         return true;
2196 
2197       if (!TargetDecl)
2198         return false;
2199 
2200       if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
2201           TargetDecl->hasAttr<AnyX86InterruptAttr>())
2202         return true;
2203 
2204       if (CodeGenOpts.NoEscapingBlockTailCalls) {
2205         if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2206           if (!BD->doesNotEscape())
2207             return true;
2208       }
2209 
2210       return false;
2211     };
2212     if (shouldDisableTailCalls())
2213       FuncAttrs.addAttribute("disable-tail-calls", "true");
2214 
2215     // CPU/feature overrides.  addDefaultFunctionDefinitionAttributes
2216     // handles these separately to set them based on the global defaults.
2217     GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
2218   }
2219 
2220   // Collect attributes from arguments and return values.
2221   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
2222 
2223   QualType RetTy = FI.getReturnType();
2224   const ABIArgInfo &RetAI = FI.getReturnInfo();
2225   const llvm::DataLayout &DL = getDataLayout();
2226 
2227   // C++ explicitly makes returning undefined values UB. C's rule only applies
2228   // to used values, so we never mark them noundef for now.
2229   bool HasStrictReturn = getLangOpts().CPlusPlus;
2230   if (TargetDecl) {
2231     if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl))
2232       HasStrictReturn &= !FDecl->isExternC();
2233     else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl))
2234       // Function pointer
2235       HasStrictReturn &= !VDecl->isExternC();
2236   }
2237 
2238   // We don't want to be too aggressive with the return checking, unless
2239   // it's explicit in the code opts or we're using an appropriate sanitizer.
2240   // Try to respect what the programmer intended.
2241   HasStrictReturn &= getCodeGenOpts().StrictReturn ||
2242                      !MayDropFunctionReturn(getContext(), RetTy) ||
2243                      getLangOpts().Sanitize.has(SanitizerKind::Memory) ||
2244                      getLangOpts().Sanitize.has(SanitizerKind::Return);
2245 
2246   // Determine if the return type could be partially undef
2247   if (CodeGenOpts.EnableNoundefAttrs && HasStrictReturn) {
2248     if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect &&
2249         DetermineNoUndef(RetTy, getTypes(), DL, RetAI))
2250       RetAttrs.addAttribute(llvm::Attribute::NoUndef);
2251   }
2252 
2253   switch (RetAI.getKind()) {
2254   case ABIArgInfo::Extend:
2255     if (RetAI.isSignExt())
2256       RetAttrs.addAttribute(llvm::Attribute::SExt);
2257     else
2258       RetAttrs.addAttribute(llvm::Attribute::ZExt);
2259     LLVM_FALLTHROUGH;
2260   case ABIArgInfo::Direct:
2261     if (RetAI.getInReg())
2262       RetAttrs.addAttribute(llvm::Attribute::InReg);
2263     break;
2264   case ABIArgInfo::Ignore:
2265     break;
2266 
2267   case ABIArgInfo::InAlloca:
2268   case ABIArgInfo::Indirect: {
2269     // inalloca and sret disable readnone and readonly
2270     FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2271       .removeAttribute(llvm::Attribute::ReadNone);
2272     break;
2273   }
2274 
2275   case ABIArgInfo::CoerceAndExpand:
2276     break;
2277 
2278   case ABIArgInfo::Expand:
2279   case ABIArgInfo::IndirectAliased:
2280     llvm_unreachable("Invalid ABI kind for return argument");
2281   }
2282 
2283   if (!IsThunk) {
2284     // FIXME: fix this properly, https://reviews.llvm.org/D100388
2285     if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2286       QualType PTy = RefTy->getPointeeType();
2287       if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2288         RetAttrs.addDereferenceableAttr(
2289             getMinimumObjectSize(PTy).getQuantity());
2290       if (getContext().getTargetAddressSpace(PTy) == 0 &&
2291           !CodeGenOpts.NullPointerIsValid)
2292         RetAttrs.addAttribute(llvm::Attribute::NonNull);
2293       if (PTy->isObjectType()) {
2294         llvm::Align Alignment =
2295             getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
2296         RetAttrs.addAlignmentAttr(Alignment);
2297       }
2298     }
2299   }
2300 
2301   bool hasUsedSRet = false;
2302   SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2303 
2304   // Attach attributes to sret.
2305   if (IRFunctionArgs.hasSRetArg()) {
2306     llvm::AttrBuilder SRETAttrs;
2307     SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
2308     hasUsedSRet = true;
2309     if (RetAI.getInReg())
2310       SRETAttrs.addAttribute(llvm::Attribute::InReg);
2311     SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
2312     ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2313         llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2314   }
2315 
2316   // Attach attributes to inalloca argument.
2317   if (IRFunctionArgs.hasInallocaArg()) {
2318     llvm::AttrBuilder Attrs;
2319     Attrs.addInAllocaAttr(FI.getArgStruct());
2320     ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2321         llvm::AttributeSet::get(getLLVMContext(), Attrs);
2322   }
2323 
2324   // Apply `nonnull`, `dereferenceable(N)` and `align N` to the `this` argument,
2325   // unless this is a thunk function.
2326   // FIXME: fix this properly, https://reviews.llvm.org/D100388
2327   if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() &&
2328       !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) {
2329     auto IRArgs = IRFunctionArgs.getIRArgs(0);
2330 
2331     assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");
2332 
2333     llvm::AttrBuilder Attrs;
2334 
2335     QualType ThisTy =
2336         FI.arg_begin()->type.castAs<PointerType>()->getPointeeType();
2337 
2338     if (!CodeGenOpts.NullPointerIsValid &&
2339         getContext().getTargetAddressSpace(FI.arg_begin()->type) == 0) {
2340       Attrs.addAttribute(llvm::Attribute::NonNull);
2341       Attrs.addDereferenceableAttr(getMinimumObjectSize(ThisTy).getQuantity());
2342     } else {
2343       // FIXME dereferenceable should be correct here, regardless of
2344       // NullPointerIsValid. However, dereferenceable currently does not always
2345       // respect NullPointerIsValid and may imply nonnull and break the program.
2346       // See https://reviews.llvm.org/D66618 for discussions.
2347       Attrs.addDereferenceableOrNullAttr(
2348           getMinimumObjectSize(
2349               FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
2350               .getQuantity());
2351     }
2352 
2353     llvm::Align Alignment =
2354         getNaturalTypeAlignment(ThisTy, /*BaseInfo=*/nullptr,
2355                                 /*TBAAInfo=*/nullptr, /*forPointeeType=*/true)
2356             .getAsAlign();
2357     Attrs.addAlignmentAttr(Alignment);
2358 
2359     ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);
2360   }
2361 
2362   unsigned ArgNo = 0;
2363   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2364                                           E = FI.arg_end();
2365        I != E; ++I, ++ArgNo) {
2366     QualType ParamType = I->type;
2367     const ABIArgInfo &AI = I->info;
2368     llvm::AttrBuilder Attrs;
2369 
2370     // Add attribute for padding argument, if necessary.
2371     if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2372       if (AI.getPaddingInReg()) {
2373         ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2374             llvm::AttributeSet::get(
2375                 getLLVMContext(),
2376                 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2377       }
2378     }
2379 
2380     // Decide whether the argument we're handling could be partially undef
2381     bool ArgNoUndef = DetermineNoUndef(ParamType, getTypes(), DL, AI);
2382     if (CodeGenOpts.EnableNoundefAttrs && ArgNoUndef)
2383       Attrs.addAttribute(llvm::Attribute::NoUndef);
2384 
2385     // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2386     // have the corresponding parameter variable.  It doesn't make
2387     // sense to do it here because parameters are so messed up.
2388     switch (AI.getKind()) {
2389     case ABIArgInfo::Extend:
2390       if (AI.isSignExt())
2391         Attrs.addAttribute(llvm::Attribute::SExt);
2392       else
2393         Attrs.addAttribute(llvm::Attribute::ZExt);
2394       LLVM_FALLTHROUGH;
2395     case ABIArgInfo::Direct:
2396       if (ArgNo == 0 && FI.isChainCall())
2397         Attrs.addAttribute(llvm::Attribute::Nest);
2398       else if (AI.getInReg())
2399         Attrs.addAttribute(llvm::Attribute::InReg);
2400       Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign()));
2401       break;
2402 
2403     case ABIArgInfo::Indirect: {
2404       if (AI.getInReg())
2405         Attrs.addAttribute(llvm::Attribute::InReg);
2406 
2407       if (AI.getIndirectByVal())
2408         Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
2409 
2410       auto *Decl = ParamType->getAsRecordDecl();
2411       if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
2412           Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs)
2413         // When calling the function, the pointer passed in will be the only
2414         // reference to the underlying object. Mark it accordingly.
2415         Attrs.addAttribute(llvm::Attribute::NoAlias);
2416 
2417       // TODO: We could add the byref attribute if not byval, but it would
2418       // require updating many testcases.
2419 
2420       CharUnits Align = AI.getIndirectAlign();
2421 
2422       // In a byval argument, it is important that the required
2423       // alignment of the type is honored, as LLVM might be creating a
2424       // *new* stack object, and needs to know what alignment to give
2425       // it. (Sometimes it can deduce a sensible alignment on its own,
2426       // but not if clang decides it must emit a packed struct, or the
2427       // user specifies increased alignment requirements.)
2428       //
2429       // This is different from indirect *not* byval, where the object
2430       // exists already, and the align attribute is purely
2431       // informative.
2432       assert(!Align.isZero());
2433 
2434       // For now, only add this when we have a byval argument.
2435       // TODO: be less lazy about updating test cases.
2436       if (AI.getIndirectByVal())
2437         Attrs.addAlignmentAttr(Align.getQuantity());
2438 
2439       // byval disables readnone and readonly.
2440       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2441         .removeAttribute(llvm::Attribute::ReadNone);
2442 
2443       break;
2444     }
2445     case ABIArgInfo::IndirectAliased: {
2446       CharUnits Align = AI.getIndirectAlign();
2447       Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));
2448       Attrs.addAlignmentAttr(Align.getQuantity());
2449       break;
2450     }
2451     case ABIArgInfo::Ignore:
2452     case ABIArgInfo::Expand:
2453     case ABIArgInfo::CoerceAndExpand:
2454       break;
2455 
2456     case ABIArgInfo::InAlloca:
2457       // inalloca disables readnone and readonly.
2458       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2459           .removeAttribute(llvm::Attribute::ReadNone);
2460       continue;
2461     }
2462 
2463     if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2464       QualType PTy = RefTy->getPointeeType();
2465       if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2466         Attrs.addDereferenceableAttr(
2467             getMinimumObjectSize(PTy).getQuantity());
2468       if (getContext().getTargetAddressSpace(PTy) == 0 &&
2469           !CodeGenOpts.NullPointerIsValid)
2470         Attrs.addAttribute(llvm::Attribute::NonNull);
2471       if (PTy->isObjectType()) {
2472         llvm::Align Alignment =
2473             getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
2474         Attrs.addAlignmentAttr(Alignment);
2475       }
2476     }
2477 
2478     switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2479     case ParameterABI::Ordinary:
2480       break;
2481 
2482     case ParameterABI::SwiftIndirectResult: {
2483       // Add 'sret' if we haven't already used it for something, but
2484       // only if the result is void.
2485       if (!hasUsedSRet && RetTy->isVoidType()) {
2486         Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));
2487         hasUsedSRet = true;
2488       }
2489 
2490       // Add 'noalias' in either case.
2491       Attrs.addAttribute(llvm::Attribute::NoAlias);
2492 
2493       // Add 'dereferenceable' and 'alignment'.
2494       auto PTy = ParamType->getPointeeType();
2495       if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2496         auto info = getContext().getTypeInfoInChars(PTy);
2497         Attrs.addDereferenceableAttr(info.Width.getQuantity());
2498         Attrs.addAlignmentAttr(info.Align.getAsAlign());
2499       }
2500       break;
2501     }
2502 
2503     case ParameterABI::SwiftErrorResult:
2504       Attrs.addAttribute(llvm::Attribute::SwiftError);
2505       break;
2506 
2507     case ParameterABI::SwiftContext:
2508       Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2509       break;
2510 
2511     case ParameterABI::SwiftAsyncContext:
2512       Attrs.addAttribute(llvm::Attribute::SwiftAsync);
2513       break;
2514     }
2515 
2516     if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2517       Attrs.addAttribute(llvm::Attribute::NoCapture);
2518 
2519     if (Attrs.hasAttributes()) {
2520       unsigned FirstIRArg, NumIRArgs;
2521       std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2522       for (unsigned i = 0; i < NumIRArgs; i++)
2523         ArgAttrs[FirstIRArg + i] =
2524             llvm::AttributeSet::get(getLLVMContext(), Attrs);
2525     }
2526   }
2527   assert(ArgNo == FI.arg_size());
2528 
2529   AttrList = llvm::AttributeList::get(
2530       getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2531       llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2532 }
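// Putting the pieces together (hypothetical signature): for
//   int g(int &r) noexcept;
// in C++ on a typical target where int is 4 bytes, the prototype contributes
// `nounwind`, and the reference parameter is given `dereferenceable(4)`,
// `align 4`, and (when NullPointerIsValid is off) `nonnull` by the
// ReferenceType block above.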
2533 
2534 /// An argument came in as a promoted argument; demote it back to its
2535 /// declared type.
2536 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2537                                          const VarDecl *var,
2538                                          llvm::Value *value) {
2539   llvm::Type *varType = CGF.ConvertType(var->getType());
2540 
2541   // This can happen with promotions that actually don't change the
2542   // underlying type, like the enum promotions.
2543   if (value->getType() == varType) return value;
2544 
2545   assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2546          && "unexpected promotion type");
2547 
2548   if (isa<llvm::IntegerType>(varType))
2549     return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2550 
2551   return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2552 }
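// Example: a K&R-style `short` parameter arrives promoted to int and is
// truncated back with CreateTrunc; a `float` promoted to double comes back
// via CreateFPCast.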
2553 
2554 /// Returns the attribute (either parameter attribute, or function
2555 /// attribute), which declares argument ArgNo to be non-null.
2556 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2557                                          QualType ArgType, unsigned ArgNo) {
2558   // FIXME: __attribute__((nonnull)) can also be applied to:
2559   //   - references to pointers, where the pointee is known to be
2560   //     nonnull (apparently a Clang extension)
2561   //   - transparent unions containing pointers
2562   // In the former case, LLVM IR cannot represent the constraint. In
2563   // the latter case, we have no guarantee that the transparent union
2564   // is in fact passed as a pointer.
2565   if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2566     return nullptr;
2567   // First, check attribute on parameter itself.
2568   if (PVD) {
2569     if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2570       return ParmNNAttr;
2571   }
2572   // Check function attributes.
2573   if (!FD)
2574     return nullptr;
2575   for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2576     if (NNAttr->isNonNull(ArgNo))
2577       return NNAttr;
2578   }
2579   return nullptr;
2580 }
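     // For example (hypothetical declarations, not from this file), either
     // spelling below makes this lookup succeed for the first argument:
     //   void g(int *p __attribute__((nonnull)));     // parameter attribute
     //   void h(int *p) __attribute__((nonnull(1)));  // function attribute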
2581 
2582 namespace {
2583   struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2584     Address Temp;
2585     Address Arg;
2586     CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2587     void Emit(CodeGenFunction &CGF, Flags flags) override {
2588       llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2589       CGF.Builder.CreateStore(errorValue, Arg);
2590     }
2591   };
2592 }
2593 
2594 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2595                                          llvm::Function *Fn,
2596                                          const FunctionArgList &Args) {
2597   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2598     // Naked functions don't have prologues.
2599     return;
2600 
2601   // If this is an implicit-return-zero function, go ahead and
2602   // initialize the return value.  TODO: it might be nice to have
2603   // a more general mechanism for this that didn't require synthesized
2604   // return statements.
2605   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2606     if (FD->hasImplicitReturnZero()) {
2607       QualType RetTy = FD->getReturnType().getUnqualifiedType();
2608       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2609       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2610       Builder.CreateStore(Zero, ReturnValue);
2611     }
2612   }
2613 
2614   // FIXME: We no longer need the types from FunctionArgList; lift up and
2615   // simplify.
2616 
2617   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2618   assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
2619 
2620   // If we're using inalloca, all the memory arguments are GEPs off of the last
2621   // parameter, which is a pointer to the complete memory area.
2622   Address ArgStruct = Address::invalid();
2623   if (IRFunctionArgs.hasInallocaArg()) {
2624     ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
2625                         FI.getArgStructAlignment());
2626 
2627     assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2628   }
2629 
2630   // Name the struct return parameter.
2631   if (IRFunctionArgs.hasSRetArg()) {
2632     auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
2633     AI->setName("agg.result");
2634     AI->addAttr(llvm::Attribute::NoAlias);
2635   }
2636 
2637   // Track if we received the parameter as a pointer (indirect, byval, or
2638   // inalloca).  If we already have a pointer, EmitParmDecl doesn't need to
2639   // copy it into a local alloca for us.
2640   SmallVector<ParamValue, 16> ArgVals;
2641   ArgVals.reserve(Args.size());
2642 
2643   // Create a pointer value for every parameter declaration.  This usually
2644   // entails copying one or more LLVM IR arguments into an alloca.  Don't push
2645   // any cleanups or do anything that might unwind.  We do that separately, so
2646   // we can push the cleanups in the correct order for the ABI.
2647   assert(FI.arg_size() == Args.size() &&
2648          "Mismatch between function signature & arguments.");
2649   unsigned ArgNo = 0;
2650   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2651   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2652        i != e; ++i, ++info_it, ++ArgNo) {
2653     const VarDecl *Arg = *i;
2654     const ABIArgInfo &ArgI = info_it->info;
2655 
2656     bool isPromoted =
2657       isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2658     // We normally convert from the ABIArgInfo type directly to the VarDecl
2659     // type. If the parameter is promoted, however, we convert to the
2660     // CGFunctionInfo::ArgInfo type and demote the argument afterwards.
2661     QualType Ty = isPromoted ? info_it->type : Arg->getType();
2662     assert(hasScalarEvaluationKind(Ty) ==
2663            hasScalarEvaluationKind(Arg->getType()));
2664 
2665     unsigned FirstIRArg, NumIRArgs;
2666     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2667 
2668     switch (ArgI.getKind()) {
2669     case ABIArgInfo::InAlloca: {
2670       assert(NumIRArgs == 0);
2671       auto FieldIndex = ArgI.getInAllocaFieldIndex();
2672       Address V =
2673           Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2674       if (ArgI.getInAllocaIndirect())
2675         V = Address(Builder.CreateLoad(V),
2676                     getContext().getTypeAlignInChars(Ty));
2677       ArgVals.push_back(ParamValue::forIndirect(V));
2678       break;
2679     }
2680 
2681     case ABIArgInfo::Indirect:
2682     case ABIArgInfo::IndirectAliased: {
2683       assert(NumIRArgs == 1);
2684       Address ParamAddr =
2685           Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign());
2686 
2687       if (!hasScalarEvaluationKind(Ty)) {
2688         // Aggregates and complex variables are accessed by reference. All we
2689         // need to do is realign the value, if requested. Also, if the address
2690         // may be aliased, copy it to ensure that the parameter variable is
2691         // mutable and has a unique address, as C requires.
2692         Address V = ParamAddr;
2693         if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
2694           Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2695 
2696           // Copy from the incoming argument pointer to the temporary with the
2697           // appropriate alignment.
2698           //
2699           // FIXME: We should have a common utility for generating an aggregate
2700           // copy.
2701           CharUnits Size = getContext().getTypeSizeInChars(Ty);
2702           Builder.CreateMemCpy(
2703               AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
2704               ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
2705               llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
2706           V = AlignedTemp;
2707         }
2708         ArgVals.push_back(ParamValue::forIndirect(V));
2709       } else {
2710         // Load scalar value from indirect argument.
2711         llvm::Value *V =
2712             EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2713 
2714         if (isPromoted)
2715           V = emitArgumentDemotion(*this, Arg, V);
2716         ArgVals.push_back(ParamValue::forDirect(V));
2717       }
2718       break;
2719     }
2720 
2721     case ABIArgInfo::Extend:
2722     case ABIArgInfo::Direct: {
2723       auto AI = Fn->getArg(FirstIRArg);
2724       llvm::Type *LTy = ConvertType(Arg->getType());
2725 
2726       // Prepare parameter attributes. So far, only attributes for pointer
2727       // parameters are prepared. See
2728       // http://llvm.org/docs/LangRef.html#paramattrs.
2729       if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
2730           ArgI.getCoerceToType()->isPointerTy()) {
2731         assert(NumIRArgs == 1);
2732 
2733         if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2734           // Set the `nonnull` attribute if one applies.
2735           if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2736                              PVD->getFunctionScopeIndex()) &&
2737               !CGM.getCodeGenOpts().NullPointerIsValid)
2738             AI->addAttr(llvm::Attribute::NonNull);
2739 
2740           QualType OTy = PVD->getOriginalType();
2741           if (const auto *ArrTy =
2742               getContext().getAsConstantArrayType(OTy)) {
2743             // A C99 array parameter declaration with the static keyword also
2744             // indicates dereferenceability, and if the size is constant we can
2745             // use the dereferenceable attribute (which requires the size in
2746             // bytes).
2747             if (ArrTy->getSizeModifier() == ArrayType::Static) {
2748               QualType ETy = ArrTy->getElementType();
2749               llvm::Align Alignment =
2750                   CGM.getNaturalTypeAlignment(ETy).getAsAlign();
2751               AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2752               uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2753               if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2754                   ArrSize) {
2755                 llvm::AttrBuilder Attrs;
2756                 Attrs.addDereferenceableAttr(
2757                     getContext().getTypeSizeInChars(ETy).getQuantity() *
2758                     ArrSize);
2759                 AI->addAttrs(Attrs);
2760               } else if (getContext().getTargetInfo().getNullPointerValue(
2761                              ETy.getAddressSpace()) == 0 &&
2762                          !CGM.getCodeGenOpts().NullPointerIsValid) {
2763                 AI->addAttr(llvm::Attribute::NonNull);
2764               }
2765             }
2766           } else if (const auto *ArrTy =
2767                      getContext().getAsVariableArrayType(OTy)) {
2768             // For C99 VLAs with the static keyword, we don't know the size so
2769             // we can't use the dereferenceable attribute, but in addrspace(0)
2770             // we know that it must be nonnull.
2771             if (ArrTy->getSizeModifier() == VariableArrayType::Static) {
2772               QualType ETy = ArrTy->getElementType();
2773               llvm::Align Alignment =
2774                   CGM.getNaturalTypeAlignment(ETy).getAsAlign();
2775               AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2776               if (!getContext().getTargetAddressSpace(ETy) &&
2777                   !CGM.getCodeGenOpts().NullPointerIsValid)
2778                 AI->addAttr(llvm::Attribute::NonNull);
2779             }
2780           }
2781 
2782         // Set the `align` attribute if one applies.
2783           const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2784           if (!AVAttr)
2785             if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2786               AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2787           if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
2788             // If the alignment-assumption sanitizer is enabled, we do *not* add
2789             // the alignment attribute here; instead we emit a normal alignment
2790             // assumption so that the UBSan check can still fire.
2791             llvm::ConstantInt *AlignmentCI =
2792                 cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment()));
2793             unsigned AlignmentInt =
2794                 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
2795             if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
2796               AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
2797               AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(
2798                   llvm::Align(AlignmentInt)));
2799             }
2800           }
2801         }
2802 
2803         // Set 'noalias' if an argument type has the `restrict` qualifier.
2804         if (Arg->getType().isRestrictQualified())
2805           AI->addAttr(llvm::Attribute::NoAlias);
2806       }
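           // Examples of attribute sources handled above (hypothetical
           // declarations, not from this file):
           //   typedef int *ip32 __attribute__((align_value(32)));
           //   void f(ip32 p);           // may add `align 32` to the argument
           //   void g(int *restrict q);  // `restrict` adds `noalias`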
2807 
2808       // Prepare the argument value. If we have the trivial case, handle it
2809       // with no muss and fuss.
2810       if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2811           ArgI.getCoerceToType() == ConvertType(Ty) &&
2812           ArgI.getDirectOffset() == 0) {
2813         assert(NumIRArgs == 1);
2814 
2815         // LLVM expects swifterror parameters to be used in very restricted
2816         // ways.  Copy the value into a less-restricted temporary.
2817         llvm::Value *V = AI;
2818         if (FI.getExtParameterInfo(ArgNo).getABI()
2819               == ParameterABI::SwiftErrorResult) {
2820           QualType pointeeTy = Ty->getPointeeType();
2821           assert(pointeeTy->isPointerType());
2822           Address temp =
2823             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2824           Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2825           llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2826           Builder.CreateStore(incomingErrorValue, temp);
2827           V = temp.getPointer();
2828 
2829           // Push a cleanup to copy the value back at the end of the function.
2830           // The convention does not guarantee that the value will be written
2831           // back if the function exits with an unwind exception.
2832           EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2833         }
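             // Conceptually, the IR takes this shape (a hand-written sketch,
             // not emitted verbatim):
             //   %swifterror.temp = alloca i8*
             //   %in = load i8*, i8** %arg             ; incoming error value
             //   store i8* %in, i8** %swifterror.temp
             //   ...                                   ; body uses the temp
             //   %out = load i8*, i8** %swifterror.temp
             //   store i8* %out, i8** %arg             ; cleanup copies back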
2834 
2835         // Ensure the argument is the correct type.
2836         if (V->getType() != ArgI.getCoerceToType())
2837           V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2838 
2839         if (isPromoted)
2840           V = emitArgumentDemotion(*this, Arg, V);
2841 
2842         // Because of merging of function types from multiple decls it is
2843         // possible for the type of an argument to not match the corresponding
2844         // type in the function type. Since we are codegening the callee
2845         // here, add a cast to the argument type.
2846         llvm::Type *LTy = ConvertType(Arg->getType());
2847         if (V->getType() != LTy)
2848           V = Builder.CreateBitCast(V, LTy);
2849 
2850         ArgVals.push_back(ParamValue::forDirect(V));
2851         break;
2852       }
2853 
2854       // VLST (vector-length-specific type) arguments are coerced to VLATs
2855       // (vector-length-agnostic types) at the function boundary for ABI
2856       // consistency. If this is a VLST that was coerced to a VLAT at the
2857       // function boundary and the types match up, use
2858       // llvm.experimental.vector.extract to convert back to the original VLST.
2859       if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
2860         auto *Coerced = Fn->getArg(FirstIRArg);
2861         if (auto *VecTyFrom =
2862                 dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
2863           if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
2864             llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2865 
2866             assert(NumIRArgs == 1);
2867             Coerced->setName(Arg->getName() + ".coerce");
2868             ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector(
2869                 VecTyTo, Coerced, Zero, "castFixedSve")));
2870             break;
2871           }
2872         }
2873       }
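           // E.g. (a sketch assuming AArch64 SVE with -msve-vector-bits=512):
           // a fixed-length vector declared with arm_sve_vector_bits(512)
           // arrives as <vscale x 4 x i32> and is narrowed back to <16 x i32>
           // by the extract above.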
2874 
2875       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2876                                      Arg->getName());
2877 
2878       // Pointer to store into.
2879       Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2880 
2881       // Fast-isel and the optimizer generally prefer scalar values to FCAs
2882       // (first-class aggregates), so we flatten them when safe for this argument.
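           // Sketch: a struct such as { double x; double y; } coerced to the
           // FCA { double, double } arrives as two IR arguments ("p.coerce0",
           // "p.coerce1") and is stored element by element below; the exact
           // coercion is target-ABI dependent.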
2883       llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2884       if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2885           STy->getNumElements() > 1) {
2886         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2887         llvm::Type *DstTy = Ptr.getElementType();
2888         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2889 
2890         Address AddrToStoreInto = Address::invalid();
2891         if (SrcSize <= DstSize) {
2892           AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2893         } else {
2894           AddrToStoreInto =
2895             CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2896         }
2897 
2898         assert(STy->getNumElements() == NumIRArgs);
2899         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2900           auto AI = Fn->getArg(FirstIRArg + i);
2901           AI->setName(Arg->getName() + ".coerce" + Twine(i));
2902           Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
2903           Builder.CreateStore(AI, EltPtr);
2904         }
2905 
2906         if (SrcSize > DstSize) {
2907           Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2908         }
2909 
2910       } else {
2911         // Simple case, just do a coerced store of the argument into the alloca.
2912         assert(NumIRArgs == 1);
2913         auto AI = Fn->getArg(FirstIRArg);
2914         AI->setName(Arg->getName() + ".coerce");
2915         CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
2916       }
2917 
2918       // Match to what EmitParmDecl is expecting for this type.
2919       if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2920         llvm::Value *V =
2921             EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2922         if (isPromoted)
2923           V = emitArgumentDemotion(*this, Arg, V);
2924         ArgVals.push_back(ParamValue::forDirect(V));
2925       } else {
2926         ArgVals.push_back(ParamValue::forIndirect(Alloca));
2927       }
2928       break;
2929     }
2930 
2931     case ABIArgInfo::CoerceAndExpand: {
2932       // Reconstruct into a temporary.
2933       Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2934       ArgVals.push_back(ParamValue::forIndirect(alloca));
2935 
2936       auto coercionType = ArgI.getCoerceAndExpandType();
2937       alloca = Builder.CreateElementBitCast(alloca, coercionType);
2938 
2939       unsigned argIndex = FirstIRArg;
2940       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2941         llvm::Type *eltType = coercionType->getElementType(i);
2942         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2943           continue;
2944 
2945         auto eltAddr = Builder.CreateStructGEP(alloca, i);
2946         auto elt = Fn->getArg(argIndex++);
2947         Builder.CreateStore(elt, eltAddr);
2948       }
2949       assert(argIndex == FirstIRArg + NumIRArgs);
2950       break;
2951     }
2952 
2953     case ABIArgInfo::Expand: {
2954       // If this structure was expanded into multiple arguments then
2955       // we need to create a temporary and reconstruct it from the
2956       // arguments.
2957       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2958       LValue LV = MakeAddrLValue(Alloca, Ty);
2959       ArgVals.push_back(ParamValue::forIndirect(Alloca));
2960 
2961       auto FnArgIter = Fn->arg_begin() + FirstIRArg;
2962       ExpandTypeFromArgs(Ty, LV, FnArgIter);
2963       assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
2964       for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2965         auto AI = Fn->getArg(FirstIRArg + i);
2966         AI->setName(Arg->getName() + "." + Twine(i));
2967       }
2968       break;
2969     }
2970 
2971     case ABIArgInfo::Ignore:
2972       assert(NumIRArgs == 0);
2973       // Initialize the local variable appropriately.
2974       if (!hasScalarEvaluationKind(Ty)) {
2975         ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2976       } else {
2977         llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2978         ArgVals.push_back(ParamValue::forDirect(U));
2979       }
2980       break;
2981     }
2982   }
2983 
2984   if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2985     for (int I = Args.size() - 1; I >= 0; --I)
2986       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2987   } else {
2988     for (unsigned I = 0, E = Args.size(); I != E; ++I)
2989       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2990   }
2991 }
2992 
2993 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2994   while (insn->use_empty()) {
2995     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2996     if (!bitcast) return;
2997 
2998     // This is "safe" because we would have used a ConstantExpr otherwise.
2999     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
3000     bitcast->eraseFromParent();
3001   }
3002 }
3003 
3004 /// Try to emit a fused autorelease of a return result.
3005 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
3006                                                     llvm::Value *result) {
3007   // We must be immediately followed by the cast.
3008   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
3009   if (BB->empty()) return nullptr;
3010   if (&BB->back() != result) return nullptr;
3011 
3012   llvm::Type *resultType = result->getType();
3013 
3014   // result is in a BasicBlock and is therefore an Instruction.
3015   llvm::Instruction *generator = cast<llvm::Instruction>(result);
3016 
3017   SmallVector<llvm::Instruction *, 4> InstsToKill;
3018 
3019   // Look for:
3020   //  %generator = bitcast %type1* %generator2 to %type2*
3021   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
3022     // We would have emitted this as a constant if the operand weren't
3023     // an Instruction.
3024     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
3025 
3026     // Require the generator to be immediately followed by the cast.
3027     if (generator->getNextNode() != bitcast)
3028       return nullptr;
3029 
3030     InstsToKill.push_back(bitcast);
3031   }
3032 
3033   // Look for:
3034   //   %generator = call i8* @objc_retain(i8* %originalResult)
3035   // or
3036   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
3037   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
3038   if (!call) return nullptr;
3039 
3040   bool doRetainAutorelease;
3041 
3042   if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
3043     doRetainAutorelease = true;
3044   } else if (call->getCalledOperand() ==
3045              CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
3046     doRetainAutorelease = false;
3047 
3048     // If we emitted an assembly marker for this call (and the
3049     // ARCEntrypoints field should have been set if so), go looking
3050     // for that call.  If we can't find it, we can't do this
3051     // optimization.  But it should always be the immediately previous
3052     // instruction, unless we needed bitcasts around the call.
3053     if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
3054       llvm::Instruction *prev = call->getPrevNode();
3055       assert(prev);
3056       if (isa<llvm::BitCastInst>(prev)) {
3057         prev = prev->getPrevNode();
3058         assert(prev);
3059       }
3060       assert(isa<llvm::CallInst>(prev));
3061       assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
3062              CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
3063       InstsToKill.push_back(prev);
3064     }
3065   } else {
3066     return nullptr;
3067   }
3068 
3069   result = call->getArgOperand(0);
3070   InstsToKill.push_back(call);
3071 
3072   // Keep killing bitcasts, for sanity.  Note that we no longer care
3073   // about precise ordering as long as there's exactly one use.
3074   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
3075     if (!bitcast->hasOneUse()) break;
3076     InstsToKill.push_back(bitcast);
3077     result = bitcast->getOperand(0);
3078   }
3079 
3080   // Delete all the unnecessary instructions, from latest to earliest.
3081   for (auto *I : InstsToKill)
3082     I->eraseFromParent();
3083 
3084   // Do the fused retain/autorelease if we were asked to.
3085   if (doRetainAutorelease)
3086     result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
3087 
3088   // Cast back to the result type.
3089   return CGF.Builder.CreateBitCast(result, resultType);
3090 }
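     // In effect, the code above fuses an objc_retain of the result with the
     // pending autorelease into a single objc_retainAutoreleaseReturnValue
     // call, or cancels an objc_retainAutoreleasedReturnValue against the
     // autorelease entirely, deleting the dead calls and surrounding bitcasts
     // (an editorial summary, not additional behavior).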
3091 
3092 /// If this is a +1 of the value of an immutable 'self', remove it.
3093 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
3094                                           llvm::Value *result) {
3095   // This is only applicable to a method with an immutable 'self'.
3096   const ObjCMethodDecl *method =
3097     dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
3098   if (!method) return nullptr;
3099   const VarDecl *self = method->getSelfDecl();
3100   if (!self->getType().isConstQualified()) return nullptr;
3101 
3102   // Look for a retain call.
3103   llvm::CallInst *retainCall =
3104     dyn_cast<llvm::CallInst>(result->stripPointerCasts());
3105   if (!retainCall || retainCall->getCalledOperand() !=
3106                          CGF.CGM.getObjCEntrypoints().objc_retain)
3107     return nullptr;
3108 
3109   // Look for an ordinary load of 'self'.
3110   llvm::Value *retainedValue = retainCall->getArgOperand(0);
3111   llvm::LoadInst *load =
3112     dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
3113   if (!load || load->isAtomic() || load->isVolatile() ||
3114       load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
3115     return nullptr;
3116 
3117   // Okay!  Burn it all down.  This relies for correctness on the
3118   // assumption that the retain is emitted as part of the return and
3119   // that thereafter everything is used "linearly".
3120   llvm::Type *resultType = result->getType();
3121   eraseUnusedBitCasts(cast<llvm::Instruction>(result));
3122   assert(retainCall->use_empty());
3123   retainCall->eraseFromParent();
3124   eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
3125 
3126   return CGF.Builder.CreateBitCast(load, resultType);
3127 }
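     // Typical shape this matches (sketch): an ObjC method such as
     //   - (id)foo { return self; }
     // where 'self' is immutable; the retain emitted for the return is
     // deleted and the loaded 'self' is returned directly.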
3128 
3129 /// Emit an ARC autorelease of the result of a function.
3130 ///
3131 /// \return the value to actually return from the function
3132 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
3133                                             llvm::Value *result) {
3134   // If we're returning 'self', kill the initial retain.  This is a
3135   // heuristic attempt to "encourage correctness" in the really unfortunate
3136   // case where we have a return of self during a dealloc and we desperately
3137   // need to avoid the possible autorelease.
3138   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
3139     return self;
3140 
3141   // At -O0, try to emit a fused retain/autorelease.
3142   if (CGF.shouldUseFusedARCCalls())
3143     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
3144       return fused;
3145 
3146   return CGF.EmitARCAutoreleaseReturnValue(result);
3147 }
3148 
3149 /// Heuristically search for a dominating store to the return-value slot.
3150 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
3151   // Check whether a User is a store whose pointer operand is the ReturnValue.
3152   // We are looking for stores to the ReturnValue, not for stores of the
3153   // ReturnValue to some other location.
3154   auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
3155     auto *SI = dyn_cast<llvm::StoreInst>(U);
3156     if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
3157       return nullptr;
3158     // These aren't actually possible for non-coerced returns, and we
3159     // only care about non-coerced returns on this code path.
3160     assert(!SI->isAtomic() && !SI->isVolatile());
3161     return SI;
3162   };
3163   // If there are multiple uses of the return-value slot, just check
3164   // for something immediately preceding the IP.  Sometimes this can
3165   // happen with how we generate implicit returns; it can also happen
3166   // with noreturn cleanups.
3167   if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
3168     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3169     if (IP->empty()) return nullptr;
3170     llvm::Instruction *I = &IP->back();
3171 
3172     // Skip lifetime markers
3173     for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
3174                                             IE = IP->rend();
3175          II != IE; ++II) {
3176       if (llvm::IntrinsicInst *Intrinsic =
3177               dyn_cast<llvm::IntrinsicInst>(&*II)) {
3178         if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
3179           const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
3180           ++II;
3181           if (II == IE)
3182             break;
3183           if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
3184             continue;
3185         }
3186       }
3187       I = &*II;
3188       break;
3189     }
3190 
3191     return GetStoreIfValid(I);
3192   }
3193 
3194   llvm::StoreInst *store =
3195       GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
3196   if (!store) return nullptr;
3197 
3198   // Now do a quick-and-dirty dominance check: just walk up the
3199   // single-predecessors chain from the current insertion point.
3200   llvm::BasicBlock *StoreBB = store->getParent();
3201   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3202   while (IP != StoreBB) {
3203     if (!(IP = IP->getSinglePredecessor()))
3204       return nullptr;
3205   }
3206 
3207   // Okay, the store's basic block dominates the insertion point; we
3208   // can do our thing.
3209   return store;
3210 }
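     // The payoff in the epilogue (sketch): a dominating
     //   store i32 %v, i32* %retval
     // lets the return path emit `ret i32 %v` directly, after which the
     // store and usually the alloca itself are deleted.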
3211 
3212 // Helper functions for EmitCMSEClearRecord
3213 
3214 // Set the bits corresponding to a field having width `BitWidth` and located at
3215 // offset `BitOffset` (from the least significant bit) within a storage unit of
3216 // `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
3217 // Use little-endian layout, i.e. `Bits[0]` is the LSB.
3218 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset,
3219                         int BitWidth, int CharWidth) {
3220   assert(CharWidth <= 64);
3221   assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
3222 
3223   int Pos = 0;
3224   if (BitOffset >= CharWidth) {
3225     Pos += BitOffset / CharWidth;
3226     BitOffset = BitOffset % CharWidth;
3227   }
3228 
3229   const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
3230   if (BitOffset + BitWidth >= CharWidth) {
3231     Bits[Pos++] |= (Used << BitOffset) & Used;
3232     BitWidth -= CharWidth - BitOffset;
3233     BitOffset = 0;
3234   }
3235 
3236   while (BitWidth >= CharWidth) {
3237     Bits[Pos++] = Used;
3238     BitWidth -= CharWidth;
3239   }
3240 
3241   if (BitWidth > 0)
3242     Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
3243 }
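     // Worked example (assuming CharWidth == 8): BitOffset = 10, BitWidth = 5
     // first skips a whole byte (Pos = 1, BitOffset = 2), then takes the
     // final branch and sets Bits[1] |= 0x7C, i.e. bits 2..6 of that byte.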
3244 
3245 // Set the bits corresponding to a field having width `BitWidth` and located at
3246 // offset `BitOffset` (from the least significant bit) within a storage unit of
3247 // `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of
3248 // `Bits` corresponds to one target byte. Use target endian layout.
3249 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset,
3250                         int StorageSize, int BitOffset, int BitWidth,
3251                         int CharWidth, bool BigEndian) {
3252 
3253   SmallVector<uint64_t, 8> TmpBits(StorageSize);
3254   setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
3255 
3256   if (BigEndian)
3257     std::reverse(TmpBits.begin(), TmpBits.end());
3258 
3259   for (uint64_t V : TmpBits)
3260     Bits[StorageOffset++] |= V;
3261 }
3262 
3263 static void setUsedBits(CodeGenModule &, QualType, int,
3264                         SmallVectorImpl<uint64_t> &);
3265 
3266 // Set the bits in `Bits`, which correspond to the value representations of
3267 // the actual members of the record type `RTy`. Note that this function does
3268 // not handle base classes, virtual tables, etc., since they cannot appear
3269 // in CMSE function arguments or return values. The bit mask corresponds to
3270 // the target memory layout, i.e. it is endian dependent.
3271 static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
3272                         SmallVectorImpl<uint64_t> &Bits) {
3273   ASTContext &Context = CGM.getContext();
3274   int CharWidth = Context.getCharWidth();
3275   const RecordDecl *RD = RTy->getDecl()->getDefinition();
3276   const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
3277   const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);
3278 
3279   int Idx = 0;
3280   for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
3281     const FieldDecl *F = *I;
3282 
3283     if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) ||
3284         F->getType()->isIncompleteArrayType())
3285       continue;
3286 
3287     if (F->isBitField()) {
3288       const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F);
3289       setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(),
3290                   BFI.StorageSize / CharWidth, BFI.Offset,
3291                   BFI.Size, CharWidth,
3292                   CGM.getDataLayout().isBigEndian());
3293       continue;
3294     }
3295 
3296     setUsedBits(CGM, F->getType(),
3297                 Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits);
3298   }
3299 }
3300 
3301 // Set the bits in `Bits`, which correspond to the value representations of
3302 // the elements of an array type `ATy`.
3303 static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
3304                         int Offset, SmallVectorImpl<uint64_t> &Bits) {
3305   const ASTContext &Context = CGM.getContext();
3306 
3307   QualType ETy = Context.getBaseElementType(ATy);
3308   int Size = Context.getTypeSizeInChars(ETy).getQuantity();
3309   SmallVector<uint64_t, 4> TmpBits(Size);
3310   setUsedBits(CGM, ETy, 0, TmpBits);
3311 
3312   for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
3313     auto Src = TmpBits.begin();
3314     auto Dst = Bits.begin() + Offset + I * Size;
3315     for (int J = 0; J < Size; ++J)
3316       *Dst++ |= *Src++;
3317   }
3318 }
3319 
3320 // Set the bits in `Bits`, which correspond to the value representations of
3321 // the type `QTy`.
3322 static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
3323                         SmallVectorImpl<uint64_t> &Bits) {
3324   if (const auto *RTy = QTy->getAs<RecordType>())
3325     return setUsedBits(CGM, RTy, Offset, Bits);
3326 
3327   ASTContext &Context = CGM.getContext();
3328   if (const auto *ATy = Context.getAsConstantArrayType(QTy))
3329     return setUsedBits(CGM, ATy, Offset, Bits);
3330 
3331   int Size = Context.getTypeSizeInChars(QTy).getQuantity();
3332   if (Size <= 0)
3333     return;
3334 
3335   std::fill_n(Bits.begin() + Offset, Size,
3336               (uint64_t(1) << Context.getCharWidth()) - 1);
3337 }
3338 
3339 static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
3340                                    int Pos, int Size, int CharWidth,
3341                                    bool BigEndian) {
3342   assert(Size > 0);
3343   uint64_t Mask = 0;
3344   if (BigEndian) {
3345     for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
3346          ++P)
3347       Mask = (Mask << CharWidth) | *P;
3348   } else {
3349     auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
3350     do
3351       Mask = (Mask << CharWidth) | *--P;
3352     while (P != End);
3353   }
3354   return Mask;
3355 }
3356 
3357 // Emit code to clear the bits in a record that aren't part of any
3358 // user-declared member, when the record is a function return value.
3359 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3360                                                   llvm::IntegerType *ITy,
3361                                                   QualType QTy) {
3362   assert(Src->getType() == ITy);
3363   assert(ITy->getScalarSizeInBits() <= 64);
3364 
3365   const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3366   int Size = DataLayout.getTypeStoreSize(ITy);
3367   SmallVector<uint64_t, 4> Bits(Size);
3368   setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3369 
3370   int CharWidth = CGM.getContext().getCharWidth();
3371   uint64_t Mask =
3372       buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());
3373 
3374   return Builder.CreateAnd(Src, Mask, "cmse.clear");
3375 }
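     // Example (a sketch; little-endian, 8-bit chars): for
     //   struct S { char c; int i; };  // 3 padding bytes after 'c'
     // returned as i64, the used-bit mask comes out as 0xFFFFFFFF000000FF,
     // so the padding bytes of the return value are cleared by the `and`.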
3376 
3377 // Emit code to clear the bits in a record that aren't part of any
3378 // user-declared member, when the record is a function argument.
3379 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3380                                                   llvm::ArrayType *ATy,
3381                                                   QualType QTy) {
3382   const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3383   int Size = DataLayout.getTypeStoreSize(ATy);
3384   SmallVector<uint64_t, 16> Bits(Size);
3385   setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3386 
3387   // Clear each element of the LLVM array.
3388   int CharWidth = CGM.getContext().getCharWidth();
3389   int CharsPerElt =
3390       ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3391   int MaskIndex = 0;
3392   llvm::Value *R = llvm::UndefValue::get(ATy);
3393   for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3394     uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
3395                                        DataLayout.isBigEndian());
3396     MaskIndex += CharsPerElt;
3397     llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
3398     llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
3399     R = Builder.CreateInsertValue(R, T1, I);
3400   }
3401 
3402   return R;
3403 }
3404 
3405 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
3406                                          bool EmitRetDbgLoc,
3407                                          SourceLocation EndLoc) {
3408   if (FI.isNoReturn()) {
3409     // Noreturn functions don't return.
3410     EmitUnreachable(EndLoc);
3411     return;
3412   }
3413 
3414   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
3415     // Naked functions don't have epilogues.
3416     Builder.CreateUnreachable();
3417     return;
3418   }
3419 
3420   // Functions with no result always return void.
3421   if (!ReturnValue.isValid()) {
3422     Builder.CreateRetVoid();
3423     return;
3424   }
3425 
3426   llvm::DebugLoc RetDbgLoc;
3427   llvm::Value *RV = nullptr;
3428   QualType RetTy = FI.getReturnType();
3429   const ABIArgInfo &RetAI = FI.getReturnInfo();
3430 
3431   switch (RetAI.getKind()) {
3432   case ABIArgInfo::InAlloca:
3433     // Aggregates get evaluated directly into the destination.  Sometimes we
3434     // need to return the sret value in a register, though.
3435     assert(hasAggregateEvaluationKind(RetTy));
3436     if (RetAI.getInAllocaSRet()) {
3437       llvm::Function::arg_iterator EI = CurFn->arg_end();
3438       --EI;
3439       llvm::Value *ArgStruct = &*EI;
3440       llvm::Value *SRet = Builder.CreateStructGEP(
3441           EI->getType()->getPointerElementType(), ArgStruct,
3442           RetAI.getInAllocaFieldIndex());
3443       llvm::Type *Ty =
3444           cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
3445       RV = Builder.CreateAlignedLoad(Ty, SRet, getPointerAlign(), "sret");
3446     }
3447     break;
3448 
3449   case ABIArgInfo::Indirect: {
3450     auto AI = CurFn->arg_begin();
3451     if (RetAI.isSRetAfterThis())
3452       ++AI;
3453     switch (getEvaluationKind(RetTy)) {
3454     case TEK_Complex: {
3455       ComplexPairTy RT =
3456         EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
3457       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
3458                          /*isInit*/ true);
3459       break;
3460     }
3461     case TEK_Aggregate:
3462       // Do nothing; aggregates get evaluated directly into the destination.
3463       break;
3464     case TEK_Scalar:
3465       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
3466                         MakeNaturalAlignAddrLValue(&*AI, RetTy),
3467                         /*isInit*/ true);
3468       break;
3469     }
3470     break;
3471   }
3472 
3473   case ABIArgInfo::Extend:
3474   case ABIArgInfo::Direct:
3475     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
3476         RetAI.getDirectOffset() == 0) {
3477       // The internal return value temp always has pointer-to-return-type
3478       // type; just do a load.
3479 
3480       // If there is a dominating store to ReturnValue, we can elide
3481       // the load, zap the store, and usually zap the alloca.
3482       if (llvm::StoreInst *SI =
3483               findDominatingStoreToReturnValue(*this)) {
3484         // Reuse the debug location from the store unless there is
3485         // cleanup code to be emitted between the store and return
3486         // instruction.
3487         if (EmitRetDbgLoc && !AutoreleaseResult)
3488           RetDbgLoc = SI->getDebugLoc();
3489         // Get the stored value and nuke the now-dead store.
3490         RV = SI->getValueOperand();
3491         SI->eraseFromParent();
3492 
3493       // Otherwise, we have to do a simple load.
3494       } else {
3495         RV = Builder.CreateLoad(ReturnValue);
3496       }
3497     } else {
3498       // If the value is offset in memory, apply the offset now.
3499       Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
3500 
3501       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
3502     }
3503 
3504     // In ARC, end functions that return a retainable type with a call
3505     // to objc_autoreleaseReturnValue.
3506     if (AutoreleaseResult) {
3507 #ifndef NDEBUG
3508       // Type::isObjCRetainableType has to be called on a QualType that hasn't
3509       // been stripped of the typedefs, so we cannot use RetTy here. Get the
3510       // original return type of the FunctionDecl, ObjCMethodDecl, or BlockDecl
3511       // from CurCodeDecl or BlockInfo.
3512       QualType RT;
3513 
3514       if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
3515         RT = FD->getReturnType();
3516       else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
3517         RT = MD->getReturnType();
3518       else if (isa<BlockDecl>(CurCodeDecl))
3519         RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
3520       else
3521         llvm_unreachable("Unexpected function/method type");
3522 
3523       assert(getLangOpts().ObjCAutoRefCount &&
3524              !FI.isReturnsRetained() &&
3525              RT->isObjCRetainableType());
3526 #endif
3527       RV = emitAutoreleaseOfResult(*this, RV);
3528     }
3529 
3530     break;
3531 
3532   case ABIArgInfo::Ignore:
3533     break;
3534 
3535   case ABIArgInfo::CoerceAndExpand: {
3536     auto coercionType = RetAI.getCoerceAndExpandType();
3537 
3538     // Load all of the coerced elements out into results.
3539     llvm::SmallVector<llvm::Value*, 4> results;
3540     Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
3541     for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3542       auto coercedEltType = coercionType->getElementType(i);
3543       if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
3544         continue;
3545 
3546       auto eltAddr = Builder.CreateStructGEP(addr, i);
3547       auto elt = Builder.CreateLoad(eltAddr);
3548       results.push_back(elt);
3549     }
3550 
3551     // If we have one result, it's the single direct result type.
3552     if (results.size() == 1) {
3553       RV = results[0];
3554 
3555     // Otherwise, we need to make a first-class aggregate.
3556     } else {
3557       // Construct a return type that lacks padding elements.
3558       llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
3559 
3560       RV = llvm::UndefValue::get(returnType);
3561       for (unsigned i = 0, e = results.size(); i != e; ++i) {
3562         RV = Builder.CreateInsertValue(RV, results[i], i);
3563       }
3564     }
3565     break;
3566   }
3567   case ABIArgInfo::Expand:
3568   case ABIArgInfo::IndirectAliased:
3569     llvm_unreachable("Invalid ABI kind for return argument");
3570   }
3571 
3572   llvm::Instruction *Ret;
3573   if (RV) {
3574     if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
3575       // For certain return types, clear padding bits, as they may reveal
3576       // sensitive information.
3577       // Small struct/union types are returned as integers.
3578       auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
3579       if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType()))
3580         RV = EmitCMSEClearRecord(RV, ITy, RetTy);
3581     }
3582     EmitReturnValueCheck(RV);
3583     Ret = Builder.CreateRet(RV);
3584   } else {
3585     Ret = Builder.CreateRetVoid();
3586   }
3587 
3588   if (RetDbgLoc)
3589     Ret->setDebugLoc(std::move(RetDbgLoc));
3590 }
3591 
3592 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
3593   // A current decl may not be available when emitting vtable thunks.
3594   if (!CurCodeDecl)
3595     return;
3596 
3597   // If the return block isn't reachable, neither is this check, so don't emit
3598   // it.
3599   if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty())
3600     return;
3601 
3602   ReturnsNonNullAttr *RetNNAttr = nullptr;
3603   if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
3604     RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
3605 
3606   if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
3607     return;
3608 
3609   // Prefer the returns_nonnull attribute if it's present.
3610   SourceLocation AttrLoc;
3611   SanitizerMask CheckKind;
3612   SanitizerHandler Handler;
3613   if (RetNNAttr) {
3614     assert(!requiresReturnValueNullabilityCheck() &&
3615            "Cannot check nullability and the nonnull attribute");
3616     AttrLoc = RetNNAttr->getLocation();
3617     CheckKind = SanitizerKind::ReturnsNonnullAttribute;
3618     Handler = SanitizerHandler::NonnullReturn;
3619   } else {
3620     if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3621       if (auto *TSI = DD->getTypeSourceInfo())
3622         if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>())
3623           AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3624     CheckKind = SanitizerKind::NullabilityReturn;
3625     Handler = SanitizerHandler::NullabilityReturn;
3626   }
3627 
3628   SanitizerScope SanScope(this);
3629 
3630   // Make sure the "return" source location is valid. If we're checking a
3631   // nullability annotation, make sure the preconditions for the check are met.
3632   llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3633   llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3634   llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3635   llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3636   if (requiresReturnValueNullabilityCheck())
3637     CanNullCheck =
3638         Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3639   Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3640   EmitBlock(Check);
3641 
3642   // Now do the null check.
3643   llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3644   llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3645   llvm::Value *DynamicData[] = {SLocPtr};
3646   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3647 
3648   EmitBlock(NoCheck);
3649 
3650 #ifndef NDEBUG
3651   // The return location should not be used after the check has been emitted.
3652   ReturnLocation = Address::invalid();
3653 #endif
3654 }
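     // For instance (hypothetical declaration, not from this file):
     //   __attribute__((returns_nonnull)) int *f();
     // built with -fsanitize=returns-nonnull-attribute, the epilogue
     // branches to the "nullcheck" block above and reports a null return
     // value at the recorded return location.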
3655 
3656 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3657   const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3658   return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3659 }
3660 
3661 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3662                                           QualType Ty) {
3663   // FIXME: Generate IR in one pass, rather than going back and fixing up these
3664   // placeholders.
3665   llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3666   llvm::Type *IRPtrTy = IRTy->getPointerTo();
3667   llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3668 
3669   // FIXME: When we generate this IR in one pass, we shouldn't need
3670   // this win32-specific alignment hack.
3671   CharUnits Align = CharUnits::fromQuantity(4);
3672   Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3673 
3674   return AggValueSlot::forAddr(Address(Placeholder, Align),
3675                                Ty.getQualifiers(),
3676                                AggValueSlot::IsNotDestructed,
3677                                AggValueSlot::DoesNotNeedGCBarriers,
3678                                AggValueSlot::IsNotAliased,
3679                                AggValueSlot::DoesNotOverlap);
3680 }
3681 
3682 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3683                                           const VarDecl *param,
3684                                           SourceLocation loc) {
3685   // StartFunction converted the ABI-lowered parameter(s) into a
3686   // local alloca.  We need to turn that into an r-value suitable
3687   // for EmitCall.
3688   Address local = GetAddrOfLocalVar(param);
3689 
3690   QualType type = param->getType();
3691 
3692   if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3693     CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3694   }
3695 
3696   // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3697   // but the argument needs to be the original pointer.
3698   if (type->isReferenceType()) {
3699     args.add(RValue::get(Builder.CreateLoad(local)), type);
3700 
3701   // In ARC, move out of consumed arguments so that the release cleanup
3702   // entered by StartFunction doesn't cause an over-release.  This isn't
3703   // optimal -O0 code generation, but it should get cleaned up when
3704   // optimization is enabled.  This also assumes that delegate calls are
3705   // performed exactly once for a set of arguments, but that should be safe.
3706   } else if (getLangOpts().ObjCAutoRefCount &&
3707              param->hasAttr<NSConsumedAttr>() &&
3708              type->isObjCRetainableType()) {
3709     llvm::Value *ptr = Builder.CreateLoad(local);
3710     auto null =
3711       llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3712     Builder.CreateStore(null, local);
3713     args.add(RValue::get(ptr), type);
3714 
3715   // For the most part, we just need to load the alloca, except that
3716   // aggregate r-values are actually pointers to temporaries.
3717   } else {
3718     args.add(convertTempToRValue(local, type, loc), type);
3719   }
3720 
3721   // Deactivate the cleanup for the callee-destructed param that was pushed.
3722   if (type->isRecordType() && !CurFuncIsThunk &&
3723       type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3724       param->needsDestruction(getContext())) {
3725     EHScopeStack::stable_iterator cleanup =
3726         CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3727     assert(cleanup.isValid() &&
3728            "cleanup for callee-destructed param not recorded");
3729     // This unreachable is a temporary marker which will be removed later.
3730     llvm::Instruction *isActive = Builder.CreateUnreachable();
3731     args.addArgCleanupDeactivation(cleanup, isActive);
3732   }
3733 }
3734 
3735 static bool isProvablyNull(llvm::Value *addr) {
3736   return isa<llvm::ConstantPointerNull>(addr);
3737 }
3738 
3739 /// Emit the actual writing-back of a writeback.
3740 static void emitWriteback(CodeGenFunction &CGF,
3741                           const CallArgList::Writeback &writeback) {
3742   const LValue &srcLV = writeback.Source;
3743   Address srcAddr = srcLV.getAddress(CGF);
3744   assert(!isProvablyNull(srcAddr.getPointer()) &&
3745          "shouldn't have writeback for provably null argument");
3746 
3747   llvm::BasicBlock *contBB = nullptr;
3748 
3749   // If the argument wasn't provably non-null, we need to null check
3750   // before doing the store.
3751   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3752                                               CGF.CGM.getDataLayout());
3753   if (!provablyNonNull) {
3754     llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3755     contBB = CGF.createBasicBlock("icr.done");
3756 
3757     llvm::Value *isNull =
3758       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3759     CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3760     CGF.EmitBlock(writebackBB);
3761   }
3762 
3763   // Load the value to writeback.
3764   llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3765 
3766   // Cast it back, in case we're writing an id to a Foo* or something.
3767   value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3768                                     "icr.writeback-cast");
3769 
3770   // Perform the writeback.
3771 
3772   // If we have a "to use" value, it's something we need to emit a use
3773   // of.  This has to be carefully threaded in: if it's done after the
3774   // release it's potentially undefined behavior (and the optimizer
3775   // will ignore it), and if it happens before the retain then the
3776   // optimizer could move the release there.
3777   if (writeback.ToUse) {
3778     assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3779 
3780     // Retain the new value.  No need to block-copy here:  the block's
3781     // being passed up the stack.
3782     value = CGF.EmitARCRetainNonBlock(value);
3783 
3784     // Emit the intrinsic use here.
3785     CGF.EmitARCIntrinsicUse(writeback.ToUse);
3786 
3787     // Load the old value (primitively).
3788     llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3789 
3790     // Put the new value in place (primitively).
3791     CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3792 
3793     // Release the old value.
3794     CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3795 
3796   // Otherwise, we can just do a normal lvalue store.
3797   } else {
3798     CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3799   }
3800 
3801   // Jump to the continuation block.
3802   if (!provablyNonNull)
3803     CGF.EmitBlock(contBB);
3804 }
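     // Context (sketch): for an ObjC call such as
     //   NSError *err = nil;
     //   [obj doThing:&err];
     // under ARC the callee receives the address of an __autoreleasing
     // temporary; the function above stores the temporary's value back into
     // 'err' afterwards, null-checking the destination first when it is not
     // provably non-null.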
3805 
3806 static void emitWritebacks(CodeGenFunction &CGF,
3807                            const CallArgList &args) {
3808   for (const auto &I : args.writebacks())
3809     emitWriteback(CGF, I);
3810 }
3811 
3812 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3813                                             const CallArgList &CallArgs) {
3814   ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3815     CallArgs.getCleanupsToDeactivate();
3816   // Iterate in reverse to increase the likelihood of popping the cleanup.
3817   for (const auto &I : llvm::reverse(Cleanups)) {
3818     CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3819     I.IsActiveIP->eraseFromParent();
3820   }
3821 }
3822 
3823 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3824   if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3825     if (uop->getOpcode() == UO_AddrOf)
3826       return uop->getSubExpr();
3827   return nullptr;
3828 }
3829 
3830 /// Emit an argument that's being passed call-by-writeback.  That is,
3831 /// we are passing the address of an __autoreleased temporary; it
3832 /// might be copy-initialized with the current value of the given
3833 /// address, but it will definitely be copied out of after the call.
3834 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3835                              const ObjCIndirectCopyRestoreExpr *CRE) {
3836   LValue srcLV;
3837 
3838   // Make an optimistic effort to emit the address as an l-value.
3839   // This can fail if the argument expression is more complicated.
3840   if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3841     srcLV = CGF.EmitLValue(lvExpr);
3842 
3843   // Otherwise, just emit it as a scalar.
3844   } else {
3845     Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3846 
3847     QualType srcAddrType =
3848       CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3849     srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3850   }
3851   Address srcAddr = srcLV.getAddress(CGF);
3852 
3853   // The dest and src types don't necessarily match in LLVM terms
3854   // because of the crazy ObjC compatibility rules.
3855 
3856   llvm::PointerType *destType =
3857     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3858 
3859   // If the address is a constant null, just pass the appropriate null.
3860   if (isProvablyNull(srcAddr.getPointer())) {
3861     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3862              CRE->getType());
3863     return;
3864   }
3865 
3866   // Create the temporary.
3867   Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3868                                       CGF.getPointerAlign(),
3869                                       "icr.temp");
3870   // Loading an l-value can introduce a cleanup if the l-value is __weak,
3871   // and that cleanup will be conditional if we can't prove that the l-value
3872   // isn't null, so we need to register a dominating point so that the cleanups
3873   // system will make valid IR.
3874   CodeGenFunction::ConditionalEvaluation condEval(CGF);
3875 
3876   // Zero-initialize it if we're not doing a copy-initialization.
3877   bool shouldCopy = CRE->shouldCopy();
3878   if (!shouldCopy) {
3879     llvm::Value *null =
3880       llvm::ConstantPointerNull::get(
3881         cast<llvm::PointerType>(destType->getElementType()));
3882     CGF.Builder.CreateStore(null, temp);
3883   }
3884 
3885   llvm::BasicBlock *contBB = nullptr;
3886   llvm::BasicBlock *originBB = nullptr;
3887 
3888   // If the address is *not* known to be non-null, we need to switch.
3889   llvm::Value *finalArgument;
3890 
3891   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3892                                               CGF.CGM.getDataLayout());
3893   if (provablyNonNull) {
3894     finalArgument = temp.getPointer();
3895   } else {
3896     llvm::Value *isNull =
3897       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3898 
3899     finalArgument = CGF.Builder.CreateSelect(
3900         isNull, llvm::ConstantPointerNull::get(destType), temp.getPointer(),
3901         "icr.argument");
3902 
3903     // If we need to copy, then the load has to be conditional, which
3904     // means we need control flow.
3905     if (shouldCopy) {
3906       originBB = CGF.Builder.GetInsertBlock();
3907       contBB = CGF.createBasicBlock("icr.cont");
3908       llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3909       CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3910       CGF.EmitBlock(copyBB);
3911       condEval.begin(CGF);
3912     }
3913   }
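  // When a conditional copy was needed, the control flow at this point looks
  // roughly like (illustrative):
  //   %isnull = icmp eq i8** %src, null
  //   br i1 %isnull, label %icr.cont, label %icr.copy
  // with the copy emitted in %icr.copy and both paths rejoining at %icr.cont.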
3914 
3915   llvm::Value *valueToUse = nullptr;
3916 
3917   // Perform a copy if necessary.
3918   if (shouldCopy) {
3919     RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3920     assert(srcRV.isScalar());
3921 
3922     llvm::Value *src = srcRV.getScalarVal();
3923     src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3924                                     "icr.cast");
3925 
3926     // Use an ordinary store, not a store-to-lvalue.
3927     CGF.Builder.CreateStore(src, temp);
3928 
3929     // If optimization is enabled, and the value was held in a
3930     // __strong variable, we need to tell the optimizer that this
3931     // value has to stay alive until we do the store back.
3932     // This is because the temporary is effectively unretained,
3933     // and so otherwise we can violate the high-level semantics.
3934     if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3935         srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3936       valueToUse = src;
3937     }
3938   }
3939 
3940   // Finish the control flow if we needed it.
3941   if (shouldCopy && !provablyNonNull) {
3942     llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3943     CGF.EmitBlock(contBB);
3944 
3945     // Make a phi for the value to pass to the intrinsic use.
3946     if (valueToUse) {
3947       llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3948                                                       "icr.to-use");
3949       phiToUse->addIncoming(valueToUse, copyBB);
3950       phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3951                             originBB);
3952       valueToUse = phiToUse;
3953     }
3954 
3955     condEval.end(CGF);
3956   }
3957 
3958   args.addWriteback(srcLV, temp, valueToUse);
3959   args.add(RValue::get(finalArgument), CRE->getType());
3960 }
3961 
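// allocateArgumentMemory and freeArgumentMemory bracket the inalloca argument
// area with a stack save/restore pair, roughly (illustrative IR):
//   %inalloca.save = call i8* @llvm.stacksave()
//   ...build the argument struct and emit the call...
//   call void @llvm.stackrestore(i8* %inalloca.save)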
3962 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3963   assert(!StackBase);
3964 
3965   // Save the stack.
3966   llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3967   StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3968 }
3969 
3970 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3971   if (StackBase) {
3972     // Restore the stack after the call.
3973     llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3974     CGF.Builder.CreateCall(F, StackBase);
3975   }
3976 }
3977 
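// For example, with -fsanitize=nonnull-attribute a call such as (illustrative):
//   __attribute__((nonnull(1))) void use(int *p);
//   use(q);
// gets a null check on 'q' emitted at the call site.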
3978 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3979                                           SourceLocation ArgLoc,
3980                                           AbstractCallee AC,
3981                                           unsigned ParmNum) {
3982   if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3983                          SanOpts.has(SanitizerKind::NullabilityArg)))
3984     return;
3985 
3986   // The param decl may be missing in a variadic function.
3987   auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3988   unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3989 
3990   // Prefer the nonnull attribute if it's present.
3991   const NonNullAttr *NNAttr = nullptr;
3992   if (SanOpts.has(SanitizerKind::NonnullAttribute))
3993     NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3994 
3995   bool CanCheckNullability = false;
3996   if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3997     auto Nullability = PVD->getType()->getNullability(getContext());
3998     CanCheckNullability = Nullability &&
3999                           *Nullability == NullabilityKind::NonNull &&
4000                           PVD->getTypeSourceInfo();
4001   }
4002 
4003   if (!NNAttr && !CanCheckNullability)
4004     return;
4005 
4006   SourceLocation AttrLoc;
4007   SanitizerMask CheckKind;
4008   SanitizerHandler Handler;
4009   if (NNAttr) {
4010     AttrLoc = NNAttr->getLocation();
4011     CheckKind = SanitizerKind::NonnullAttribute;
4012     Handler = SanitizerHandler::NonnullArg;
4013   } else {
4014     AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
4015     CheckKind = SanitizerKind::NullabilityArg;
4016     Handler = SanitizerHandler::NullabilityArg;
4017   }
4018 
4019   SanitizerScope SanScope(this);
4020   llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType);
4021   llvm::Constant *StaticData[] = {
4022       EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
4023       llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
4024   };
4025   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
4026 }
4027 
4028 // Check if the call is going to use the inalloca convention. This needs to
4029 // agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged
4030 // later, so we can't check it directly.
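// For example (i686-pc-windows-msvc, illustrative): a by-value argument whose
// copy constructor is non-trivial must be constructed in the inalloca area:
//   struct S { S(const S &); int x; };
//   void g(S s);   // lowered roughly as g(<{ %struct.S }>* inalloca(...))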
4031 static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC,
4032                             ArrayRef<QualType> ArgTypes) {
4033   // The Swift calling conventions don't go through the target-specific
4034   // argument classification, so they never use inalloca.
4035   // TODO: Consider limiting inalloca use to only calling conventions supported
4036   // by MSVC.
4037   if (ExplicitCC == CC_Swift || ExplicitCC == CC_SwiftAsync)
4038     return false;
4039   if (!CGM.getTarget().getCXXABI().isMicrosoft())
4040     return false;
4041   return llvm::any_of(ArgTypes, [&](QualType Ty) {
4042     return isInAllocaArgument(CGM.getCXXABI(), Ty);
4043   });
4044 }
4045 
4046 #ifndef NDEBUG
4047 // Determine whether the given Objective-C method may have type
4048 // parameters in its signature.
4049 static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
4050   const DeclContext *dc = method->getDeclContext();
4051   if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) {
4052     return classDecl->getTypeParamListAsWritten();
4053   }
4054 
4055   if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
4056     return catDecl->getTypeParamList();
4057   }
4058 
4059   return false;
4060 }
4061 #endif
4062 
4063 /// EmitCallArgs - Emit call arguments for a function.
4064 void CodeGenFunction::EmitCallArgs(
4065     CallArgList &Args, PrototypeWrapper Prototype,
4066     llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4067     AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
4068   SmallVector<QualType, 16> ArgTypes;
4069 
4070   assert((ParamsToSkip == 0 || Prototype.P) &&
4071          "Can't skip parameters if type info is not provided");
4072 
4073   // This variable only captures *explicitly* written conventions, not those
4074   // applied by default via command line flags or target defaults, such as
4075   // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would
4076   // require knowing if this is a C++ instance method or being able to see
4077   // unprototyped FunctionTypes.
4078   CallingConv ExplicitCC = CC_C;
4079 
4080   // First, if a prototype was provided, use those argument types.
4081   bool IsVariadic = false;
4082   if (Prototype.P) {
4083     const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>();
4084     if (MD) {
4085       IsVariadic = MD->isVariadic();
4086       ExplicitCC = getCallingConventionForDecl(
4087           MD, CGM.getTarget().getTriple().isOSWindows());
4088       ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4089                       MD->param_type_end());
4090     } else {
4091       const auto *FPT = Prototype.P.get<const FunctionProtoType *>();
4092       IsVariadic = FPT->isVariadic();
4093       ExplicitCC = FPT->getExtInfo().getCC();
4094       ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4095                       FPT->param_type_end());
4096     }
4097 
4098 #ifndef NDEBUG
4099     // Check that the prototyped types match the argument expression types.
4100     bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD);
4101     CallExpr::const_arg_iterator Arg = ArgRange.begin();
4102     for (QualType Ty : ArgTypes) {
4103       assert(Arg != ArgRange.end() && "Running over edge of argument list!");
4104       assert(
4105           (isGenericMethod || Ty->isVariablyModifiedType() ||
4106            Ty.getNonReferenceType()->isObjCRetainableType() ||
4107            getContext()
4108                    .getCanonicalType(Ty.getNonReferenceType())
4109                    .getTypePtr() ==
4110                getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
4111           "type mismatch in call argument!");
4112       ++Arg;
4113     }
4114 
4115     // Either we've emitted all the call args, or we have a call to a
4116     // variadic function.
4117     assert((Arg == ArgRange.end() || IsVariadic) &&
4118            "Extra arguments in non-variadic function!");
4119 #endif
4120   }
4121 
4122   // If we still have any arguments, emit them using the type of the argument.
4123   for (auto *A : llvm::make_range(std::next(ArgRange.begin(), ArgTypes.size()),
4124                                   ArgRange.end()))
4125     ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4126   assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4127 
4128   // We must evaluate arguments from right to left in the MS C++ ABI,
4129   // because arguments are destroyed left to right in the callee. As a special
4130   // case, there are certain language constructs that require left-to-right
4131   // evaluation, and in those cases we consider the evaluation order requirement
4132   // to trump the "destruction order is reverse construction order" guarantee.
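  // For example, in f(A(), B()) under the MS C++ ABI we evaluate B() before
  // A(), so that the callee's left-to-right destruction runs in reverse
  // construction order.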
4133   bool LeftToRight =
4134       CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
4135           ? Order == EvaluationOrder::ForceLeftToRight
4136           : Order != EvaluationOrder::ForceRightToLeft;
4137 
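  // A hidden size argument is appended for pass_object_size parameters,
  // e.g. (illustrative):
  //   void fill(char *buf __attribute__((pass_object_size(0))));
  //   fill(arr);   // also passes __builtin_object_size(arr, 0)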
4138   auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
4139                                          RValue EmittedArg) {
4140     if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
4141       return;
4142     auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
4143     if (PS == nullptr)
4144       return;
4145 
4146     const auto &Context = getContext();
4147     auto SizeTy = Context.getSizeType();
4148     auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
4149     assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
4150     llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
4151                                                      EmittedArg.getScalarVal(),
4152                                                      PS->isDynamic());
4153     Args.add(RValue::get(V), SizeTy);
4154     // If we're emitting args in reverse, be sure to do so with
4155     // pass_object_size, as well.
4156     if (!LeftToRight)
4157       std::swap(Args.back(), *(&Args.back() - 1));
4158   };
4159 
4160   // Insert a stack save if we're going to need any inalloca args.
4161   if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
4162     assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4163            "inalloca only supported on x86");
4164     Args.allocateArgumentMemory(*this);
4165   }
4166 
4167   // Evaluate each argument in the appropriate order.
4168   size_t CallArgsStart = Args.size();
4169   for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4170     unsigned Idx = LeftToRight ? I : E - I - 1;
4171     CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
4172     unsigned InitialArgSize = Args.size();
4173     // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
4174     // the argument and parameter match or the objc method is parameterized.
4175     assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
4176             getContext().hasSameUnqualifiedType((*Arg)->getType(),
4177                                                 ArgTypes[Idx]) ||
4178             (isa<ObjCMethodDecl>(AC.getDecl()) &&
4179              isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
4180            "Argument and parameter types don't match");
4181     EmitCallArg(Args, *Arg, ArgTypes[Idx]);
4182     // In particular, we depend on it being the last arg in Args, and the
4183     // objectsize bits depend on there only being one arg if !LeftToRight.
4184     assert(InitialArgSize + 1 == Args.size() &&
4185            "The code below depends on only adding one arg per EmitCallArg");
4186     (void)InitialArgSize;
4187     // Since pointer arguments are never emitted as an LValue, it is safe
4188     // to emit the non-null argument check for r-values only.
4189     if (!Args.back().hasLValue()) {
4190       RValue RVArg = Args.back().getKnownRValue();
4191       EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
4192                           ParamsToSkip + Idx);
4193       // @llvm.objectsize should never have side-effects and shouldn't need
4194       // destruction/cleanups, so we can safely "emit" it after its arg,
4195       // regardless of right-to-leftness.
4196       MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4197     }
4198   }
4199 
4200   if (!LeftToRight) {
4201     // Un-reverse the arguments we just evaluated so they match up with the LLVM
4202     // IR function.
4203     std::reverse(Args.begin() + CallArgsStart, Args.end());
4204   }
4205 }
4206 
4207 namespace {
4208 
4209 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
4210   DestroyUnpassedArg(Address Addr, QualType Ty)
4211       : Addr(Addr), Ty(Ty) {}
4212 
4213   Address Addr;
4214   QualType Ty;
4215 
4216   void Emit(CodeGenFunction &CGF, Flags flags) override {
4217     QualType::DestructionKind DtorKind = Ty.isDestructedType();
4218     if (DtorKind == QualType::DK_cxx_destructor) {
4219       const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
4220       assert(!Dtor->isTrivial());
4221       CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
4222                                 /*Delegating=*/false, Addr, Ty);
4223     } else {
4224       CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
4225     }
4226   }
4227 };
4228 
4229 struct DisableDebugLocationUpdates {
4230   CodeGenFunction &CGF;
4231   bool disabledDebugInfo;
4232   DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
4233     if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
4234       CGF.disableDebugInfo();
4235   }
4236   ~DisableDebugLocationUpdates() {
4237     if (disabledDebugInfo)
4238       CGF.enableDebugInfo();
4239   }
4240 };
4241 
4242 } // end anonymous namespace
4243 
4244 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
4245   if (!HasLV)
4246     return RV;
4247   LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
4248   CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
4249                         LV.isVolatile());
4250   IsUsed = true;
4251   return RValue::getAggregate(Copy.getAddress(CGF));
4252 }
4253 
4254 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
4255   LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
4256   if (!HasLV && RV.isScalar())
4257     CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
4258   else if (!HasLV && RV.isComplex())
4259     CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
4260   else {
4261     auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
4262     LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
4263     // We assume that call args are never copied into subobjects.
4264     CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
4265                           HasLV ? LV.isVolatileQualified()
4266                                 : RV.isVolatileQualified());
4267   }
4268   IsUsed = true;
4269 }
4270 
4271 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
4272                                   QualType type) {
4273   DisableDebugLocationUpdates Dis(*this, E);
4274   if (const ObjCIndirectCopyRestoreExpr *CRE
4275         = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
4276     assert(getLangOpts().ObjCAutoRefCount);
4277     return emitWritebackArg(*this, args, CRE);
4278   }
4279 
4280   assert(type->isReferenceType() == E->isGLValue() &&
4281          "reference binding to unmaterialized r-value!");
4282 
4283   if (E->isGLValue()) {
4284     assert(E->getObjectKind() == OK_Ordinary);
4285     return args.add(EmitReferenceBindingToExpr(E), type);
4286   }
4287 
4288   bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
4289 
4290   // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
4291   // However, we still have to push an EH-only cleanup in case we unwind before
4292   // we make it to the call.
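  // For example (MS C++ ABI, illustrative):
  //   struct S { ~S(); };
  //   void f(S);
  //   f(S{});   // f destroys its parameter, but if evaluating another
  //             // argument throws first, we must destroy it here.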
4293   if (type->isRecordType() &&
4294       type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
4295     // If we're using inalloca, use the argument memory.  Otherwise, use a
4296     // temporary.
4297     AggValueSlot Slot;
4298     if (args.isUsingInAlloca())
4299       Slot = createPlaceholderSlot(*this, type);
4300     else
4301       Slot = CreateAggTemp(type, "agg.tmp");
4302 
4303     bool DestroyedInCallee = true, NeedsEHCleanup = true;
4304     if (const auto *RD = type->getAsCXXRecordDecl())
4305       DestroyedInCallee = RD->hasNonTrivialDestructor();
4306     else
4307       NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
4308 
4309     if (DestroyedInCallee)
4310       Slot.setExternallyDestructed();
4311 
4312     EmitAggExpr(E, Slot);
4313     RValue RV = Slot.asRValue();
4314     args.add(RV, type);
4315 
4316     if (DestroyedInCallee && NeedsEHCleanup) {
4317       // Create a no-op GEP between the placeholder and the cleanup so we can
4318       // RAUW it successfully.  It also serves as a marker of the first
4319       // instruction where the cleanup is active.
4320       pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
4321                                               type);
4322       // This unreachable is a temporary marker which will be removed later.
4323       llvm::Instruction *IsActive = Builder.CreateUnreachable();
4324       args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
4325     }
4326     return;
4327   }
4328 
4329   if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
4330       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
4331     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
4332     assert(L.isSimple());
4333     args.addUncopiedAggregate(L, type);
4334     return;
4335   }
4336 
4337   args.add(EmitAnyExprToTemp(E), type);
4338 }
4339 
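// For example (Win64, illustrative): with "#define NULL 0", a call such as
//   printf("%p\n", NULL);
// must widen the 32-bit 0 to a pointer-sized integer, matching MSVC.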
4340 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
4341   // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
4342   // implicitly widens null pointer constants that are arguments to varargs
4343   // functions to pointer-sized ints.
4344   if (!getTarget().getTriple().isOSWindows())
4345     return Arg->getType();
4346 
4347   if (Arg->getType()->isIntegerType() &&
4348       getContext().getTypeSize(Arg->getType()) <
4349           getContext().getTargetInfo().getPointerWidth(0) &&
4350       Arg->isNullPointerConstant(getContext(),
4351                                  Expr::NPC_ValueDependentIsNotNull)) {
4352     return getContext().getIntPtrType();
4353   }
4354 
4355   return Arg->getType();
4356 }
4357 
4358 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4359 // optimizer it can aggressively ignore unwind edges.
4360 void
4361 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
4362   if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
4363       !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
4364     Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
4365                       CGM.getNoObjCARCExceptionsMetadata());
4366 }
4367 
4368 /// Emits a call to the given no-arguments nounwind runtime function.
4369 llvm::CallInst *
4370 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4371                                          const llvm::Twine &name) {
4372   return EmitNounwindRuntimeCall(callee, None, name);
4373 }
4374 
4375 /// Emits a call to the given nounwind runtime function.
4376 llvm::CallInst *
4377 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4378                                          ArrayRef<llvm::Value *> args,
4379                                          const llvm::Twine &name) {
4380   llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
4381   call->setDoesNotThrow();
4382   return call;
4383 }
4384 
4385 /// Emits a simple call (never an invoke) to the given no-arguments
4386 /// runtime function.
4387 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4388                                                  const llvm::Twine &name) {
4389   return EmitRuntimeCall(callee, None, name);
4390 }
4391 
4392 // Calls which may throw must have operand bundles indicating which funclet
4393 // they are nested within.
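// For example, a call emitted inside a catch funclet looks roughly like:
//   %call = call i32 @f() [ "funclet"(token %catchpad) ]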
4394 SmallVector<llvm::OperandBundleDef, 1>
4395 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
4396   SmallVector<llvm::OperandBundleDef, 1> BundleList;
4397   // There is no need for a funclet operand bundle if we aren't inside a
4398   // funclet.
4399   if (!CurrentFuncletPad)
4400     return BundleList;
4401 
4402   // Skip intrinsics which cannot throw.
4403   auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
4404   if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
4405     return BundleList;
4406 
4407   BundleList.emplace_back("funclet", CurrentFuncletPad);
4408   return BundleList;
4409 }
4410 
4411 /// Emits a simple call (never an invoke) to the given runtime function.
4412 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4413                                                  ArrayRef<llvm::Value *> args,
4414                                                  const llvm::Twine &name) {
4415   llvm::CallInst *call = Builder.CreateCall(
4416       callee, args, getBundlesForFunclet(callee.getCallee()), name);
4417   call->setCallingConv(getRuntimeCC());
4418   return call;
4419 }
4420 
4421 /// Emits a call or invoke to the given noreturn runtime function.
4422 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
4423     llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
4424   SmallVector<llvm::OperandBundleDef, 1> BundleList =
4425       getBundlesForFunclet(callee.getCallee());
4426 
4427   if (getInvokeDest()) {
4428     llvm::InvokeInst *invoke =
4429       Builder.CreateInvoke(callee,
4430                            getUnreachableBlock(),
4431                            getInvokeDest(),
4432                            args,
4433                            BundleList);
4434     invoke->setDoesNotReturn();
4435     invoke->setCallingConv(getRuntimeCC());
4436   } else {
4437     llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
4438     call->setDoesNotReturn();
4439     call->setCallingConv(getRuntimeCC());
4440     Builder.CreateUnreachable();
4441   }
4442 }
4443 
4444 /// Emits a call or invoke instruction to the given nullary runtime function.
4445 llvm::CallBase *
4446 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4447                                          const Twine &name) {
4448   return EmitRuntimeCallOrInvoke(callee, None, name);
4449 }
4450 
4451 /// Emits a call or invoke instruction to the given runtime function.
4452 llvm::CallBase *
4453 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4454                                          ArrayRef<llvm::Value *> args,
4455                                          const Twine &name) {
4456   llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
4457   call->setCallingConv(getRuntimeCC());
4458   return call;
4459 }
4460 
4461 /// Emits a call or invoke instruction to the given function, depending
4462 /// on the current state of the EH stack.
4463 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
4464                                                   ArrayRef<llvm::Value *> Args,
4465                                                   const Twine &Name) {
4466   llvm::BasicBlock *InvokeDest = getInvokeDest();
4467   SmallVector<llvm::OperandBundleDef, 1> BundleList =
4468       getBundlesForFunclet(Callee.getCallee());
4469 
4470   llvm::CallBase *Inst;
4471   if (!InvokeDest)
4472     Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
4473   else {
4474     llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
4475     Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
4476                                 Name);
4477     EmitBlock(ContBB);
4478   }
4479 
4480   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4481   // optimizer it can aggressively ignore unwind edges.
4482   if (CGM.getLangOpts().ObjCAutoRefCount)
4483     AddObjCARCExceptionMetadata(Inst);
4484 
4485   return Inst;
4486 }
4487 
4488 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
4489                                                   llvm::Value *New) {
4490   DeferredReplacements.push_back(
4491       std::make_pair(llvm::WeakTrackingVH(Old), New));
4492 }
4493 
4494 namespace {
4495 
4496 /// Specify the given \p NewAlign as the alignment of the return-value attribute.
4497 /// If such an attribute already exists, raise it to the maximum of the two.
4498 LLVM_NODISCARD llvm::AttributeList
4499 maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
4500                                 const llvm::AttributeList &Attrs,
4501                                 llvm::Align NewAlign) {
4502   llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
4503   if (CurAlign >= NewAlign)
4504     return Attrs;
4505   llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
4506   return Attrs
4507       .removeAttribute(Ctx, llvm::AttributeList::ReturnIndex,
4508                        llvm::Attribute::AttrKind::Alignment)
4509       .addAttribute(Ctx, llvm::AttributeList::ReturnIndex, AlignAttr);
4510 }
4511 
4512 template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
4513 protected:
4514   CodeGenFunction &CGF;
4515 
4516   /// We do nothing if this is, or becomes, nullptr.
4517   const AlignedAttrTy *AA = nullptr;
4518 
4519   llvm::Value *Alignment = nullptr;      // May or may not be a constant.
4520   llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.
4521 
4522   AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4523       : CGF(CGF_) {
4524     if (!FuncDecl)
4525       return;
4526     AA = FuncDecl->getAttr<AlignedAttrTy>();
4527   }
4528 
4529 public:
4530   /// If we can, materialize the alignment as an attribute on the return value.
4531   LLVM_NODISCARD llvm::AttributeList
4532   TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
4533     if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
4534       return Attrs;
4535     const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
4536     if (!AlignmentCI)
4537       return Attrs;
4538     // We may legitimately have non-power-of-2 alignment here.
4539     // If so, this is UB land, emit it via `@llvm.assume` instead.
4540     if (!AlignmentCI->getValue().isPowerOf2())
4541       return Attrs;
4542     llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
4543         CGF.getLLVMContext(), Attrs,
4544         llvm::Align(
4545             AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
4546     AA = nullptr; // We're done. Disallow doing anything else.
4547     return NewAttrs;
4548   }
4549 
4550   /// Emit alignment assumption.
4551   /// This is a general fallback that we take if there is an offset, the
4552   /// alignment is variable, or we are sanitizing for alignment.
4553   void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
4554     if (!AA)
4555       return;
4556     CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
4557                                 AA->getLocation(), Alignment, OffsetCI);
4558     AA = nullptr; // We're done. Disallow doing anything else.
4559   }
4560 };
4561 
4562 /// Helper data structure to emit `AssumeAlignedAttr`.
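/// For example (illustrative):
///   __attribute__((assume_aligned(64))) void *alloc64(void);
/// lets call sites carry an "align 64" return attribute, or fall back to an
/// @llvm.assume when an offset or the alignment sanitizer is involved.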
4563 class AssumeAlignedAttrEmitter final
4564     : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
4565 public:
4566   AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4567       : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4568     if (!AA)
4569       return;
4570     // It is guaranteed that the alignment/offset are constants.
4571     Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
4572     if (Expr *Offset = AA->getOffset()) {
4573       OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
4574       if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
4575         OffsetCI = nullptr;
4576     }
4577   }
4578 };
4579 
4580 /// Helper data structure to emit `AllocAlignAttr`.
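/// For example (illustrative):
///   void *my_alloc(size_t size, size_t align) __attribute__((alloc_align(2)));
/// Here the assumed alignment is whatever the second argument evaluates to at
/// each call site, so it need not be a constant.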
4581 class AllocAlignAttrEmitter final
4582     : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
4583 public:
4584   AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
4585                         const CallArgList &CallArgs)
4586       : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4587     if (!AA)
4588       return;
4589     // Alignment may or may not be a constant, and that is okay.
4590     Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
4591                     .getRValue(CGF)
4592                     .getScalarVal();
4593   }
4594 };
4595 
4596 } // namespace
4597 
4598 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
4599                                  const CGCallee &Callee,
4600                                  ReturnValueSlot ReturnValue,
4601                                  const CallArgList &CallArgs,
4602                                  llvm::CallBase **callOrInvoke, bool IsMustTail,
4603                                  SourceLocation Loc) {
4604   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
4605 
4606   assert(Callee.isOrdinary() || Callee.isVirtual());
4607 
4608   // Handle struct-return functions by passing a pointer to the
4609   // location that we would like to return into.
4610   QualType RetTy = CallInfo.getReturnType();
4611   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
4612 
4613   llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
4614 
4615   const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4616   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
4617     // We can only guarantee that a function is called from the correct
4618     // context/function based on the appropriate target attributes, so we
4619     // only check in the case where we have both always_inline and target.
4620     // Otherwise the call could sit behind a runtime check for the proper
4621     // CPU features (and function-based code generation keeps that from
4622     // causing code generation issues).
4623     if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
4624         TargetDecl->hasAttr<TargetAttr>())
4625       checkTargetFeatures(Loc, FD);
4626 
4627     // Some architectures (such as x86-64) have the ABI changed based on
4628     // attribute-target/features. Give them a chance to diagnose.
4629     CGM.getTargetCodeGenInfo().checkFunctionCallABI(
4630         CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
4631   }
4632 
4633 #ifndef NDEBUG
4634   if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
4635     // For an inalloca varargs function, we don't expect CallInfo to match the
4636     // function pointer's type, because the inalloca struct will have extra
4637     // fields in it for the varargs parameters.  Code later in this function
4638     // bitcasts the function pointer to the type derived from CallInfo.
4639     //
4640     // In other cases, we assert that the types match up (until pointers stop
4641     // having pointee types).
4642     llvm::Type *TypeFromVal;
4643     if (Callee.isVirtual())
4644       TypeFromVal = Callee.getVirtualFunctionType();
4645     else
4646       TypeFromVal =
4647           Callee.getFunctionPointer()->getType()->getPointerElementType();
4648     assert(IRFuncTy == TypeFromVal);
4649   }
4650 #endif
4651 
4652   // 1. Set up the arguments.
4653 
4654   // If we're using inalloca, insert the allocation after the stack save.
4655   // FIXME: Do this earlier rather than hacking it in here!
4656   Address ArgMemory = Address::invalid();
4657   if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
4658     const llvm::DataLayout &DL = CGM.getDataLayout();
4659     llvm::Instruction *IP = CallArgs.getStackBase();
4660     llvm::AllocaInst *AI;
4661     if (IP) {
4662       IP = IP->getNextNode();
4663       AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
4664                                 "argmem", IP);
4665     } else {
4666       AI = CreateTempAlloca(ArgStruct, "argmem");
4667     }
4668     auto Align = CallInfo.getArgStructAlignment();
4669     AI->setAlignment(Align.getAsAlign());
4670     AI->setUsedWithInAlloca(true);
4671     assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
4672     ArgMemory = Address(AI, Align);
4673   }
4674 
4675   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
4676   SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
4677 
4678   // If the call returns a temporary with struct return, create a temporary
4679   // alloca to hold the result, unless one is given to us.
4680   Address SRetPtr = Address::invalid();
4681   Address SRetAlloca = Address::invalid();
4682   llvm::Value *UnusedReturnSizePtr = nullptr;
4683   if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
4684     if (!ReturnValue.isNull()) {
4685       SRetPtr = ReturnValue.getValue();
4686     } else {
4687       SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
4688       if (HaveInsertPoint() && ReturnValue.isUnused()) {
4689         llvm::TypeSize size =
4690             CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
4691         UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
4692       }
4693     }
4694     if (IRFunctionArgs.hasSRetArg()) {
4695       IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
4696     } else if (RetAI.isInAlloca()) {
4697       Address Addr =
4698           Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
4699       Builder.CreateStore(SRetPtr.getPointer(), Addr);
4700     }
4701   }
4702 
4703   Address swiftErrorTemp = Address::invalid();
4704   Address swiftErrorArg = Address::invalid();
4705 
4706   // When passing arguments using temporary allocas, we need to add the
4707   // appropriate lifetime markers. This vector keeps track of all the lifetime
4708   // markers that need to be ended right after the call.
4709   SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;
4710 
4711   // Translate all of the arguments as necessary to match the IR lowering.
4712   assert(CallInfo.arg_size() == CallArgs.size() &&
4713          "Mismatch between function signature & arguments.");
4714   unsigned ArgNo = 0;
4715   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
4716   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
4717        I != E; ++I, ++info_it, ++ArgNo) {
4718     const ABIArgInfo &ArgInfo = info_it->info;
4719 
4720     // Insert a padding argument to ensure proper alignment.
4721     if (IRFunctionArgs.hasPaddingArg(ArgNo))
4722       IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
4723           llvm::UndefValue::get(ArgInfo.getPaddingType());
4724 
4725     unsigned FirstIRArg, NumIRArgs;
4726     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
4727 
4728     switch (ArgInfo.getKind()) {
4729     case ABIArgInfo::InAlloca: {
4730       assert(NumIRArgs == 0);
4731       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
4732       if (I->isAggregate()) {
4733         Address Addr = I->hasLValue()
4734                            ? I->getKnownLValue().getAddress(*this)
4735                            : I->getKnownRValue().getAggregateAddress();
4736         llvm::Instruction *Placeholder =
4737             cast<llvm::Instruction>(Addr.getPointer());
4738 
4739         if (!ArgInfo.getInAllocaIndirect()) {
4740           // Replace the placeholder with the appropriate argument slot GEP.
4741           CGBuilderTy::InsertPoint IP = Builder.saveIP();
4742           Builder.SetInsertPoint(Placeholder);
4743           Addr = Builder.CreateStructGEP(ArgMemory,
4744                                          ArgInfo.getInAllocaFieldIndex());
4745           Builder.restoreIP(IP);
4746         } else {
4747           // For indirect things such as overaligned structs, replace the
4748           // placeholder with a regular aggregate temporary alloca. Store the
4749           // address of this alloca into the struct.
4750           Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
4751           Address ArgSlot = Builder.CreateStructGEP(
4752               ArgMemory, ArgInfo.getInAllocaFieldIndex());
4753           Builder.CreateStore(Addr.getPointer(), ArgSlot);
4754         }
4755         deferPlaceholderReplacement(Placeholder, Addr.getPointer());
4756       } else if (ArgInfo.getInAllocaIndirect()) {
4757         // Make a temporary alloca and store the address of it into the argument
4758         // struct.
4759         Address Addr = CreateMemTempWithoutCast(
4760             I->Ty, getContext().getTypeAlignInChars(I->Ty),
4761             "indirect-arg-temp");
4762         I->copyInto(*this, Addr);
4763         Address ArgSlot =
4764             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4765         Builder.CreateStore(Addr.getPointer(), ArgSlot);
4766       } else {
4767         // Store the RValue into the argument struct.
4768         Address Addr =
4769             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4770         unsigned AS = Addr.getType()->getPointerAddressSpace();
4771         llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
4772         // There are some cases where a trivial bitcast is not avoidable.  The
4773         // definition of a type later in a translation unit may change its type
4774         // from {}* to (%struct.foo*)*.
4775         if (Addr.getType() != MemType)
4776           Addr = Builder.CreateBitCast(Addr, MemType);
4777         I->copyInto(*this, Addr);
4778       }
4779       break;
4780     }
4781 
4782     case ABIArgInfo::Indirect:
4783     case ABIArgInfo::IndirectAliased: {
4784       assert(NumIRArgs == 1);
4785       if (!I->isAggregate()) {
4786         // Make a temporary alloca to pass the argument.
4787         Address Addr = CreateMemTempWithoutCast(
4788             I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
4789         IRCallArgs[FirstIRArg] = Addr.getPointer();
4790 
4791         I->copyInto(*this, Addr);
4792       } else {
4793         // We want to avoid creating an unnecessary temporary+copy here;
4794         // however, we need one in three cases:
4795         // 1. If the argument is not byval, and we are required to copy the
4796         //    source.  (This case doesn't occur on any common architecture.)
4797         // 2. If the argument is byval, RV is not sufficiently aligned, and
4798         //    we cannot force it to be sufficiently aligned.
4799         // 3. If the argument is byval, but RV is not located in default
4800         //    or alloca address space.
4801         Address Addr = I->hasLValue()
4802                            ? I->getKnownLValue().getAddress(*this)
4803                            : I->getKnownRValue().getAggregateAddress();
4804         llvm::Value *V = Addr.getPointer();
4805         CharUnits Align = ArgInfo.getIndirectAlign();
4806         const llvm::DataLayout *TD = &CGM.getDataLayout();
4807 
4808         assert((FirstIRArg >= IRFuncTy->getNumParams() ||
4809                 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
4810                     TD->getAllocaAddrSpace()) &&
4811                "indirect argument must be in alloca address space");
4812 
4813         bool NeedCopy = false;
4814 
4815         if (Addr.getAlignment() < Align &&
4816             llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
4817                 Align.getAsAlign()) {
4818           NeedCopy = true;
4819         } else if (I->hasLValue()) {
4820           auto LV = I->getKnownLValue();
4821           auto AS = LV.getAddressSpace();
4822 
4823           if (!ArgInfo.getIndirectByVal() ||
4824               (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
4825             NeedCopy = true;
4826           }
4827           if (!getLangOpts().OpenCL) {
4828             if ((ArgInfo.getIndirectByVal() &&
4829                 (AS != LangAS::Default &&
4830                  AS != CGM.getASTAllocaAddressSpace()))) {
4831               NeedCopy = true;
4832             }
4833           }
4834           // For OpenCL even if RV is located in default or alloca address space
4835           // we don't want to perform address space cast for it.
4836           else if ((ArgInfo.getIndirectByVal() &&
4837                     Addr.getType()->getAddressSpace() != IRFuncTy->
4838                       getParamType(FirstIRArg)->getPointerAddressSpace())) {
4839             NeedCopy = true;
4840           }
4841         }
4842 
4843         if (NeedCopy) {
4844           // Create an aligned temporary, and copy to it.
4845           Address AI = CreateMemTempWithoutCast(
4846               I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
4847           IRCallArgs[FirstIRArg] = AI.getPointer();
4848 
4849           // Emit lifetime markers for the temporary alloca.
4850           llvm::TypeSize ByvalTempElementSize =
4851               CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
4852           llvm::Value *LifetimeSize =
4853               EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
4854 
4855           // Add cleanup code to emit the end lifetime marker after the call.
4856           if (LifetimeSize) // In case we disabled lifetime markers.
4857             CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
4858 
4859           // Generate the copy.
4860           I->copyInto(*this, AI);
4861         } else {
4862           // Skip the extra memcpy call.
4863           auto *T = V->getType()->getPointerElementType()->getPointerTo(
4864               CGM.getDataLayout().getAllocaAddrSpace());
4865           IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
4866               *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
4867               true);
4868         }
4869       }
4870       break;
4871     }
4872 
4873     case ABIArgInfo::Ignore:
4874       assert(NumIRArgs == 0);
4875       break;
4876 
4877     case ABIArgInfo::Extend:
4878     case ABIArgInfo::Direct: {
4879       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
4880           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
4881           ArgInfo.getDirectOffset() == 0) {
4882         assert(NumIRArgs == 1);
4883         llvm::Value *V;
4884         if (!I->isAggregate())
4885           V = I->getKnownRValue().getScalarVal();
4886         else
4887           V = Builder.CreateLoad(
4888               I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4889                              : I->getKnownRValue().getAggregateAddress());
4890 
4891         // Implement swifterror by copying into a new swifterror argument.
4892         // We'll write back in the normal path out of the call.
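        // That is, we pass a fresh "alloca swifterror i8*" (illustrative),
        // seed it from the caller's error slot, and copy it back out below,
        // after the call.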
4893         if (CallInfo.getExtParameterInfo(ArgNo).getABI()
4894               == ParameterABI::SwiftErrorResult) {
4895           assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4896 
4897           QualType pointeeTy = I->Ty->getPointeeType();
4898           swiftErrorArg =
4899             Address(V, getContext().getTypeAlignInChars(pointeeTy));
4900 
4901           swiftErrorTemp =
4902             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4903           V = swiftErrorTemp.getPointer();
4904           cast<llvm::AllocaInst>(V)->setSwiftError(true);
4905 
4906           llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4907           Builder.CreateStore(errorValue, swiftErrorTemp);
4908         }
4909 
4910         // We might have to widen integers, but we should never truncate.
4911         if (ArgInfo.getCoerceToType() != V->getType() &&
4912             V->getType()->isIntegerTy())
4913           V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4914 
4915         // If the argument doesn't match, perform a bitcast to coerce it.  This
4916         // can happen due to trivial type mismatches.
4917         if (FirstIRArg < IRFuncTy->getNumParams() &&
4918             V->getType() != IRFuncTy->getParamType(FirstIRArg))
4919           V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4920 
4921         IRCallArgs[FirstIRArg] = V;
4922         break;
4923       }
4924 
4925       // FIXME: Avoid the conversion through memory if possible.
4926       Address Src = Address::invalid();
4927       if (!I->isAggregate()) {
4928         Src = CreateMemTemp(I->Ty, "coerce");
4929         I->copyInto(*this, Src);
4930       } else {
4931         Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4932                              : I->getKnownRValue().getAggregateAddress();
4933       }
4934 
4935       // If the value is offset in memory, apply the offset now.
4936       Src = emitAddressAtOffset(*this, Src, ArgInfo);
4937 
4938       // Fast-isel and the optimizer generally like scalar values better than
4939       // FCAs, so we flatten them if this is safe to do for this argument.
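      // For example, on x86-64 an argument coerced to { i64, i64 } is passed
      // as two separate i64 values rather than one first-class aggregate.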
4940       llvm::StructType *STy =
4941             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4942       if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4943         llvm::Type *SrcTy = Src.getElementType();
4944         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4945         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4946 
4947         // If the source type is smaller than the destination type of the
4948         // coerce-to logic, copy the source value into a temp alloca the size
4949         // of the destination type to allow loading all of it. The bits past
4950         // the source value are left undef.
4951         if (SrcSize < DstSize) {
4952           Address TempAlloca
4953             = CreateTempAlloca(STy, Src.getAlignment(),
4954                                Src.getName() + ".coerce");
4955           Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4956           Src = TempAlloca;
4957         } else {
4958           Src = Builder.CreateBitCast(Src,
4959                                       STy->getPointerTo(Src.getAddressSpace()));
4960         }
4961 
4962         assert(NumIRArgs == STy->getNumElements());
4963         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4964           Address EltPtr = Builder.CreateStructGEP(Src, i);
4965           llvm::Value *LI = Builder.CreateLoad(EltPtr);
4966           IRCallArgs[FirstIRArg + i] = LI;
4967         }
4968       } else {
4969         // In the simple case, just pass the coerced loaded value.
4970         assert(NumIRArgs == 1);
4971         llvm::Value *Load =
4972             CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4973 
4974         if (CallInfo.isCmseNSCall()) {
4975           // For certain parameter types, clear padding bits, as they may reveal
4976           // sensitive information.
4977           // Small struct/union types are passed as integer arrays.
4978           auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
4979           if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
4980             Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
4981         }
4982         IRCallArgs[FirstIRArg] = Load;
4983       }
4984 
4985       break;
4986     }
4987 
4988     case ABIArgInfo::CoerceAndExpand: {
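      // Pass the non-padding elements of the coercion struct as separate IR
      // arguments; e.g. a swiftcall aggregate coerced to { i64, i64 } is
      // passed as two i64 values (illustrative).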
4989       auto coercionType = ArgInfo.getCoerceAndExpandType();
4990       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4991 
4992       llvm::Value *tempSize = nullptr;
4993       Address addr = Address::invalid();
4994       Address AllocaAddr = Address::invalid();
4995       if (I->isAggregate()) {
4996         addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4997                               : I->getKnownRValue().getAggregateAddress();
4998 
4999       } else {
5000         RValue RV = I->getKnownRValue();
5001         assert(RV.isScalar()); // complex should always just be direct
5002 
5003         llvm::Type *scalarType = RV.getScalarVal()->getType();
5004         auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
5005         auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
5006 
5007         // Materialize to a temporary.
5008         addr = CreateTempAlloca(
5009             RV.getScalarVal()->getType(),
5010             CharUnits::fromQuantity(std::max(
5011                 (unsigned)layout->getAlignment().value(), scalarAlign)),
5012             "tmp",
5013             /*ArraySize=*/nullptr, &AllocaAddr);
5014         tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
5015 
5016         Builder.CreateStore(RV.getScalarVal(), addr);
5017       }
5018 
5019       addr = Builder.CreateElementBitCast(addr, coercionType);
5020 
5021       unsigned IRArgPos = FirstIRArg;
5022       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5023         llvm::Type *eltType = coercionType->getElementType(i);
5024         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
5025         Address eltAddr = Builder.CreateStructGEP(addr, i);
5026         llvm::Value *elt = Builder.CreateLoad(eltAddr);
5027         IRCallArgs[IRArgPos++] = elt;
5028       }
5029       assert(IRArgPos == FirstIRArg + NumIRArgs);
5030 
5031       if (tempSize) {
5032         EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
5033       }
5034 
5035       break;
5036     }
5037 
5038     case ABIArgInfo::Expand: {
5039       unsigned IRArgPos = FirstIRArg;
5040       ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
5041       assert(IRArgPos == FirstIRArg + NumIRArgs);
5042       break;
5043     }
5044     }
5045   }
5046 
5047   const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
5048   llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
5049 
5050   // If we're using inalloca, set up that argument.
5051   if (ArgMemory.isValid()) {
5052     llvm::Value *Arg = ArgMemory.getPointer();
5053     if (CallInfo.isVariadic()) {
5054       // When passing non-POD arguments by value to variadic functions, we will
5055       // end up with a variadic prototype and an inalloca call site.  In such
5056       // cases, we can't do any parameter mismatch checks.  Give up and bitcast
5057       // the callee.
5058       unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
5059       CalleePtr =
5060           Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
5061     } else {
5062       llvm::Type *LastParamTy =
5063           IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
5064       if (Arg->getType() != LastParamTy) {
5065 #ifndef NDEBUG
5066         // Assert that these structs have equivalent element types.
5067         llvm::StructType *FullTy = CallInfo.getArgStruct();
5068         llvm::StructType *DeclaredTy = cast<llvm::StructType>(
5069             cast<llvm::PointerType>(LastParamTy)->getElementType());
5070         assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
5071         for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
5072                                                 DE = DeclaredTy->element_end(),
5073                                                 FI = FullTy->element_begin();
5074              DI != DE; ++DI, ++FI)
5075           assert(*DI == *FI);
5076 #endif
5077         Arg = Builder.CreateBitCast(Arg, LastParamTy);
5078       }
5079     }
5080     assert(IRFunctionArgs.hasInallocaArg());
5081     IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
5082   }
5083 
5084   // 2. Prepare the function pointer.
5085 
5086   // If the callee is a bitcast of a non-variadic function to have a
5087   // variadic function pointer type, check to see if we can remove the
5088   // bitcast.  This comes up with unprototyped functions.
5089   //
5090   // This makes the IR nicer, but more importantly it ensures that we
5091   // can inline the function at -O0 if it is marked always_inline.
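  // For example (K&R C, illustrative):
  //   void f();               // unprototyped declaration
  //   void g(void) { f(1); }
  //   void f(int x) {}        // definition gives @f the type void (i32)
  // The call site goes through "bitcast (void (i32)* @f to void (i32, ...)*)";
  // since the fixed parameters agree, we can strip the cast and call @f
  // directly.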
5092   auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
5093                                    llvm::Value *Ptr) -> llvm::Function * {
5094     if (!CalleeFT->isVarArg())
5095       return nullptr;
5096 
5097     // Get underlying value if it's a bitcast
5098     if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
5099       if (CE->getOpcode() == llvm::Instruction::BitCast)
5100         Ptr = CE->getOperand(0);
5101     }
5102 
5103     llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
5104     if (!OrigFn)
5105       return nullptr;
5106 
5107     llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
5108 
5109     // If the original type is variadic, or if any of the component types
5110     // disagree, we cannot remove the cast.
5111     if (OrigFT->isVarArg() ||
5112         OrigFT->getNumParams() != CalleeFT->getNumParams() ||
5113         OrigFT->getReturnType() != CalleeFT->getReturnType())
5114       return nullptr;
5115 
5116     for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
5117       if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
5118         return nullptr;
5119 
5120     return OrigFn;
5121   };
5122 
5123   if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
5124     CalleePtr = OrigFn;
5125     IRFuncTy = OrigFn->getFunctionType();
5126   }
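  // Illustrative (hypothetical) C source for the simplification above:
  //   void f();               // unprototyped declaration
  //   void g(void) { f(1); }  // call site sees a variadic-looking type
  //   void f(int x) {}        // actual non-variadic definition
  // The call in g goes through a bitcast of @f; since the return type and
  // all fixed parameter types agree, the bitcast can be dropped and the
  // call can target @f directly (allowing always_inline to work at -O0).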
5127 
5128   // 3. Perform the actual call.
5129 
5130   // Deactivate any cleanups that we're supposed to do immediately before
5131   // the call.
5132   if (!CallArgs.getCleanupsToDeactivate().empty())
5133     deactivateArgCleanupsBeforeCall(*this, CallArgs);
5134 
5135   // Assert that the arguments we computed match up.  The IR verifier
5136   // will catch this, but this is a common enough source of problems
5137   // during IRGen changes that it's way better for debugging to catch
5138   // it ourselves here.
5139 #ifndef NDEBUG
5140   assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
5141   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
5142     // The inalloca argument can have a different type.
5143     if (IRFunctionArgs.hasInallocaArg() &&
5144         i == IRFunctionArgs.getInallocaArgNo())
5145       continue;
5146     if (i < IRFuncTy->getNumParams())
5147       assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
5148   }
5149 #endif
5150 
5151   // Update the largest vector width if any arguments have vector types.
5152   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
5153     if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
5154       LargestVectorWidth =
5155           std::max((uint64_t)LargestVectorWidth,
5156                    VT->getPrimitiveSizeInBits().getKnownMinSize());
5157   }
5158 
5159   // Compute the calling convention and attributes.
5160   unsigned CallingConv;
5161   llvm::AttributeList Attrs;
5162   CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
5163                              Callee.getAbstractInfo(), Attrs, CallingConv,
5164                              /*AttrOnCallSite=*/true,
5165                              /*IsThunk=*/false);
5166 
5167   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
5168     if (FD->hasAttr<StrictFPAttr>())
5169       // All calls within a strictfp function are marked strictfp
5170       Attrs =
5171         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5172                            llvm::Attribute::StrictFP);
5173 
5174   // Add the call-site nomerge attribute if it exists.
5175   if (InNoMergeAttributedStmt)
5176     Attrs =
5177         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5178                            llvm::Attribute::NoMerge);
5179 
5180   // Apply some call-site-specific attributes.
5181   // TODO: work this into building the attribute set.
5182 
5183   // Apply always_inline to all calls within flatten functions.
5184   // FIXME: should this really take priority over __try, below?
5185   if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
5186       !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
5187     Attrs =
5188         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5189                            llvm::Attribute::AlwaysInline);
5190   }
5191 
5192   // Disable inlining inside SEH __try blocks.
5193   if (isSEHTryScope()) {
5194     Attrs =
5195         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5196                            llvm::Attribute::NoInline);
5197   }
5198 
5199   // Decide whether to use a call or an invoke.
5200   bool CannotThrow;
5201   if (currentFunctionUsesSEHTry()) {
5202     // SEH cares about asynchronous exceptions, so everything can "throw."
5203     CannotThrow = false;
5204   } else if (isCleanupPadScope() &&
5205              EHPersonality::get(*this).isMSVCXXPersonality()) {
5206     // The MSVC++ personality will implicitly terminate the program if an
5207     // exception is thrown during a cleanup outside of a try/catch.
5208     // We don't need to model anything in IR to get this behavior.
5209     CannotThrow = true;
5210   } else {
5211     // Otherwise, nounwind call sites will never throw.
5212     CannotThrow = Attrs.hasFnAttribute(llvm::Attribute::NoUnwind);
5213 
5214     if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
5215       if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
5216         CannotThrow = true;
5217   }
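  // Illustrative (hypothetical): for `void f() noexcept;` the call site
  // carries nounwind, so CannotThrow is true and a plain `call` suffices;
  // for a potentially-throwing `void g();` inside a scope with cleanups, an
  // `invoke` with an unwind edge is emitted instead.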
5218 
5219   // If we made a temporary, be sure to clean up after ourselves. Note that we
5220   // can't depend on being inside of an ExprWithCleanups, so we need to manually
5221   // pop this cleanup later on. Being eager about this is OK, since this
5222   // temporary is 'invisible' outside of the callee.
5223   if (UnusedReturnSizePtr)
5224     pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
5225                                          UnusedReturnSizePtr);
5226 
5227   llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
5228 
5229   SmallVector<llvm::OperandBundleDef, 1> BundleList =
5230       getBundlesForFunclet(CalleePtr);
5231 
5232   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
5233     if (FD->hasAttr<StrictFPAttr>())
5234       // All calls within a strictfp function are marked strictfp. (This repeats the identical block above; re-adding the attribute is a harmless no-op.)
5235       Attrs =
5236         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5237                            llvm::Attribute::StrictFP);
5238 
5239   AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
5240   Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5241 
5242   AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
5243   Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5244 
5245   // Emit the actual call/invoke instruction.
5246   llvm::CallBase *CI;
5247   if (!InvokeDest) {
5248     CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
5249   } else {
5250     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
5251     CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
5252                               BundleList);
5253     EmitBlock(Cont);
5254   }
5255   if (callOrInvoke)
5256     *callOrInvoke = CI;
5257 
5258   // If this is within a function that has the guard(nocf) attribute and is an
5259   // indirect call, add the "guard_nocf" attribute to this call to indicate that
5260   // Control Flow Guard checks should not be added, even if the call is inlined.
5261   if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
5262     if (const auto *A = FD->getAttr<CFGuardAttr>()) {
5263       if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
5264         Attrs = Attrs.addAttribute(
5265             getLLVMContext(), llvm::AttributeList::FunctionIndex, "guard_nocf");
5266     }
5267   }
5268 
5269   // Apply the attributes and calling convention.
5270   CI->setAttributes(Attrs);
5271   CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
5272 
5273   // Apply various metadata.
5274 
5275   if (!CI->getType()->isVoidTy())
5276     CI->setName("call");
5277 
5278   // Update largest vector width from the return type.
5279   if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
5280     LargestVectorWidth =
5281         std::max((uint64_t)LargestVectorWidth,
5282                  VT->getPrimitiveSizeInBits().getKnownMinSize());
5283 
5284   // Insert instrumentation or attach profile metadata at indirect call sites.
5285   // For more details, see the comment before the definition of
5286   // IPVK_IndirectCallTarget in InstrProfData.inc.
5287   if (!CI->getCalledFunction())
5288     PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
5289                      CI, CalleePtr);
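  // Illustrative (hypothetical): when compiling with -fprofile-generate, an
  // indirect call such as `fp(x)` is instrumented here to record the actual
  // targets of `fp`, which later allows hot indirect calls to be promoted
  // to direct calls.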
5290 
5291   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
5292   // optimizer it can aggressively ignore unwind edges.
5293   if (CGM.getLangOpts().ObjCAutoRefCount)
5294     AddObjCARCExceptionMetadata(CI);
5295 
5296   // Set tail call kind if necessary.
5297   if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
5298     if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
5299       Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
5300     else if (IsMustTail)
5301       Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
5302   }
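  // Illustrative (hypothetical) sources for the two tail-call kinds above:
  //   __attribute__((not_tail_called)) void f(void);  // forces TCK_NoTail
  //   [[clang::musttail]] return g(x);                // forces TCK_MustTail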
5303 
5304   // Add heap-allocation-site metadata for calls to MSAllocator functions.
5305   if (getDebugInfo() && TargetDecl &&
5306       TargetDecl->hasAttr<MSAllocatorAttr>())
5307     getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);
5308 
5309   // 4. Finish the call.
5310 
5311   // If the call doesn't return, finish the basic block and clear the
5312   // insertion point; this allows the rest of IRGen to discard
5313   // unreachable code.
5314   if (CI->doesNotReturn()) {
5315     if (UnusedReturnSizePtr)
5316       PopCleanupBlock();
5317 
5318     // Strip away the noreturn attribute to better diagnose unreachable UB.
5319     if (SanOpts.has(SanitizerKind::Unreachable)) {
5320       // Also remove it from the called function, since CallBase::hasFnAttr
5321       // additionally checks attributes of the called function.
5322       if (auto *F = CI->getCalledFunction())
5323         F->removeFnAttr(llvm::Attribute::NoReturn);
5324       CI->removeAttribute(llvm::AttributeList::FunctionIndex,
5325                           llvm::Attribute::NoReturn);
5326 
5327       // Avoid incompatibility with ASan which relies on the `noreturn`
5328       // attribute to insert handler calls.
5329       if (SanOpts.hasOneOf(SanitizerKind::Address |
5330                            SanitizerKind::KernelAddress)) {
5331         SanitizerScope SanScope(this);
5332         llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
5333         Builder.SetInsertPoint(CI);
5334         auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
5335         llvm::FunctionCallee Fn =
5336             CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
5337         EmitNounwindRuntimeCall(Fn);
5338       }
5339     }
5340 
5341     EmitUnreachable(Loc);
5342     Builder.ClearInsertionPoint();
5343 
5344     // FIXME: For now, emit a dummy basic block because expr emitters in
5345     // general are not ready to handle emitting expressions at unreachable
5346     // points.
5347     EnsureInsertPoint();
5348 
5349     // Return a reasonable RValue.
5350     return GetUndefRValue(RetTy);
5351   }
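  // Illustrative (hypothetical): a call to `abort()` is noreturn, so the
  // block above is terminated with `unreachable`; under -fsanitize=unreachable
  // the attribute is stripped first so that actually reaching this point is
  // diagnosed rather than assumed impossible.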
5352 
5353   // If this is a musttail call, return immediately. We do not branch to the
5354   // epilogue in this case.
5355   if (IsMustTail) {
5356     for (auto it = EHStack.find(CurrentCleanupScopeDepth); it != EHStack.end();
5357          ++it) {
5358       EHCleanupScope *Cleanup = dyn_cast<EHCleanupScope>(&*it);
5359       if (!(Cleanup && Cleanup->getCleanup()->isRedundantBeforeReturn()))
5360         CGM.ErrorUnsupported(MustTailCall, "tail call skipping over cleanups");
5361     }
5362     if (CI->getType()->isVoidTy())
5363       Builder.CreateRetVoid();
5364     else
5365       Builder.CreateRet(CI);
5366     Builder.ClearInsertionPoint();
5367     EnsureInsertPoint();
5368     return GetUndefRValue(RetTy);
5369   }
5370 
5371   // Perform the swifterror writeback.
5372   if (swiftErrorTemp.isValid()) {
5373     llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
5374     Builder.CreateStore(errorResult, swiftErrorArg);
5375   }
5376 
5377   // Emit any call-associated writebacks immediately.  Arguably this
5378   // should happen after any return-value munging.
5379   if (CallArgs.hasWritebacks())
5380     emitWritebacks(*this, CallArgs);
5381 
5382   // The stack cleanup for inalloca arguments has to run out of the normal
5383   // lexical order, so deactivate it and run it manually here.
5384   CallArgs.freeArgumentMemory(*this);
5385 
5386   // Extract the return value.
5387   RValue Ret = [&] {
5388     switch (RetAI.getKind()) {
5389     case ABIArgInfo::CoerceAndExpand: {
5390       auto coercionType = RetAI.getCoerceAndExpandType();
5391 
5392       Address addr = SRetPtr;
5393       addr = Builder.CreateElementBitCast(addr, coercionType);
5394 
5395       assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
5396       bool requiresExtract = isa<llvm::StructType>(CI->getType());
5397 
5398       unsigned unpaddedIndex = 0;
5399       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5400         llvm::Type *eltType = coercionType->getElementType(i);
5401         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
5402         Address eltAddr = Builder.CreateStructGEP(addr, i);
5403         llvm::Value *elt = CI;
5404         if (requiresExtract)
5405           elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
5406         else
5407           assert(unpaddedIndex == 0);
5408         Builder.CreateStore(elt, eltAddr);
5409       }
5411       LLVM_FALLTHROUGH;
5412     }
5413 
5414     case ABIArgInfo::InAlloca:
5415     case ABIArgInfo::Indirect: {
5416       RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
5417       if (UnusedReturnSizePtr)
5418         PopCleanupBlock();
5419       return ret;
5420     }
5421 
5422     case ABIArgInfo::Ignore:
5423       // If the result of the call is ignored, still construct an appropriate
5424       // (undef) return value for our caller.
5425       return GetUndefRValue(RetTy);
5426 
5427     case ABIArgInfo::Extend:
5428     case ABIArgInfo::Direct: {
5429       llvm::Type *RetIRTy = ConvertType(RetTy);
5430       if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
5431         switch (getEvaluationKind(RetTy)) {
5432         case TEK_Complex: {
5433           llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
5434           llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
5435           return RValue::getComplex(std::make_pair(Real, Imag));
5436         }
5437         case TEK_Aggregate: {
5438           Address DestPtr = ReturnValue.getValue();
5439           bool DestIsVolatile = ReturnValue.isVolatile();
5440 
5441           if (!DestPtr.isValid()) {
5442             DestPtr = CreateMemTemp(RetTy, "agg.tmp");
5443             DestIsVolatile = false;
5444           }
5445           EmitAggregateStore(CI, DestPtr, DestIsVolatile);
5446           return RValue::getAggregate(DestPtr);
5447         }
5448         case TEK_Scalar: {
5449           // If the return value doesn't match the expected IR type, perform a
5450           // bitcast to coerce it.  This can happen due to trivial type mismatches.
5451           llvm::Value *V = CI;
5452           if (V->getType() != RetIRTy)
5453             V = Builder.CreateBitCast(V, RetIRTy);
5454           return RValue::get(V);
5455         }
5456         }
5457         llvm_unreachable("bad evaluation kind");
5458       }
5459 
5460       Address DestPtr = ReturnValue.getValue();
5461       bool DestIsVolatile = ReturnValue.isVolatile();
5462 
5463       if (!DestPtr.isValid()) {
5464         DestPtr = CreateMemTemp(RetTy, "coerce");
5465         DestIsVolatile = false;
5466       }
5467 
5468       // If the value is offset in memory, apply the offset now.
5469       Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
5470       CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
5471 
5472       return convertTempToRValue(DestPtr, RetTy, SourceLocation());
5473     }
5474 
5475     case ABIArgInfo::Expand:
5476     case ABIArgInfo::IndirectAliased:
5477       llvm_unreachable("Invalid ABI kind for return argument");
5478     }
5479 
5480     llvm_unreachable("Unhandled ABIArgInfo::Kind");
5481   } ();
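  // Illustrative (hypothetical) example for the Direct case above: for
  //   struct P { int a, b; };
  //   struct P make(void);
  // many 64-bit ABIs coerce the return value to a single i64; the coerced
  // store above writes that i64 into a temporary, which is then reloaded
  // as a P rvalue.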
5482 
5483   // Emit the assume_aligned and alloc_align assumptions on the return value.
5484   if (Ret.isScalar() && TargetDecl) {
5485     AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5486     AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5487   }
5488 
5489   // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
5490   // we can't use the full cleanup mechanism.
5491   for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
5492     LifetimeEnd.Emit(*this, /*Flags=*/{});
5493 
5494   if (!ReturnValue.isExternallyDestructed() &&
5495       RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
5496     pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
5497                 RetTy);
5498 
5499   return Ret;
5500 }
5501 
5502 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
5503   if (isVirtual()) {
5504     const CallExpr *CE = getVirtualCallExpr();
5505     return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
5506         CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
5507         CE ? CE->getBeginLoc() : SourceLocation());
5508   }
5509 
5510   return *this;
5511 }
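// Illustrative (hypothetical) C++ for the virtual case above: given
//   struct B { virtual void f(); };
//   void call(B *b) { b->f(); }
// the CGCallee for b->f() starts out virtual, and the C++ ABI materializes
// the concrete function pointer by loading the appropriate vtable slot.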
5512 
5513 /* VarArg handling */
5514 
5515 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
5516   VAListAddr = VE->isMicrosoftABI()
5517                  ? EmitMSVAListRef(VE->getSubExpr())
5518                  : EmitVAListRef(VE->getSubExpr());
5519   QualType Ty = VE->getType();
5520   if (VE->isMicrosoftABI())
5521     return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
5522   return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
5523 }
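// Illustrative (hypothetical) C for the lowering above:
//   int next(va_list ap) { return va_arg(ap, int); }
// The va_arg expression reaches EmitVAArg, which hands the va_list address
// and the requested type to the target's ABIInfo (or to EmitMSVAArg under
// the Microsoft ABI) to compute the address of the next argument slot.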
5524