xref: /freebsd/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp (revision 5e801ac66d24704442eba426ed13c3effb8a34e7)
1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This provides C++ code generation targeting the Itanium C++ ABI.  The class
10 // in this file generates structures that follow the Itanium C++ ABI, which is
11 // documented at:
12 //  https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13 //  https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
14 //
15 // It also supports the closely-related ARM ABI, documented at:
16 // https://developer.arm.com/documentation/ihi0041/g/
17 //
18 //===----------------------------------------------------------------------===//
19 
20 #include "CGCXXABI.h"
21 #include "CGCleanup.h"
22 #include "CGRecordLayout.h"
23 #include "CGVTables.h"
24 #include "CodeGenFunction.h"
25 #include "CodeGenModule.h"
26 #include "TargetInfo.h"
27 #include "clang/AST/Attr.h"
28 #include "clang/AST/Mangle.h"
29 #include "clang/AST/StmtCXX.h"
30 #include "clang/AST/Type.h"
31 #include "clang/CodeGen/ConstantInitBuilder.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/GlobalValue.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/Value.h"
37 #include "llvm/Support/ScopedPrinter.h"
38 
39 using namespace clang;
40 using namespace CodeGen;
41 
42 namespace {
43 class ItaniumCXXABI : public CodeGen::CGCXXABI {
44   /// VTables - All the vtables which have been defined.
45   llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
46 
47   /// All the thread wrapper functions that have been used.
48   llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
49       ThreadWrappers;
50 
51 protected:
52   bool UseARMMethodPtrABI;
53   bool UseARMGuardVarABI;
54   bool Use32BitVTableOffsetABI;
55 
56   ItaniumMangleContext &getMangleContext() {
57     return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
58   }
59 
60 public:
61   ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
62                 bool UseARMMethodPtrABI = false,
63                 bool UseARMGuardVarABI = false) :
64     CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
65     UseARMGuardVarABI(UseARMGuardVarABI),
66     Use32BitVTableOffsetABI(false) { }
67 
68   bool classifyReturnType(CGFunctionInfo &FI) const override;
69 
70   RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
71     // If C++ prohibits us from making a copy, pass by address.
72     if (!RD->canPassInRegisters())
73       return RAA_Indirect;
74     return RAA_Default;
75   }
76 
77   bool isThisCompleteObject(GlobalDecl GD) const override {
78     // The Itanium ABI has separate complete-object vs.  base-object
79     // variants of both constructors and destructors.
80     if (isa<CXXDestructorDecl>(GD.getDecl())) {
81       switch (GD.getDtorType()) {
82       case Dtor_Complete:
83       case Dtor_Deleting:
84         return true;
85 
86       case Dtor_Base:
87         return false;
88 
89       case Dtor_Comdat:
90         llvm_unreachable("emitting dtor comdat as function?");
91       }
92       llvm_unreachable("bad dtor kind");
93     }
94     if (isa<CXXConstructorDecl>(GD.getDecl())) {
95       switch (GD.getCtorType()) {
96       case Ctor_Complete:
97         return true;
98 
99       case Ctor_Base:
100         return false;
101 
102       case Ctor_CopyingClosure:
103       case Ctor_DefaultClosure:
104         llvm_unreachable("closure ctors in Itanium ABI?");
105 
106       case Ctor_Comdat:
107         llvm_unreachable("emitting ctor comdat as function?");
108       }
109       llvm_unreachable("bad dtor kind");
110     }
111 
112     // No other kinds.
113     return false;
114   }
115 
116   bool isZeroInitializable(const MemberPointerType *MPT) override;
117 
118   llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
119 
120   CGCallee
121     EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
122                                     const Expr *E,
123                                     Address This,
124                                     llvm::Value *&ThisPtrForCall,
125                                     llvm::Value *MemFnPtr,
126                                     const MemberPointerType *MPT) override;
127 
128   llvm::Value *
129     EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
130                                  Address Base,
131                                  llvm::Value *MemPtr,
132                                  const MemberPointerType *MPT) override;
133 
134   llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
135                                            const CastExpr *E,
136                                            llvm::Value *Src) override;
137   llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
138                                               llvm::Constant *Src) override;
139 
140   llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
141 
142   llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
143   llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
144                                         CharUnits offset) override;
145   llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
146   llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
147                                      CharUnits ThisAdjustment);
148 
149   llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
150                                            llvm::Value *L, llvm::Value *R,
151                                            const MemberPointerType *MPT,
152                                            bool Inequality) override;
153 
154   llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
155                                          llvm::Value *Addr,
156                                          const MemberPointerType *MPT) override;
157 
158   void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
159                                Address Ptr, QualType ElementType,
160                                const CXXDestructorDecl *Dtor) override;
161 
162   void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
163   void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
164 
165   void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
166 
167   llvm::CallInst *
168   emitTerminateForUnexpectedException(CodeGenFunction &CGF,
169                                       llvm::Value *Exn) override;
170 
171   void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
172   llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
173   CatchTypeInfo
174   getAddrOfCXXCatchHandlerType(QualType Ty,
175                                QualType CatchHandlerType) override {
176     return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
177   }
178 
179   bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
180   void EmitBadTypeidCall(CodeGenFunction &CGF) override;
181   llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
182                           Address ThisPtr,
183                           llvm::Type *StdTypeInfoPtrTy) override;
184 
185   bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
186                                           QualType SrcRecordTy) override;
187 
188   llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
189                                    QualType SrcRecordTy, QualType DestTy,
190                                    QualType DestRecordTy,
191                                    llvm::BasicBlock *CastEnd) override;
192 
193   llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
194                                      QualType SrcRecordTy,
195                                      QualType DestTy) override;
196 
197   bool EmitBadCastCall(CodeGenFunction &CGF) override;
198 
199   llvm::Value *
200     GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
201                               const CXXRecordDecl *ClassDecl,
202                               const CXXRecordDecl *BaseClassDecl) override;
203 
204   void EmitCXXConstructors(const CXXConstructorDecl *D) override;
205 
206   AddedStructorArgCounts
207   buildStructorSignature(GlobalDecl GD,
208                          SmallVectorImpl<CanQualType> &ArgTys) override;
209 
210   bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
211                               CXXDtorType DT) const override {
212     // Itanium does not emit any destructor variant as an inline thunk.
213     // Delegating may occur as an optimization, but all variants are either
214     // emitted with external linkage or as linkonce if they are inline and used.
215     return false;
216   }
217 
218   void EmitCXXDestructors(const CXXDestructorDecl *D) override;
219 
220   void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
221                                  FunctionArgList &Params) override;
222 
223   void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
224 
225   AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
226                                                const CXXConstructorDecl *D,
227                                                CXXCtorType Type,
228                                                bool ForVirtualBase,
229                                                bool Delegating) override;
230 
231   llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
232                                              const CXXDestructorDecl *DD,
233                                              CXXDtorType Type,
234                                              bool ForVirtualBase,
235                                              bool Delegating) override;
236 
237   void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
238                           CXXDtorType Type, bool ForVirtualBase,
239                           bool Delegating, Address This,
240                           QualType ThisTy) override;
241 
242   void emitVTableDefinitions(CodeGenVTables &CGVT,
243                              const CXXRecordDecl *RD) override;
244 
245   bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
246                                            CodeGenFunction::VPtr Vptr) override;
247 
248   bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
249     return true;
250   }
251 
252   llvm::Constant *
253   getVTableAddressPoint(BaseSubobject Base,
254                         const CXXRecordDecl *VTableClass) override;
255 
256   llvm::Value *getVTableAddressPointInStructor(
257       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
258       BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
259 
260   llvm::Value *getVTableAddressPointInStructorWithVTT(
261       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
262       BaseSubobject Base, const CXXRecordDecl *NearestVBase);
263 
264   llvm::Constant *
265   getVTableAddressPointForConstExpr(BaseSubobject Base,
266                                     const CXXRecordDecl *VTableClass) override;
267 
268   llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
269                                         CharUnits VPtrOffset) override;
270 
271   CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
272                                      Address This, llvm::Type *Ty,
273                                      SourceLocation Loc) override;
274 
275   llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
276                                          const CXXDestructorDecl *Dtor,
277                                          CXXDtorType DtorType, Address This,
278                                          DeleteOrMemberCallExpr E) override;
279 
280   void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
281 
282   bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
283   bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
284 
285   void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
286                        bool ReturnAdjustment) override {
287     // Allow inlining of thunks by emitting them with available_externally
288     // linkage together with vtables when needed.
289     if (ForVTable && !Thunk->hasLocalLinkage())
290       Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
291     CGM.setGVProperties(Thunk, GD);
292   }
293 
294   bool exportThunk() override { return true; }
295 
296   llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
297                                      const ThisAdjustment &TA) override;
298 
299   llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
300                                        const ReturnAdjustment &RA) override;
301 
302   size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
303                               FunctionArgList &Args) const override {
304     assert(!Args.empty() && "expected the arglist to not be empty!");
305     return Args.size() - 1;
306   }
307 
308   StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
309   StringRef GetDeletedVirtualCallName() override
310     { return "__cxa_deleted_virtual"; }
311 
312   CharUnits getArrayCookieSizeImpl(QualType elementType) override;
313   Address InitializeArrayCookie(CodeGenFunction &CGF,
314                                 Address NewPtr,
315                                 llvm::Value *NumElements,
316                                 const CXXNewExpr *expr,
317                                 QualType ElementType) override;
318   llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
319                                    Address allocPtr,
320                                    CharUnits cookieSize) override;
321 
322   void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
323                        llvm::GlobalVariable *DeclPtr,
324                        bool PerformInit) override;
325   void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
326                           llvm::FunctionCallee dtor,
327                           llvm::Constant *addr) override;
328 
329   llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
330                                                 llvm::Value *Val);
331   void EmitThreadLocalInitFuncs(
332       CodeGenModule &CGM,
333       ArrayRef<const VarDecl *> CXXThreadLocals,
334       ArrayRef<llvm::Function *> CXXThreadLocalInits,
335       ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
336 
337   bool mayNeedDestruction(const VarDecl *VD) const {
338     if (VD->needsDestruction(getContext()))
339       return true;
340 
341     // If the variable has an incomplete class type (or array thereof), it
342     // might need destruction.
343     const Type *T = VD->getType()->getBaseElementTypeUnsafe();
344     if (T->getAs<RecordType>() && T->isIncompleteType())
345       return true;
346 
347     return false;
348   }
349 
350   /// Determine whether we will definitely emit this variable with a constant
351   /// initializer, either because the language semantics demand it or because
352   /// we know that the initializer is a constant.
353   // For weak definitions, any initializer available in the current translation
354   // is not necessarily reflective of the initializer used; such initializers
355   // are ignored unless if InspectInitForWeakDef is true.
356   bool
357   isEmittedWithConstantInitializer(const VarDecl *VD,
358                                    bool InspectInitForWeakDef = false) const {
359     VD = VD->getMostRecentDecl();
360     if (VD->hasAttr<ConstInitAttr>())
361       return true;
362 
363     // All later checks examine the initializer specified on the variable. If
364     // the variable is weak, such examination would not be correct.
365     if (!InspectInitForWeakDef &&
366         (VD->isWeak() || VD->hasAttr<SelectAnyAttr>()))
367       return false;
368 
369     const VarDecl *InitDecl = VD->getInitializingDeclaration();
370     if (!InitDecl)
371       return false;
372 
373     // If there's no initializer to run, this is constant initialization.
374     if (!InitDecl->hasInit())
375       return true;
376 
377     // If we have the only definition, we don't need a thread wrapper if we
378     // will emit the value as a constant.
379     if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD)))
380       return !mayNeedDestruction(VD) && InitDecl->evaluateValue();
381 
382     // Otherwise, we need a thread wrapper unless we know that every
383     // translation unit will emit the value as a constant. We rely on the
384     // variable being constant-initialized in every translation unit if it's
385     // constant-initialized in any translation unit, which isn't actually
386     // guaranteed by the standard but is necessary for sanity.
387     return InitDecl->hasConstantInitialization();
388   }
389 
390   bool usesThreadWrapperFunction(const VarDecl *VD) const override {
391     return !isEmittedWithConstantInitializer(VD) ||
392            mayNeedDestruction(VD);
393   }
394   LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
395                                       QualType LValType) override;
396 
397   bool NeedsVTTParameter(GlobalDecl GD) override;
398 
399   /**************************** RTTI Uniqueness ******************************/
400 
401 protected:
402   /// Returns true if the ABI requires RTTI type_info objects to be unique
403   /// across a program.
404   virtual bool shouldRTTIBeUnique() const { return true; }
405 
406 public:
407   /// What sort of unique-RTTI behavior should we use?
408   enum RTTIUniquenessKind {
409     /// We are guaranteeing, or need to guarantee, that the RTTI string
410     /// is unique.
411     RUK_Unique,
412 
413     /// We are not guaranteeing uniqueness for the RTTI string, so we
414     /// can demote to hidden visibility but must use string comparisons.
415     RUK_NonUniqueHidden,
416 
417     /// We are not guaranteeing uniqueness for the RTTI string, so we
418     /// have to use string comparisons, but we also have to emit it with
419     /// non-hidden visibility.
420     RUK_NonUniqueVisible
421   };
422 
423   /// Return the required visibility status for the given type and linkage in
424   /// the current ABI.
425   RTTIUniquenessKind
426   classifyRTTIUniqueness(QualType CanTy,
427                          llvm::GlobalValue::LinkageTypes Linkage) const;
428   friend class ItaniumRTTIBuilder;
429 
430   void emitCXXStructor(GlobalDecl GD) override;
431 
432   std::pair<llvm::Value *, const CXXRecordDecl *>
433   LoadVTablePtr(CodeGenFunction &CGF, Address This,
434                 const CXXRecordDecl *RD) override;
435 
436  private:
437    bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
438      const auto &VtableLayout =
439          CGM.getItaniumVTableContext().getVTableLayout(RD);
440 
441      for (const auto &VtableComponent : VtableLayout.vtable_components()) {
442        // Skip empty slot.
443        if (!VtableComponent.isUsedFunctionPointerKind())
444          continue;
445 
446        const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
447        if (!Method->getCanonicalDecl()->isInlined())
448          continue;
449 
450        StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
451        auto *Entry = CGM.GetGlobalValue(Name);
452        // This checks if virtual inline function has already been emitted.
453        // Note that it is possible that this inline function would be emitted
454        // after trying to emit vtable speculatively. Because of this we do
455        // an extra pass after emitting all deferred vtables to find and emit
456        // these vtables opportunistically.
457        if (!Entry || Entry->isDeclaration())
458          return true;
459      }
460      return false;
461   }
462 
463   bool isVTableHidden(const CXXRecordDecl *RD) const {
464     const auto &VtableLayout =
465             CGM.getItaniumVTableContext().getVTableLayout(RD);
466 
467     for (const auto &VtableComponent : VtableLayout.vtable_components()) {
468       if (VtableComponent.isRTTIKind()) {
469         const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
470         if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
471           return true;
472       } else if (VtableComponent.isUsedFunctionPointerKind()) {
473         const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
474         if (Method->getVisibility() == Visibility::HiddenVisibility &&
475             !Method->isDefined())
476           return true;
477       }
478     }
479     return false;
480   }
481 };
482 
483 class ARMCXXABI : public ItaniumCXXABI {
484 public:
485   ARMCXXABI(CodeGen::CodeGenModule &CGM) :
486     ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
487                   /*UseARMGuardVarABI=*/true) {}
488 
489   bool HasThisReturn(GlobalDecl GD) const override {
490     return (isa<CXXConstructorDecl>(GD.getDecl()) || (
491               isa<CXXDestructorDecl>(GD.getDecl()) &&
492               GD.getDtorType() != Dtor_Deleting));
493   }
494 
495   void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
496                            QualType ResTy) override;
497 
498   CharUnits getArrayCookieSizeImpl(QualType elementType) override;
499   Address InitializeArrayCookie(CodeGenFunction &CGF,
500                                 Address NewPtr,
501                                 llvm::Value *NumElements,
502                                 const CXXNewExpr *expr,
503                                 QualType ElementType) override;
504   llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
505                                    CharUnits cookieSize) override;
506 };
507 
508 class AppleARM64CXXABI : public ARMCXXABI {
509 public:
510   AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
511     Use32BitVTableOffsetABI = true;
512   }
513 
514   // ARM64 libraries are prepared for non-unique RTTI.
515   bool shouldRTTIBeUnique() const override { return false; }
516 };
517 
518 class FuchsiaCXXABI final : public ItaniumCXXABI {
519 public:
520   explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
521       : ItaniumCXXABI(CGM) {}
522 
523 private:
524   bool HasThisReturn(GlobalDecl GD) const override {
525     return isa<CXXConstructorDecl>(GD.getDecl()) ||
526            (isa<CXXDestructorDecl>(GD.getDecl()) &&
527             GD.getDtorType() != Dtor_Deleting);
528   }
529 };
530 
531 class WebAssemblyCXXABI final : public ItaniumCXXABI {
532 public:
533   explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
534       : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
535                       /*UseARMGuardVarABI=*/true) {}
536   void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
537   llvm::CallInst *
538   emitTerminateForUnexpectedException(CodeGenFunction &CGF,
539                                       llvm::Value *Exn) override;
540 
541 private:
542   bool HasThisReturn(GlobalDecl GD) const override {
543     return isa<CXXConstructorDecl>(GD.getDecl()) ||
544            (isa<CXXDestructorDecl>(GD.getDecl()) &&
545             GD.getDtorType() != Dtor_Deleting);
546   }
547   bool canCallMismatchedFunctionType() const override { return false; }
548 };
549 
550 class XLCXXABI final : public ItaniumCXXABI {
551 public:
552   explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
553       : ItaniumCXXABI(CGM) {}
554 
555   void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
556                           llvm::FunctionCallee dtor,
557                           llvm::Constant *addr) override;
558 
559   bool useSinitAndSterm() const override { return true; }
560 
561 private:
562   void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
563                              llvm::Constant *addr);
564 };
565 }
566 
567 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
568   switch (CGM.getContext().getCXXABIKind()) {
569   // For IR-generation purposes, there's no significant difference
570   // between the ARM and iOS ABIs.
571   case TargetCXXABI::GenericARM:
572   case TargetCXXABI::iOS:
573   case TargetCXXABI::WatchOS:
574     return new ARMCXXABI(CGM);
575 
576   case TargetCXXABI::AppleARM64:
577     return new AppleARM64CXXABI(CGM);
578 
579   case TargetCXXABI::Fuchsia:
580     return new FuchsiaCXXABI(CGM);
581 
582   // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
583   // include the other 32-bit ARM oddities: constructor/destructor return values
584   // and array cookies.
585   case TargetCXXABI::GenericAArch64:
586     return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
587                              /*UseARMGuardVarABI=*/true);
588 
589   case TargetCXXABI::GenericMIPS:
590     return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
591 
592   case TargetCXXABI::WebAssembly:
593     return new WebAssemblyCXXABI(CGM);
594 
595   case TargetCXXABI::XL:
596     return new XLCXXABI(CGM);
597 
598   case TargetCXXABI::GenericItanium:
599     if (CGM.getContext().getTargetInfo().getTriple().getArch()
600         == llvm::Triple::le32) {
601       // For PNaCl, use ARM-style method pointers so that PNaCl code
602       // does not assume anything about the alignment of function
603       // pointers.
604       return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
605     }
606     return new ItaniumCXXABI(CGM);
607 
608   case TargetCXXABI::Microsoft:
609     llvm_unreachable("Microsoft ABI is not Itanium-based");
610   }
611   llvm_unreachable("bad ABI kind");
612 }
613 
614 llvm::Type *
615 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
616   if (MPT->isMemberDataPointer())
617     return CGM.PtrDiffTy;
618   return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
619 }
620 
621 /// In the Itanium and ARM ABIs, method pointers have the form:
622 ///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
623 ///
624 /// In the Itanium ABI:
625 ///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
626 ///  - the this-adjustment is (memptr.adj)
627 ///  - the virtual offset is (memptr.ptr - 1)
628 ///
629 /// In the ARM ABI:
630 ///  - method pointers are virtual if (memptr.adj & 1) is nonzero
631 ///  - the this-adjustment is (memptr.adj >> 1)
632 ///  - the virtual offset is (memptr.ptr)
633 /// ARM uses 'adj' for the virtual flag because Thumb functions
634 /// may be only single-byte aligned.
635 ///
636 /// If the member is virtual, the adjusted 'this' pointer points
637 /// to a vtable pointer from which the virtual offset is applied.
638 ///
639 /// If the member is non-virtual, memptr.ptr is the address of
640 /// the function to call.
641 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
642     CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
643     llvm::Value *&ThisPtrForCall,
644     llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
645   CGBuilderTy &Builder = CGF.Builder;
646 
647   const FunctionProtoType *FPT =
648     MPT->getPointeeType()->getAs<FunctionProtoType>();
649   auto *RD =
650       cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
651 
652   llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
653       CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
654 
655   llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
656 
657   llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
658   llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
659   llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
660 
661   // Extract memptr.adj, which is in the second field.
662   llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
663 
664   // Compute the true adjustment.
665   llvm::Value *Adj = RawAdj;
666   if (UseARMMethodPtrABI)
667     Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
668 
669   // Apply the adjustment and cast back to the original struct type
670   // for consistency.
671   llvm::Value *This = ThisAddr.getPointer();
672   llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
673   Ptr = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), Ptr, Adj);
674   This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
675   ThisPtrForCall = This;
676 
677   // Load the function pointer.
678   llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
679 
680   // If the LSB in the function pointer is 1, the function pointer points to
681   // a virtual function.
682   llvm::Value *IsVirtual;
683   if (UseARMMethodPtrABI)
684     IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
685   else
686     IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
687   IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
688   Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
689 
690   // In the virtual path, the adjustment left 'This' pointing to the
691   // vtable of the correct base subobject.  The "function pointer" is an
692   // offset within the vtable (+1 for the virtual flag on non-ARM).
693   CGF.EmitBlock(FnVirtual);
694 
695   // Cast the adjusted this to a pointer to vtable pointer and load.
696   llvm::Type *VTableTy = Builder.getInt8PtrTy();
697   CharUnits VTablePtrAlign =
698     CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
699                                       CGF.getPointerAlign());
700   llvm::Value *VTable =
701     CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
702 
703   // Apply the offset.
704   // On ARM64, to reserve extra space in virtual member function pointers,
705   // we only pay attention to the low 32 bits of the offset.
706   llvm::Value *VTableOffset = FnAsInt;
707   if (!UseARMMethodPtrABI)
708     VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
709   if (Use32BitVTableOffsetABI) {
710     VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
711     VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
712   }
713 
714   // Check the address of the function pointer if CFI on member function
715   // pointers is enabled.
716   llvm::Constant *CheckSourceLocation;
717   llvm::Constant *CheckTypeDesc;
718   bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
719                             CGM.HasHiddenLTOVisibility(RD);
720   bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
721                            CGM.HasHiddenLTOVisibility(RD);
722   bool ShouldEmitWPDInfo =
723       CGM.getCodeGenOpts().WholeProgramVTables &&
724       // Don't insert type tests if we are forcing public std visibility.
725       !CGM.HasLTOVisibilityPublicStd(RD);
726   llvm::Value *VirtualFn = nullptr;
727 
728   {
729     CodeGenFunction::SanitizerScope SanScope(&CGF);
730     llvm::Value *TypeId = nullptr;
731     llvm::Value *CheckResult = nullptr;
732 
733     if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
734       // If doing CFI, VFE or WPD, we will need the metadata node to check
735       // against.
736       llvm::Metadata *MD =
737           CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
738       TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
739     }
740 
741     if (ShouldEmitVFEInfo) {
742       llvm::Value *VFPAddr =
743           Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
744 
745       // If doing VFE, load from the vtable with a type.checked.load intrinsic
746       // call. Note that we use the GEP to calculate the address to load from
747       // and pass 0 as the offset to the intrinsic. This is because every
748       // vtable slot of the correct type is marked with matching metadata, and
749       // we know that the load must be from one of these slots.
750       llvm::Value *CheckedLoad = Builder.CreateCall(
751           CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
752           {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
753       CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
754       VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
755       VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(),
756                                         "memptr.virtualfn");
757     } else {
758       // When not doing VFE, emit a normal load, as it allows more
759       // optimisations than type.checked.load.
760       if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
761         llvm::Value *VFPAddr =
762             Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
763         CheckResult = Builder.CreateCall(
764             CGM.getIntrinsic(llvm::Intrinsic::type_test),
765             {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
766       }
767 
768       if (CGM.getItaniumVTableContext().isRelativeLayout()) {
769         VirtualFn = CGF.Builder.CreateCall(
770             CGM.getIntrinsic(llvm::Intrinsic::load_relative,
771                              {VTableOffset->getType()}),
772             {VTable, VTableOffset});
773         VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
774       } else {
775         llvm::Value *VFPAddr =
776             CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
777         VFPAddr = CGF.Builder.CreateBitCast(
778             VFPAddr, FTy->getPointerTo()->getPointerTo());
779         VirtualFn = CGF.Builder.CreateAlignedLoad(
780             FTy->getPointerTo(), VFPAddr, CGF.getPointerAlign(),
781             "memptr.virtualfn");
782       }
783     }
784     assert(VirtualFn && "Virtual fuction pointer not created!");
785     assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
786             CheckResult) &&
787            "Check result required but not created!");
788 
789     if (ShouldEmitCFICheck) {
790       // If doing CFI, emit the check.
791       CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
792       CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
793       llvm::Constant *StaticData[] = {
794           llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
795           CheckSourceLocation,
796           CheckTypeDesc,
797       };
798 
799       if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
800         CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
801       } else {
802         llvm::Value *AllVtables = llvm::MetadataAsValue::get(
803             CGM.getLLVMContext(),
804             llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
805         llvm::Value *ValidVtable = Builder.CreateCall(
806             CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
807         CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
808                       SanitizerHandler::CFICheckFail, StaticData,
809                       {VTable, ValidVtable});
810       }
811 
812       FnVirtual = Builder.GetInsertBlock();
813     }
814   } // End of sanitizer scope
815 
816   CGF.EmitBranch(FnEnd);
817 
818   // In the non-virtual path, the function pointer is actually a
819   // function pointer.
820   CGF.EmitBlock(FnNonVirtual);
821   llvm::Value *NonVirtualFn =
822     Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
823 
824   // Check the function pointer if CFI on member function pointers is enabled.
825   if (ShouldEmitCFICheck) {
826     CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
827     if (RD->hasDefinition()) {
828       CodeGenFunction::SanitizerScope SanScope(&CGF);
829 
830       llvm::Constant *StaticData[] = {
831           llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
832           CheckSourceLocation,
833           CheckTypeDesc,
834       };
835 
836       llvm::Value *Bit = Builder.getFalse();
837       llvm::Value *CastedNonVirtualFn =
838           Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
839       for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
840         llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
841             getContext().getMemberPointerType(
842                 MPT->getPointeeType(),
843                 getContext().getRecordType(Base).getTypePtr()));
844         llvm::Value *TypeId =
845             llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
846 
847         llvm::Value *TypeTest =
848             Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
849                                {CastedNonVirtualFn, TypeId});
850         Bit = Builder.CreateOr(Bit, TypeTest);
851       }
852 
853       CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
854                     SanitizerHandler::CFICheckFail, StaticData,
855                     {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
856 
857       FnNonVirtual = Builder.GetInsertBlock();
858     }
859   }
860 
861   // We're done.
862   CGF.EmitBlock(FnEnd);
863   llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
864   CalleePtr->addIncoming(VirtualFn, FnVirtual);
865   CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
866 
867   CGCallee Callee(FPT, CalleePtr);
868   return Callee;
869 }
870 
871 /// Compute an l-value by applying the given pointer-to-member to a
872 /// base object.
873 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
874     CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
875     const MemberPointerType *MPT) {
876   assert(MemPtr->getType() == CGM.PtrDiffTy);
877 
878   CGBuilderTy &Builder = CGF.Builder;
879 
880   // Cast to char*.
881   Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
882 
883   // Apply the offset, which we assume is non-null.
884   llvm::Value *Addr = Builder.CreateInBoundsGEP(
885       Base.getElementType(), Base.getPointer(), MemPtr, "memptr.offset");
886 
887   // Cast the address to the appropriate pointer type, adopting the
888   // address space of the base pointer.
889   llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
890                             ->getPointerTo(Base.getAddressSpace());
891   return Builder.CreateBitCast(Addr, PType);
892 }
893 
894 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer
895 /// conversion.
896 ///
897 /// Bitcast conversions are always a no-op under Itanium.
898 ///
899 /// Obligatory offset/adjustment diagram:
900 ///         <-- offset -->          <-- adjustment -->
901 ///   |--------------------------|----------------------|--------------------|
902 ///   ^Derived address point     ^Base address point    ^Member address point
903 ///
904 /// So when converting a base member pointer to a derived member pointer,
905 /// we add the offset to the adjustment because the address point has
906 /// decreased;  and conversely, when converting a derived MP to a base MP
907 /// we subtract the offset from the adjustment because the address point
908 /// has increased.
909 ///
910 /// The standard forbids (at compile time) conversion to and from
911 /// virtual bases, which is why we don't have to consider them here.
912 ///
913 /// The standard forbids (at run time) casting a derived MP to a base
914 /// MP when the derived MP does not point to a member of the base.
915 /// This is why -1 is a reasonable choice for null data member
916 /// pointers.
917 llvm::Value *
918 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
919                                            const CastExpr *E,
920                                            llvm::Value *src) {
921   assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
922          E->getCastKind() == CK_BaseToDerivedMemberPointer ||
923          E->getCastKind() == CK_ReinterpretMemberPointer);
924 
925   // Under Itanium, reinterprets don't require any additional processing.
926   if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
927 
928   // Use constant emission if we can.
929   if (isa<llvm::Constant>(src))
930     return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
931 
932   llvm::Constant *adj = getMemberPointerAdjustment(E);
933   if (!adj) return src;
934 
935   CGBuilderTy &Builder = CGF.Builder;
936   bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
937 
938   const MemberPointerType *destTy =
939     E->getType()->castAs<MemberPointerType>();
940 
941   // For member data pointers, this is just a matter of adding the
942   // offset if the source is non-null.
943   if (destTy->isMemberDataPointer()) {
944     llvm::Value *dst;
945     if (isDerivedToBase)
946       dst = Builder.CreateNSWSub(src, adj, "adj");
947     else
948       dst = Builder.CreateNSWAdd(src, adj, "adj");
949 
950     // Null check.
951     llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
952     llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
953     return Builder.CreateSelect(isNull, src, dst);
954   }
955 
956   // The this-adjustment is left-shifted by 1 on ARM.
957   if (UseARMMethodPtrABI) {
958     uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
959     offset <<= 1;
960     adj = llvm::ConstantInt::get(adj->getType(), offset);
961   }
962 
963   llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
964   llvm::Value *dstAdj;
965   if (isDerivedToBase)
966     dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
967   else
968     dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
969 
970   return Builder.CreateInsertValue(src, dstAdj, 1);
971 }
972 
973 llvm::Constant *
974 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
975                                            llvm::Constant *src) {
976   assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
977          E->getCastKind() == CK_BaseToDerivedMemberPointer ||
978          E->getCastKind() == CK_ReinterpretMemberPointer);
979 
980   // Under Itanium, reinterprets don't require any additional processing.
981   if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
982 
983   // If the adjustment is trivial, we don't need to do anything.
984   llvm::Constant *adj = getMemberPointerAdjustment(E);
985   if (!adj) return src;
986 
987   bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
988 
989   const MemberPointerType *destTy =
990     E->getType()->castAs<MemberPointerType>();
991 
992   // For member data pointers, this is just a matter of adding the
993   // offset if the source is non-null.
994   if (destTy->isMemberDataPointer()) {
995     // null maps to null.
996     if (src->isAllOnesValue()) return src;
997 
998     if (isDerivedToBase)
999       return llvm::ConstantExpr::getNSWSub(src, adj);
1000     else
1001       return llvm::ConstantExpr::getNSWAdd(src, adj);
1002   }
1003 
1004   // The this-adjustment is left-shifted by 1 on ARM.
1005   if (UseARMMethodPtrABI) {
1006     uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
1007     offset <<= 1;
1008     adj = llvm::ConstantInt::get(adj->getType(), offset);
1009   }
1010 
1011   llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
1012   llvm::Constant *dstAdj;
1013   if (isDerivedToBase)
1014     dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
1015   else
1016     dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
1017 
1018   return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
1019 }
1020 
1021 llvm::Constant *
1022 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
1023   // Itanium C++ ABI 2.3:
1024   //   A NULL pointer is represented as -1.
1025   if (MPT->isMemberDataPointer())
1026     return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
1027 
1028   llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
1029   llvm::Constant *Values[2] = { Zero, Zero };
1030   return llvm::ConstantStruct::getAnon(Values);
1031 }
1032 
1033 llvm::Constant *
1034 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
1035                                      CharUnits offset) {
1036   // Itanium C++ ABI 2.3:
1037   //   A pointer to data member is an offset from the base address of
1038   //   the class object containing it, represented as a ptrdiff_t
1039   return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
1040 }
1041 
1042 llvm::Constant *
1043 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
1044   return BuildMemberPointer(MD, CharUnits::Zero());
1045 }
1046 
1047 llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
1048                                                   CharUnits ThisAdjustment) {
1049   assert(MD->isInstance() && "Member function must not be static!");
1050 
1051   CodeGenTypes &Types = CGM.getTypes();
1052 
1053   // Get the function pointer (or index if this is a virtual function).
1054   llvm::Constant *MemPtr[2];
1055   if (MD->isVirtual()) {
1056     uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
1057     uint64_t VTableOffset;
1058     if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1059       // Multiply by 4-byte relative offsets.
1060       VTableOffset = Index * 4;
1061     } else {
1062       const ASTContext &Context = getContext();
1063       CharUnits PointerWidth = Context.toCharUnitsFromBits(
1064           Context.getTargetInfo().getPointerWidth(0));
1065       VTableOffset = Index * PointerWidth.getQuantity();
1066     }
1067 
1068     if (UseARMMethodPtrABI) {
1069       // ARM C++ ABI 3.2.1:
1070       //   This ABI specifies that adj contains twice the this
1071       //   adjustment, plus 1 if the member function is virtual. The
1072       //   least significant bit of adj then makes exactly the same
1073       //   discrimination as the least significant bit of ptr does for
1074       //   Itanium.
1075       MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
1076       MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1077                                          2 * ThisAdjustment.getQuantity() + 1);
1078     } else {
1079       // Itanium C++ ABI 2.3:
1080       //   For a virtual function, [the pointer field] is 1 plus the
1081       //   virtual table offset (in bytes) of the function,
1082       //   represented as a ptrdiff_t.
1083       MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
1084       MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1085                                          ThisAdjustment.getQuantity());
1086     }
1087   } else {
1088     const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
1089     llvm::Type *Ty;
1090     // Check whether the function has a computable LLVM signature.
1091     if (Types.isFuncTypeConvertible(FPT)) {
1092       // The function has a computable LLVM signature; use the correct type.
1093       Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
1094     } else {
1095       // Use an arbitrary non-function type to tell GetAddrOfFunction that the
1096       // function type is incomplete.
1097       Ty = CGM.PtrDiffTy;
1098     }
1099     llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
1100 
1101     MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
1102     MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1103                                        (UseARMMethodPtrABI ? 2 : 1) *
1104                                        ThisAdjustment.getQuantity());
1105   }
1106 
1107   return llvm::ConstantStruct::getAnon(MemPtr);
1108 }
1109 
1110 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
1111                                                  QualType MPType) {
1112   const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
1113   const ValueDecl *MPD = MP.getMemberPointerDecl();
1114   if (!MPD)
1115     return EmitNullMemberPointer(MPT);
1116 
1117   CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);
1118 
1119   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
1120     return BuildMemberPointer(MD, ThisAdjustment);
1121 
1122   CharUnits FieldOffset =
1123     getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
1124   return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
1125 }
1126 
1127 /// The comparison algorithm is pretty easy: the member pointers are
1128 /// the same if they're either bitwise identical *or* both null.
1129 ///
1130 /// ARM is different here only because null-ness is more complicated.
1131 llvm::Value *
1132 ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
1133                                            llvm::Value *L,
1134                                            llvm::Value *R,
1135                                            const MemberPointerType *MPT,
1136                                            bool Inequality) {
1137   CGBuilderTy &Builder = CGF.Builder;
1138 
1139   llvm::ICmpInst::Predicate Eq;
1140   llvm::Instruction::BinaryOps And, Or;
1141   if (Inequality) {
1142     Eq = llvm::ICmpInst::ICMP_NE;
1143     And = llvm::Instruction::Or;
1144     Or = llvm::Instruction::And;
1145   } else {
1146     Eq = llvm::ICmpInst::ICMP_EQ;
1147     And = llvm::Instruction::And;
1148     Or = llvm::Instruction::Or;
1149   }
1150 
1151   // Member data pointers are easy because there's a unique null
1152   // value, so it just comes down to bitwise equality.
1153   if (MPT->isMemberDataPointer())
1154     return Builder.CreateICmp(Eq, L, R);
1155 
1156   // For member function pointers, the tautologies are more complex.
1157   // The Itanium tautology is:
1158   //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
1159   // The ARM tautology is:
1160   //   (L == R) <==> (L.ptr == R.ptr &&
1161   //                  (L.adj == R.adj ||
1162   //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
1163   // The inequality tautologies have exactly the same structure, except
1164   // applying De Morgan's laws.
1165 
1166   llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
1167   llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
1168 
1169   // This condition tests whether L.ptr == R.ptr.  This must always be
1170   // true for equality to hold.
1171   llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
1172 
1173   // This condition, together with the assumption that L.ptr == R.ptr,
1174   // tests whether the pointers are both null.  ARM imposes an extra
1175   // condition.
1176   llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
1177   llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
1178 
1179   // This condition tests whether L.adj == R.adj.  If this isn't
1180   // true, the pointers are unequal unless they're both null.
1181   llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
1182   llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
1183   llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
1184 
1185   // Null member function pointers on ARM clear the low bit of Adj,
1186   // so the zero condition has to check that neither low bit is set.
1187   if (UseARMMethodPtrABI) {
1188     llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
1189 
1190     // Compute (l.adj | r.adj) & 1 and test it against zero.
1191     llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
1192     llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
1193     llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
1194                                                       "cmp.or.adj");
1195     EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
1196   }
1197 
1198   // Tie together all our conditions.
1199   llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
1200   Result = Builder.CreateBinOp(And, PtrEq, Result,
1201                                Inequality ? "memptr.ne" : "memptr.eq");
1202   return Result;
1203 }
1204 
1205 llvm::Value *
1206 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1207                                           llvm::Value *MemPtr,
1208                                           const MemberPointerType *MPT) {
1209   CGBuilderTy &Builder = CGF.Builder;
1210 
1211   /// For member data pointers, this is just a check against -1.
1212   if (MPT->isMemberDataPointer()) {
1213     assert(MemPtr->getType() == CGM.PtrDiffTy);
1214     llvm::Value *NegativeOne =
1215       llvm::Constant::getAllOnesValue(MemPtr->getType());
1216     return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
1217   }
1218 
1219   // In Itanium, a member function pointer is not null if 'ptr' is not null.
1220   llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
1221 
1222   llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
1223   llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
1224 
1225   // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1226   // (the virtual bit) is set.
1227   if (UseARMMethodPtrABI) {
1228     llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
1229     llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
1230     llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
1231     llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
1232                                                   "memptr.isvirtual");
1233     Result = Builder.CreateOr(Result, IsVirtual);
1234   }
1235 
1236   return Result;
1237 }
1238 
1239 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1240   const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1241   if (!RD)
1242     return false;
1243 
1244   // If C++ prohibits us from making a copy, return by address.
1245   if (!RD->canPassInRegisters()) {
1246     auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1247     FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1248     return true;
1249   }
1250   return false;
1251 }
1252 
1253 /// The Itanium ABI requires non-zero initialization only for data
1254 /// member pointers, for which '0' is a valid offset.
1255 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1256   return MPT->isMemberFunctionPointer();
1257 }
1258 
1259 /// The Itanium ABI always places an offset to the complete object
1260 /// at entry -2 in the vtable.
1261 void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1262                                             const CXXDeleteExpr *DE,
1263                                             Address Ptr,
1264                                             QualType ElementType,
1265                                             const CXXDestructorDecl *Dtor) {
1266   bool UseGlobalDelete = DE->isGlobalDelete();
1267   if (UseGlobalDelete) {
1268     // Derive the complete-object pointer, which is what we need
1269     // to pass to the deallocation function.
1270 
1271     // Grab the vtable pointer as an intptr_t*.
1272     auto *ClassDecl =
1273         cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
1274     llvm::Value *VTable =
1275         CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
1276 
1277     // Track back to entry -2 and pull out the offset there.
1278     llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1279         CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
1280     llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr,                                                        CGF.getPointerAlign());
1281 
1282     // Apply the offset.
1283     llvm::Value *CompletePtr =
1284       CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
1285     CompletePtr =
1286         CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);
1287 
1288     // If we're supposed to call the global delete, make sure we do so
1289     // even if the destructor throws.
1290     CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
1291                                     ElementType);
1292   }
1293 
1294   // FIXME: Provide a source location here even though there's no
1295   // CXXMemberCallExpr for dtor call.
1296   CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1297   EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);
1298 
1299   if (UseGlobalDelete)
1300     CGF.PopCleanupBlock();
1301 }
1302 
1303 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1304   // void __cxa_rethrow();
1305 
1306   llvm::FunctionType *FTy =
1307     llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1308 
1309   llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1310 
1311   if (isNoReturn)
1312     CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1313   else
1314     CGF.EmitRuntimeCallOrInvoke(Fn);
1315 }
1316 
1317 static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1318   // void *__cxa_allocate_exception(size_t thrown_size);
1319 
1320   llvm::FunctionType *FTy =
1321     llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1322 
1323   return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1324 }
1325 
1326 static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1327   // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1328   //                  void (*dest) (void *));
1329 
1330   llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1331   llvm::FunctionType *FTy =
1332     llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1333 
1334   return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1335 }
1336 
1337 void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1338   QualType ThrowType = E->getSubExpr()->getType();
1339   // Now allocate the exception object.
1340   llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1341   uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1342 
1343   llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
1344   llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1345       AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1346 
1347   CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
1348   CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));
1349 
1350   // Now throw the exception.
1351   llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1352                                                          /*ForEH=*/true);
1353 
1354   // The address of the destructor.  If the exception type has a
1355   // trivial destructor (or isn't a record), we just pass null.
1356   llvm::Constant *Dtor = nullptr;
1357   if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1358     CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1359     if (!Record->hasTrivialDestructor()) {
1360       CXXDestructorDecl *DtorD = Record->getDestructor();
1361       Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
1362       Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
1363     }
1364   }
1365   if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1366 
1367   llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1368   CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
1369 }
1370 
1371 static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1372   // void *__dynamic_cast(const void *sub,
1373   //                      const abi::__class_type_info *src,
1374   //                      const abi::__class_type_info *dst,
1375   //                      std::ptrdiff_t src2dst_offset);
1376 
1377   llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1378   llvm::Type *PtrDiffTy =
1379     CGF.ConvertType(CGF.getContext().getPointerDiffType());
1380 
1381   llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1382 
1383   llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1384 
1385   // Mark the function as nounwind readonly.
1386   llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1387                                             llvm::Attribute::ReadOnly };
1388   llvm::AttributeList Attrs = llvm::AttributeList::get(
1389       CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1390 
1391   return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1392 }
1393 
1394 static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1395   // void __cxa_bad_cast();
1396   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1397   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1398 }
1399 
1400 /// Compute the src2dst_offset hint as described in the
1401 /// Itanium C++ ABI [2.9.7]
1402 static CharUnits computeOffsetHint(ASTContext &Context,
1403                                    const CXXRecordDecl *Src,
1404                                    const CXXRecordDecl *Dst) {
1405   CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1406                      /*DetectVirtual=*/false);
1407 
1408   // If Dst is not derived from Src we can skip the whole computation below and
1409   // return that Src is not a public base of Dst.  Record all inheritance paths.
1410   if (!Dst->isDerivedFrom(Src, Paths))
1411     return CharUnits::fromQuantity(-2ULL);
1412 
1413   unsigned NumPublicPaths = 0;
1414   CharUnits Offset;
1415 
1416   // Now walk all possible inheritance paths.
1417   for (const CXXBasePath &Path : Paths) {
1418     if (Path.Access != AS_public)  // Ignore non-public inheritance.
1419       continue;
1420 
1421     ++NumPublicPaths;
1422 
1423     for (const CXXBasePathElement &PathElement : Path) {
1424       // If the path contains a virtual base class we can't give any hint.
1425       // -1: no hint.
1426       if (PathElement.Base->isVirtual())
1427         return CharUnits::fromQuantity(-1ULL);
1428 
1429       if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1430         continue;
1431 
1432       // Accumulate the base class offsets.
1433       const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1434       Offset += L.getBaseClassOffset(
1435           PathElement.Base->getType()->getAsCXXRecordDecl());
1436     }
1437   }
1438 
1439   // -2: Src is not a public base of Dst.
1440   if (NumPublicPaths == 0)
1441     return CharUnits::fromQuantity(-2ULL);
1442 
1443   // -3: Src is a multiple public base type but never a virtual base type.
1444   if (NumPublicPaths > 1)
1445     return CharUnits::fromQuantity(-3ULL);
1446 
1447   // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1448   // Return the offset of Src from the origin of Dst.
1449   return Offset;
1450 }
1451 
1452 static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1453   // void __cxa_bad_typeid();
1454   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1455 
1456   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1457 }
1458 
1459 bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1460                                               QualType SrcRecordTy) {
1461   return IsDeref;
1462 }
1463 
1464 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1465   llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1466   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1467   Call->setDoesNotReturn();
1468   CGF.Builder.CreateUnreachable();
1469 }
1470 
1471 llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1472                                        QualType SrcRecordTy,
1473                                        Address ThisPtr,
1474                                        llvm::Type *StdTypeInfoPtrTy) {
1475   auto *ClassDecl =
1476       cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1477   llvm::Value *Value =
1478       CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
1479 
1480   if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1481     // Load the type info.
1482     Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
1483     Value = CGF.Builder.CreateCall(
1484         CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
1485         {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
1486 
1487     // Setup to dereference again since this is a proxy we accessed.
1488     Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
1489   } else {
1490     // Load the type info.
1491     Value =
1492         CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
1493   }
1494   return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
1495                                        CGF.getPointerAlign());
1496 }
1497 
1498 bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1499                                                        QualType SrcRecordTy) {
1500   return SrcIsPtr;
1501 }
1502 
1503 llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
1504     CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1505     QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1506   llvm::Type *PtrDiffLTy =
1507       CGF.ConvertType(CGF.getContext().getPointerDiffType());
1508   llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1509 
1510   llvm::Value *SrcRTTI =
1511       CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1512   llvm::Value *DestRTTI =
1513       CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1514 
1515   // Compute the offset hint.
1516   const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1517   const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1518   llvm::Value *OffsetHint = llvm::ConstantInt::get(
1519       PtrDiffLTy,
1520       computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1521 
1522   // Emit the call to __dynamic_cast.
1523   llvm::Value *Value = ThisAddr.getPointer();
1524   Value = CGF.EmitCastToVoidPtr(Value);
1525 
1526   llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1527   Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
1528   Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1529 
1530   /// C++ [expr.dynamic.cast]p9:
1531   ///   A failed cast to reference type throws std::bad_cast
1532   if (DestTy->isReferenceType()) {
1533     llvm::BasicBlock *BadCastBlock =
1534         CGF.createBasicBlock("dynamic_cast.bad_cast");
1535 
1536     llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1537     CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1538 
1539     CGF.EmitBlock(BadCastBlock);
1540     EmitBadCastCall(CGF);
1541   }
1542 
1543   return Value;
1544 }
1545 
1546 llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
1547                                                   Address ThisAddr,
1548                                                   QualType SrcRecordTy,
1549                                                   QualType DestTy) {
1550   llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1551   auto *ClassDecl =
1552       cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1553   llvm::Value *OffsetToTop;
1554   if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1555     // Get the vtable pointer.
1556     llvm::Value *VTable =
1557         CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);
1558 
1559     // Get the offset-to-top from the vtable.
1560     OffsetToTop =
1561         CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
1562     OffsetToTop = CGF.Builder.CreateAlignedLoad(
1563         CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
1564   } else {
1565     llvm::Type *PtrDiffLTy =
1566         CGF.ConvertType(CGF.getContext().getPointerDiffType());
1567 
1568     // Get the vtable pointer.
1569     llvm::Value *VTable =
1570         CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);
1571 
1572     // Get the offset-to-top from the vtable.
1573     OffsetToTop =
1574         CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
1575     OffsetToTop = CGF.Builder.CreateAlignedLoad(
1576         PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
1577   }
1578   // Finally, add the offset to the pointer.
1579   llvm::Value *Value = ThisAddr.getPointer();
1580   Value = CGF.EmitCastToVoidPtr(Value);
1581   Value = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, Value, OffsetToTop);
1582   return CGF.Builder.CreateBitCast(Value, DestLTy);
1583 }
1584 
1585 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1586   llvm::FunctionCallee Fn = getBadCastFn(CGF);
1587   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1588   Call->setDoesNotReturn();
1589   CGF.Builder.CreateUnreachable();
1590   return true;
1591 }
1592 
1593 llvm::Value *
1594 ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1595                                          Address This,
1596                                          const CXXRecordDecl *ClassDecl,
1597                                          const CXXRecordDecl *BaseClassDecl) {
1598   llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1599   CharUnits VBaseOffsetOffset =
1600       CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1601                                                                BaseClassDecl);
1602   llvm::Value *VBaseOffsetPtr =
1603     CGF.Builder.CreateConstGEP1_64(
1604         CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
1605         "vbase.offset.ptr");
1606 
1607   llvm::Value *VBaseOffset;
1608   if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1609     VBaseOffsetPtr =
1610         CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
1611     VBaseOffset = CGF.Builder.CreateAlignedLoad(
1612         CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
1613         "vbase.offset");
1614   } else {
1615     VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
1616                                                CGM.PtrDiffTy->getPointerTo());
1617     VBaseOffset = CGF.Builder.CreateAlignedLoad(
1618         CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
1619   }
1620   return VBaseOffset;
1621 }
1622 
1623 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1624   // Just make sure we're in sync with TargetCXXABI.
1625   assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1626 
1627   // The constructor used for constructing this as a base class;
1628   // ignores virtual bases.
1629   CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1630 
1631   // The constructor used for constructing this as a complete class;
1632   // constructs the virtual bases, then calls the base constructor.
1633   if (!D->getParent()->isAbstract()) {
1634     // We don't need to emit the complete ctor if the class is abstract.
1635     CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1636   }
1637 }
1638 
1639 CGCXXABI::AddedStructorArgCounts
1640 ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1641                                       SmallVectorImpl<CanQualType> &ArgTys) {
1642   ASTContext &Context = getContext();
1643 
1644   // All parameters are already in place except VTT, which goes after 'this'.
1645   // These are Clang types, so we don't need to worry about sret yet.
1646 
1647   // Check if we need to add a VTT parameter (which has type void **).
1648   if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1649                                              : GD.getDtorType() == Dtor_Base) &&
1650       cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1651     ArgTys.insert(ArgTys.begin() + 1,
1652                   Context.getPointerType(Context.VoidPtrTy));
1653     return AddedStructorArgCounts::prefix(1);
1654   }
1655   return AddedStructorArgCounts{};
1656 }
1657 
1658 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1659   // The destructor used for destructing this as a base class; ignores
1660   // virtual bases.
1661   CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1662 
1663   // The destructor used for destructing this as a most-derived class;
1664   // call the base destructor and then destructs any virtual bases.
1665   CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1666 
1667   // The destructor in a virtual table is always a 'deleting'
1668   // destructor, which calls the complete destructor and then uses the
1669   // appropriate operator delete.
1670   if (D->isVirtual())
1671     CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1672 }
1673 
1674 void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1675                                               QualType &ResTy,
1676                                               FunctionArgList &Params) {
1677   const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1678   assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1679 
1680   // Check if we need a VTT parameter as well.
1681   if (NeedsVTTParameter(CGF.CurGD)) {
1682     ASTContext &Context = getContext();
1683 
1684     // FIXME: avoid the fake decl
1685     QualType T = Context.getPointerType(Context.VoidPtrTy);
1686     auto *VTTDecl = ImplicitParamDecl::Create(
1687         Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
1688         T, ImplicitParamDecl::CXXVTT);
1689     Params.insert(Params.begin() + 1, VTTDecl);
1690     getStructorImplicitParamDecl(CGF) = VTTDecl;
1691   }
1692 }
1693 
1694 void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1695   // Naked functions have no prolog.
1696   if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1697     return;
1698 
1699   /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
1700   /// adjustments are required, because they are all handled by thunks.
1701   setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
1702 
1703   /// Initialize the 'vtt' slot if needed.
1704   if (getStructorImplicitParamDecl(CGF)) {
1705     getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1706         CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1707   }
1708 
1709   /// If this is a function that the ABI specifies returns 'this', initialize
1710   /// the return slot to 'this' at the start of the function.
1711   ///
1712   /// Unlike the setting of return types, this is done within the ABI
1713   /// implementation instead of by clients of CGCXXABI because:
1714   /// 1) getThisValue is currently protected
1715   /// 2) in theory, an ABI could implement 'this' returns some other way;
1716   ///    HasThisReturn only specifies a contract, not the implementation
1717   if (HasThisReturn(CGF.CurGD))
1718     CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
1719 }
1720 
1721 CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
1722     CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1723     bool ForVirtualBase, bool Delegating) {
1724   if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1725     return AddedStructorArgs{};
1726 
1727   // Insert the implicit 'vtt' argument as the second argument.
1728   llvm::Value *VTT =
1729       CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1730   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1731   return AddedStructorArgs::prefix({{VTT, VTTTy}});
1732 }
1733 
1734 llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
1735     CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
1736     bool ForVirtualBase, bool Delegating) {
1737   GlobalDecl GD(DD, Type);
1738   return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1739 }
1740 
1741 void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1742                                        const CXXDestructorDecl *DD,
1743                                        CXXDtorType Type, bool ForVirtualBase,
1744                                        bool Delegating, Address This,
1745                                        QualType ThisTy) {
1746   GlobalDecl GD(DD, Type);
1747   llvm::Value *VTT =
1748       getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
1749   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1750 
1751   CGCallee Callee;
1752   if (getContext().getLangOpts().AppleKext &&
1753       Type != Dtor_Base && DD->isVirtual())
1754     Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1755   else
1756     Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
1757 
1758   CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
1759                             nullptr);
1760 }
1761 
1762 void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
1763                                           const CXXRecordDecl *RD) {
1764   llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
1765   if (VTable->hasInitializer())
1766     return;
1767 
1768   ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1769   const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
1770   llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
1771   llvm::Constant *RTTI =
1772       CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
1773 
1774   // Create and set the initializer.
1775   ConstantInitBuilder builder(CGM);
1776   auto components = builder.beginStruct();
1777   CGVT.createVTableInitializer(components, VTLayout, RTTI,
1778                                llvm::GlobalValue::isLocalLinkage(Linkage));
1779   components.finishAndSetAsInitializer(VTable);
1780 
1781   // Set the correct linkage.
1782   VTable->setLinkage(Linkage);
1783 
1784   if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
1785     VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
1786 
1787   // Set the right visibility.
1788   CGM.setGVProperties(VTable, RD);
1789 
1790   // If this is the magic class __cxxabiv1::__fundamental_type_info,
1791   // we will emit the typeinfo for the fundamental types. This is the
1792   // same behaviour as GCC.
1793   const DeclContext *DC = RD->getDeclContext();
1794   if (RD->getIdentifier() &&
1795       RD->getIdentifier()->isStr("__fundamental_type_info") &&
1796       isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
1797       cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
1798       DC->getParent()->isTranslationUnit())
1799     EmitFundamentalRTTIDescriptors(RD);
1800 
1801   // Always emit type metadata on non-available_externally definitions, and on
1802   // available_externally definitions if we are performing whole program
1803   // devirtualization. For WPD we need the type metadata on all vtable
1804   // definitions to ensure we associate derived classes with base classes
1805   // defined in headers but with a strong definition only in a shared library.
1806   if (!VTable->isDeclarationForLinker() ||
1807       CGM.getCodeGenOpts().WholeProgramVTables) {
1808     CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
1809     // For available_externally definitions, add the vtable to
1810     // @llvm.compiler.used so that it isn't deleted before whole program
1811     // analysis.
1812     if (VTable->isDeclarationForLinker()) {
1813       assert(CGM.getCodeGenOpts().WholeProgramVTables);
1814       CGM.addCompilerUsedGlobal(VTable);
1815     }
1816   }
1817 
1818   if (VTContext.isRelativeLayout() && !VTable->isDSOLocal())
1819     CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
1820 }
1821 
1822 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1823     CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1824   if (Vptr.NearestVBase == nullptr)
1825     return false;
1826   return NeedsVTTParameter(CGF.CurGD);
1827 }
1828 
1829 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1830     CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1831     const CXXRecordDecl *NearestVBase) {
1832 
1833   if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1834       NeedsVTTParameter(CGF.CurGD)) {
1835     return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1836                                                   NearestVBase);
1837   }
1838   return getVTableAddressPoint(Base, VTableClass);
1839 }
1840 
1841 llvm::Constant *
1842 ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
1843                                      const CXXRecordDecl *VTableClass) {
1844   llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
1845 
1846   // Find the appropriate vtable within the vtable group, and the address point
1847   // within that vtable.
1848   VTableLayout::AddressPointLocation AddressPoint =
1849       CGM.getItaniumVTableContext()
1850           .getVTableLayout(VTableClass)
1851           .getAddressPoint(Base);
1852   llvm::Value *Indices[] = {
1853     llvm::ConstantInt::get(CGM.Int32Ty, 0),
1854     llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
1855     llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
1856   };
1857 
1858   return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
1859                                               Indices, /*InBounds=*/true,
1860                                               /*InRangeIndex=*/1);
1861 }
1862 
1863 // Check whether all the non-inline virtual methods for the class have the
1864 // specified attribute.
1865 template <typename T>
1866 static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
1867   bool FoundNonInlineVirtualMethodWithAttr = false;
1868   for (const auto *D : RD->noload_decls()) {
1869     if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
1870       if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
1871           FD->doesThisDeclarationHaveABody())
1872         continue;
1873       if (!D->hasAttr<T>())
1874         return false;
1875       FoundNonInlineVirtualMethodWithAttr = true;
1876     }
1877   }
1878 
1879   // We didn't find any non-inline virtual methods missing the attribute.  We
1880   // will return true when we found at least one non-inline virtual with the
1881   // attribute.  (This lets our caller know that the attribute needs to be
1882   // propagated up to the vtable.)
1883   return FoundNonInlineVirtualMethodWithAttr;
1884 }
1885 
1886 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1887     CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1888     const CXXRecordDecl *NearestVBase) {
1889   assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1890          NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1891 
1892   // Get the secondary vpointer index.
1893   uint64_t VirtualPointerIndex =
1894       CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1895 
1896   /// Load the VTT.
1897   llvm::Value *VTT = CGF.LoadCXXVTT();
1898   if (VirtualPointerIndex)
1899     VTT = CGF.Builder.CreateConstInBoundsGEP1_64(
1900         CGF.VoidPtrTy, VTT, VirtualPointerIndex);
1901 
1902   // And load the address point from the VTT.
1903   return CGF.Builder.CreateAlignedLoad(CGF.VoidPtrTy, VTT,
1904                                        CGF.getPointerAlign());
1905 }
1906 
1907 llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1908     BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1909   return getVTableAddressPoint(Base, VTableClass);
1910 }
1911 
1912 llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
1913                                                      CharUnits VPtrOffset) {
1914   assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
1915 
1916   llvm::GlobalVariable *&VTable = VTables[RD];
1917   if (VTable)
1918     return VTable;
1919 
1920   // Queue up this vtable for possible deferred emission.
1921   CGM.addDeferredVTable(RD);
1922 
1923   SmallString<256> Name;
1924   llvm::raw_svector_ostream Out(Name);
1925   getMangleContext().mangleCXXVTable(RD, Out);
1926 
1927   const VTableLayout &VTLayout =
1928       CGM.getItaniumVTableContext().getVTableLayout(RD);
1929   llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
1930 
1931   // Use pointer alignment for the vtable. Otherwise we would align them based
1932   // on the size of the initializer which doesn't make sense as only single
1933   // values are read.
1934   unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
1935                         ? 32
1936                         : CGM.getTarget().getPointerAlign(0);
1937 
1938   VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
1939       Name, VTableType, llvm::GlobalValue::ExternalLinkage,
1940       getContext().toCharUnitsFromBits(PAlign).getQuantity());
1941   VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1942 
1943   // In MS C++ if you have a class with virtual functions in which you are using
1944   // selective member import/export, then all virtual functions must be exported
1945   // unless they are inline, otherwise a link error will result. To match this
1946   // behavior, for such classes, we dllimport the vtable if it is defined
1947   // externally and all the non-inline virtual methods are marked dllimport, and
1948   // we dllexport the vtable if it is defined in this TU and all the non-inline
1949   // virtual methods are marked dllexport.
1950   if (CGM.getTarget().hasPS4DLLImportExport()) {
1951     if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
1952       if (CGM.getVTables().isVTableExternal(RD)) {
1953         if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
1954           VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
1955       } else {
1956         if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
1957           VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
1958       }
1959     }
1960   }
1961   CGM.setGVProperties(VTable, RD);
1962 
1963   return VTable;
1964 }
1965 
1966 CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
1967                                                   GlobalDecl GD,
1968                                                   Address This,
1969                                                   llvm::Type *Ty,
1970                                                   SourceLocation Loc) {
1971   llvm::Type *TyPtr = Ty->getPointerTo();
1972   auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
1973   llvm::Value *VTable = CGF.GetVTablePtr(
1974       This, TyPtr->getPointerTo(), MethodDecl->getParent());
1975 
1976   uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
1977   llvm::Value *VFunc;
1978   if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
1979     VFunc = CGF.EmitVTableTypeCheckedLoad(
1980         MethodDecl->getParent(), VTable,
1981         VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
1982   } else {
1983     CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
1984 
1985     llvm::Value *VFuncLoad;
1986     if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1987       VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
1988       llvm::Value *Load = CGF.Builder.CreateCall(
1989           CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
1990           {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
1991       VFuncLoad = CGF.Builder.CreateBitCast(Load, TyPtr);
1992     } else {
1993       VTable =
1994           CGF.Builder.CreateBitCast(VTable, TyPtr->getPointerTo());
1995       llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1996           TyPtr, VTable, VTableIndex, "vfn");
1997       VFuncLoad =
1998           CGF.Builder.CreateAlignedLoad(TyPtr, VTableSlotPtr,
1999                                         CGF.getPointerAlign());
2000     }
2001 
2002     // Add !invariant.load md to virtual function load to indicate that
2003     // function didn't change inside vtable.
2004     // It's safe to add it without -fstrict-vtable-pointers, but it would not
2005     // help in devirtualization because it will only matter if we will have 2
2006     // the same virtual function loads from the same vtable load, which won't
2007     // happen without enabled devirtualization with -fstrict-vtable-pointers.
2008     if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
2009         CGM.getCodeGenOpts().StrictVTablePointers) {
2010       if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
2011         VFuncLoadInstr->setMetadata(
2012             llvm::LLVMContext::MD_invariant_load,
2013             llvm::MDNode::get(CGM.getLLVMContext(),
2014                               llvm::ArrayRef<llvm::Metadata *>()));
2015       }
2016     }
2017     VFunc = VFuncLoad;
2018   }
2019 
2020   CGCallee Callee(GD, VFunc);
2021   return Callee;
2022 }
2023 
2024 llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
2025     CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
2026     Address This, DeleteOrMemberCallExpr E) {
2027   auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
2028   auto *D = E.dyn_cast<const CXXDeleteExpr *>();
2029   assert((CE != nullptr) ^ (D != nullptr));
2030   assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
2031   assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
2032 
2033   GlobalDecl GD(Dtor, DtorType);
2034   const CGFunctionInfo *FInfo =
2035       &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
2036   llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
2037   CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
2038 
2039   QualType ThisTy;
2040   if (CE) {
2041     ThisTy = CE->getObjectType();
2042   } else {
2043     ThisTy = D->getDestroyedType();
2044   }
2045 
2046   CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
2047                             QualType(), nullptr);
2048   return nullptr;
2049 }
2050 
2051 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2052   CodeGenVTables &VTables = CGM.getVTables();
2053   llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2054   VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
2055 }
2056 
2057 bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2058     const CXXRecordDecl *RD) const {
2059   // We don't emit available_externally vtables if we are in -fapple-kext mode
2060   // because kext mode does not permit devirtualization.
2061   if (CGM.getLangOpts().AppleKext)
2062     return false;
2063 
2064   // If the vtable is hidden then it is not safe to emit an available_externally
2065   // copy of vtable.
2066   if (isVTableHidden(RD))
2067     return false;
2068 
2069   if (CGM.getCodeGenOpts().ForceEmitVTables)
2070     return true;
2071 
2072   // If we don't have any not emitted inline virtual function then we are safe
2073   // to emit an available_externally copy of vtable.
2074   // FIXME we can still emit a copy of the vtable if we
2075   // can emit definition of the inline functions.
2076   if (hasAnyUnusedVirtualInlineFunction(RD))
2077     return false;
2078 
2079   // For a class with virtual bases, we must also be able to speculatively
2080   // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2081   // the vtable" and "can emit the VTT". For a base subobject, this means we
2082   // need to be able to emit non-virtual base vtables.
2083   if (RD->getNumVBases()) {
2084     for (const auto &B : RD->bases()) {
2085       auto *BRD = B.getType()->getAsCXXRecordDecl();
2086       assert(BRD && "no class for base specifier");
2087       if (B.isVirtual() || !BRD->isDynamicClass())
2088         continue;
2089       if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2090         return false;
2091     }
2092   }
2093 
2094   return true;
2095 }
2096 
2097 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2098   if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2099     return false;
2100 
2101   // For a complete-object vtable (or more specifically, for the VTT), we need
2102   // to be able to speculatively emit the vtables of all dynamic virtual bases.
2103   for (const auto &B : RD->vbases()) {
2104     auto *BRD = B.getType()->getAsCXXRecordDecl();
2105     assert(BRD && "no class for base specifier");
2106     if (!BRD->isDynamicClass())
2107       continue;
2108     if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2109       return false;
2110   }
2111 
2112   return true;
2113 }
2114 static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
2115                                           Address InitialPtr,
2116                                           int64_t NonVirtualAdjustment,
2117                                           int64_t VirtualAdjustment,
2118                                           bool IsReturnAdjustment) {
2119   if (!NonVirtualAdjustment && !VirtualAdjustment)
2120     return InitialPtr.getPointer();
2121 
2122   Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
2123 
2124   // In a base-to-derived cast, the non-virtual adjustment is applied first.
2125   if (NonVirtualAdjustment && !IsReturnAdjustment) {
2126     V = CGF.Builder.CreateConstInBoundsByteGEP(V,
2127                               CharUnits::fromQuantity(NonVirtualAdjustment));
2128   }
2129 
2130   // Perform the virtual adjustment if we have one.
2131   llvm::Value *ResultPtr;
2132   if (VirtualAdjustment) {
2133     Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
2134     llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
2135 
2136     llvm::Value *Offset;
2137     llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
2138         CGF.Int8Ty, VTablePtr, VirtualAdjustment);
2139     if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
2140       // Load the adjustment offset from the vtable as a 32-bit int.
2141       OffsetPtr =
2142           CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
2143       Offset =
2144           CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
2145                                         CharUnits::fromQuantity(4));
2146     } else {
2147       llvm::Type *PtrDiffTy =
2148           CGF.ConvertType(CGF.getContext().getPointerDiffType());
2149 
2150       OffsetPtr =
2151           CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
2152 
2153       // Load the adjustment offset from the vtable.
2154       Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
2155                                              CGF.getPointerAlign());
2156     }
2157     // Adjust our pointer.
2158     ResultPtr = CGF.Builder.CreateInBoundsGEP(
2159         V.getElementType(), V.getPointer(), Offset);
2160   } else {
2161     ResultPtr = V.getPointer();
2162   }
2163 
2164   // In a derived-to-base conversion, the non-virtual adjustment is
2165   // applied second.
2166   if (NonVirtualAdjustment && IsReturnAdjustment) {
2167     ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
2168                                                        NonVirtualAdjustment);
2169   }
2170 
2171   // Cast back to the original type.
2172   return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
2173 }
2174 
2175 llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
2176                                                   Address This,
2177                                                   const ThisAdjustment &TA) {
2178   return performTypeAdjustment(CGF, This, TA.NonVirtual,
2179                                TA.Virtual.Itanium.VCallOffsetOffset,
2180                                /*IsReturnAdjustment=*/false);
2181 }
2182 
2183 llvm::Value *
2184 ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
2185                                        const ReturnAdjustment &RA) {
2186   return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
2187                                RA.Virtual.Itanium.VBaseOffsetOffset,
2188                                /*IsReturnAdjustment=*/true);
2189 }
2190 
2191 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2192                                     RValue RV, QualType ResultType) {
2193   if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
2194     return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2195 
2196   // Destructor thunks in the ARM ABI have indeterminate results.
2197   llvm::Type *T = CGF.ReturnValue.getElementType();
2198   RValue Undef = RValue::get(llvm::UndefValue::get(T));
2199   return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
2200 }
2201 
2202 /************************** Array allocation cookies **************************/
2203 
2204 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2205   // The array cookie is a size_t; pad that up to the element alignment.
2206   // The cookie is actually right-justified in that space.
2207   return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
2208                   CGM.getContext().getPreferredTypeAlignInChars(elementType));
2209 }
2210 
2211 Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2212                                              Address NewPtr,
2213                                              llvm::Value *NumElements,
2214                                              const CXXNewExpr *expr,
2215                                              QualType ElementType) {
2216   assert(requiresArrayCookie(expr));
2217 
2218   unsigned AS = NewPtr.getAddressSpace();
2219 
2220   ASTContext &Ctx = getContext();
2221   CharUnits SizeSize = CGF.getSizeSize();
2222 
2223   // The size of the cookie.
2224   CharUnits CookieSize =
2225       std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
2226   assert(CookieSize == getArrayCookieSizeImpl(ElementType));
2227 
2228   // Compute an offset to the cookie.
2229   Address CookiePtr = NewPtr;
2230   CharUnits CookieOffset = CookieSize - SizeSize;
2231   if (!CookieOffset.isZero())
2232     CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
2233 
2234   // Write the number of elements into the appropriate slot.
2235   Address NumElementsPtr =
2236       CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
2237   llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
2238 
2239   // Handle the array cookie specially in ASan.
2240   if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
2241       (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
2242        CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
2243     // The store to the CookiePtr does not need to be instrumented.
2244     CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
2245     llvm::FunctionType *FTy =
2246         llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
2247     llvm::FunctionCallee F =
2248         CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
2249     CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
2250   }
2251 
2252   // Finally, compute a pointer to the actual data buffer by skipping
2253   // over the cookie completely.
2254   return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
2255 }
2256 
2257 llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2258                                                 Address allocPtr,
2259                                                 CharUnits cookieSize) {
2260   // The element size is right-justified in the cookie.
2261   Address numElementsPtr = allocPtr;
2262   CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
2263   if (!numElementsOffset.isZero())
2264     numElementsPtr =
2265       CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
2266 
2267   unsigned AS = allocPtr.getAddressSpace();
2268   numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2269   if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
2270     return CGF.Builder.CreateLoad(numElementsPtr);
2271   // In asan mode emit a function call instead of a regular load and let the
2272   // run-time deal with it: if the shadow is properly poisoned return the
2273   // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
2274   // We can't simply ignore this load using nosanitize metadata because
2275   // the metadata may be lost.
2276   llvm::FunctionType *FTy =
2277       llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
2278   llvm::FunctionCallee F =
2279       CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
2280   return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
2281 }
2282 
2283 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2284   // ARM says that the cookie is always:
2285   //   struct array_cookie {
2286   //     std::size_t element_size; // element_size != 0
2287   //     std::size_t element_count;
2288   //   };
2289   // But the base ABI doesn't give anything an alignment greater than
2290   // 8, so we can dismiss this as typical ABI-author blindness to
2291   // actual language complexity and round up to the element alignment.
2292   return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2293                   CGM.getContext().getTypeAlignInChars(elementType));
2294 }
2295 
2296 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2297                                          Address newPtr,
2298                                          llvm::Value *numElements,
2299                                          const CXXNewExpr *expr,
2300                                          QualType elementType) {
2301   assert(requiresArrayCookie(expr));
2302 
2303   // The cookie is always at the start of the buffer.
2304   Address cookie = newPtr;
2305 
2306   // The first element is the element size.
2307   cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
2308   llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2309                  getContext().getTypeSizeInChars(elementType).getQuantity());
2310   CGF.Builder.CreateStore(elementSize, cookie);
2311 
2312   // The second element is the element count.
2313   cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2314   CGF.Builder.CreateStore(numElements, cookie);
2315 
2316   // Finally, compute a pointer to the actual data buffer by skipping
2317   // over the cookie completely.
2318   CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2319   return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2320 }
2321 
2322 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2323                                             Address allocPtr,
2324                                             CharUnits cookieSize) {
2325   // The number of elements is at offset sizeof(size_t) relative to
2326   // the allocated pointer.
2327   Address numElementsPtr
2328     = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2329 
2330   numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2331   return CGF.Builder.CreateLoad(numElementsPtr);
2332 }
2333 
2334 /*********************** Static local initialization **************************/
2335 
2336 static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2337                                               llvm::PointerType *GuardPtrTy) {
2338   // int __cxa_guard_acquire(__guard *guard_object);
2339   llvm::FunctionType *FTy =
2340     llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2341                             GuardPtrTy, /*isVarArg=*/false);
2342   return CGM.CreateRuntimeFunction(
2343       FTy, "__cxa_guard_acquire",
2344       llvm::AttributeList::get(CGM.getLLVMContext(),
2345                                llvm::AttributeList::FunctionIndex,
2346                                llvm::Attribute::NoUnwind));
2347 }
2348 
2349 static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2350                                               llvm::PointerType *GuardPtrTy) {
2351   // void __cxa_guard_release(__guard *guard_object);
2352   llvm::FunctionType *FTy =
2353     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2354   return CGM.CreateRuntimeFunction(
2355       FTy, "__cxa_guard_release",
2356       llvm::AttributeList::get(CGM.getLLVMContext(),
2357                                llvm::AttributeList::FunctionIndex,
2358                                llvm::Attribute::NoUnwind));
2359 }
2360 
2361 static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2362                                             llvm::PointerType *GuardPtrTy) {
2363   // void __cxa_guard_abort(__guard *guard_object);
2364   llvm::FunctionType *FTy =
2365     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2366   return CGM.CreateRuntimeFunction(
2367       FTy, "__cxa_guard_abort",
2368       llvm::AttributeList::get(CGM.getLLVMContext(),
2369                                llvm::AttributeList::FunctionIndex,
2370                                llvm::Attribute::NoUnwind));
2371 }
2372 
2373 namespace {
2374   struct CallGuardAbort final : EHScopeStack::Cleanup {
2375     llvm::GlobalVariable *Guard;
2376     CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2377 
2378     void Emit(CodeGenFunction &CGF, Flags flags) override {
2379       CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
2380                                   Guard);
2381     }
2382   };
2383 }
2384 
2385 /// The ARM code here follows the Itanium code closely enough that we
2386 /// just special-case it at particular places.
2387 void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
2388                                     const VarDecl &D,
2389                                     llvm::GlobalVariable *var,
2390                                     bool shouldPerformInit) {
2391   CGBuilderTy &Builder = CGF.Builder;
2392 
2393   // Inline variables that weren't instantiated from variable templates have
2394   // partially-ordered initialization within their translation unit.
2395   bool NonTemplateInline =
2396       D.isInline() &&
2397       !isTemplateInstantiation(D.getTemplateSpecializationKind());
2398 
2399   // We only need to use thread-safe statics for local non-TLS variables and
2400   // inline variables; other global initialization is always single-threaded
2401   // or (through lazy dynamic loading in multiple threads) unsequenced.
2402   bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
2403                     (D.isLocalVarDecl() || NonTemplateInline) &&
2404                     !D.getTLSKind();
2405 
2406   // If we have a global variable with internal linkage and thread-safe statics
2407   // are disabled, we can just let the guard variable be of type i8.
2408   bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
2409 
2410   llvm::IntegerType *guardTy;
2411   CharUnits guardAlignment;
2412   if (useInt8GuardVariable) {
2413     guardTy = CGF.Int8Ty;
2414     guardAlignment = CharUnits::One();
2415   } else {
2416     // Guard variables are 64 bits in the generic ABI and size width on ARM
2417     // (i.e. 32-bit on AArch32, 64-bit on AArch64).
2418     if (UseARMGuardVarABI) {
2419       guardTy = CGF.SizeTy;
2420       guardAlignment = CGF.getSizeAlign();
2421     } else {
2422       guardTy = CGF.Int64Ty;
2423       guardAlignment = CharUnits::fromQuantity(
2424                              CGM.getDataLayout().getABITypeAlignment(guardTy));
2425     }
2426   }
2427   llvm::PointerType *guardPtrTy = guardTy->getPointerTo(
2428       CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());
2429 
2430   // Create the guard variable if we don't already have it (as we
2431   // might if we're double-emitting this function body).
2432   llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
2433   if (!guard) {
2434     // Mangle the name for the guard.
2435     SmallString<256> guardName;
2436     {
2437       llvm::raw_svector_ostream out(guardName);
2438       getMangleContext().mangleStaticGuardVariable(&D, out);
2439     }
2440 
2441     // Create the guard variable with a zero-initializer.
2442     // Just absorb linkage and visibility from the guarded variable.
2443     guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
2444                                      false, var->getLinkage(),
2445                                      llvm::ConstantInt::get(guardTy, 0),
2446                                      guardName.str());
2447     guard->setDSOLocal(var->isDSOLocal());
2448     guard->setVisibility(var->getVisibility());
2449     // If the variable is thread-local, so is its guard variable.
2450     guard->setThreadLocalMode(var->getThreadLocalMode());
2451     guard->setAlignment(guardAlignment.getAsAlign());
2452 
2453     // The ABI says: "It is suggested that it be emitted in the same COMDAT
2454     // group as the associated data object." In practice, this doesn't work for
2455     // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
2456     llvm::Comdat *C = var->getComdat();
2457     if (!D.isLocalVarDecl() && C &&
2458         (CGM.getTarget().getTriple().isOSBinFormatELF() ||
2459          CGM.getTarget().getTriple().isOSBinFormatWasm())) {
2460       guard->setComdat(C);
2461     } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
2462       guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
2463     }
2464 
2465     CGM.setStaticLocalDeclGuardAddress(&D, guard);
2466   }
2467 
2468   Address guardAddr = Address(guard, guardAlignment);
2469 
2470   // Test whether the variable has completed initialization.
2471   //
2472   // Itanium C++ ABI 3.3.2:
2473   //   The following is pseudo-code showing how these functions can be used:
2474   //     if (obj_guard.first_byte == 0) {
2475   //       if ( __cxa_guard_acquire (&obj_guard) ) {
2476   //         try {
2477   //           ... initialize the object ...;
2478   //         } catch (...) {
2479   //            __cxa_guard_abort (&obj_guard);
2480   //            throw;
2481   //         }
2482   //         ... queue object destructor with __cxa_atexit() ...;
2483   //         __cxa_guard_release (&obj_guard);
2484   //       }
2485   //     }
2486 
2487   // Load the first byte of the guard variable.
2488   llvm::LoadInst *LI =
2489       Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2490 
2491   // Itanium ABI:
2492   //   An implementation supporting thread-safety on multiprocessor
2493   //   systems must also guarantee that references to the initialized
2494   //   object do not occur before the load of the initialization flag.
2495   //
2496   // In LLVM, we do this by marking the load Acquire.
2497   if (threadsafe)
2498     LI->setAtomic(llvm::AtomicOrdering::Acquire);
2499 
2500   // For ARM, we should only check the first bit, rather than the entire byte:
2501   //
2502   // ARM C++ ABI 3.2.3.1:
2503   //   To support the potential use of initialization guard variables
2504   //   as semaphores that are the target of ARM SWP and LDREX/STREX
2505   //   synchronizing instructions we define a static initialization
2506   //   guard variable to be a 4-byte aligned, 4-byte word with the
2507   //   following inline access protocol.
2508   //     #define INITIALIZED 1
2509   //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
2510   //       if (__cxa_guard_acquire(&obj_guard))
2511   //         ...
2512   //     }
2513   //
2514   // and similarly for ARM64:
2515   //
2516   // ARM64 C++ ABI 3.2.2:
2517   //   This ABI instead only specifies the value bit 0 of the static guard
2518   //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
2519   //   variable is not initialized and 1 when it is.
2520   llvm::Value *V =
2521       (UseARMGuardVarABI && !useInt8GuardVariable)
2522           ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
2523           : LI;
2524   llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");
2525 
2526   llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
2527   llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
2528 
2529   // Check if the first byte of the guard variable is zero.
2530   CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
2531                                CodeGenFunction::GuardKind::VariableGuard, &D);
2532 
2533   CGF.EmitBlock(InitCheckBlock);
2534 
2535   // Variables used when coping with thread-safe statics and exceptions.
2536   if (threadsafe) {
2537     // Call __cxa_guard_acquire.
2538     llvm::Value *V
2539       = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
2540 
2541     llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
2542 
2543     Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
2544                          InitBlock, EndBlock);
2545 
2546     // Call __cxa_guard_abort along the exceptional edge.
2547     CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
2548 
2549     CGF.EmitBlock(InitBlock);
2550   }
2551 
2552   // Emit the initializer and add a global destructor if appropriate.
2553   CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
2554 
2555   if (threadsafe) {
2556     // Pop the guard-abort cleanup if we pushed one.
2557     CGF.PopCleanupBlock();
2558 
2559     // Call __cxa_guard_release.  This cannot throw.
2560     CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
2561                                 guardAddr.getPointer());
2562   } else {
2563     // Store 1 into the first byte of the guard variable after initialization is
2564     // complete.
2565     Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
2566                         Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2567   }
2568 
2569   CGF.EmitBlock(EndBlock);
2570 }
2571 
2572 /// Register a global destructor using __cxa_atexit.
2573 static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
2574                                         llvm::FunctionCallee dtor,
2575                                         llvm::Constant *addr, bool TLS) {
2576   assert(!CGF.getTarget().getTriple().isOSAIX() &&
2577          "unexpected call to emitGlobalDtorWithCXAAtExit");
2578   assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
2579          "__cxa_atexit is disabled");
2580   const char *Name = "__cxa_atexit";
2581   if (TLS) {
2582     const llvm::Triple &T = CGF.getTarget().getTriple();
2583     Name = T.isOSDarwin() ?  "_tlv_atexit" : "__cxa_thread_atexit";
2584   }
2585 
2586   // We're assuming that the destructor function is something we can
2587   // reasonably call with the default CC.  Go ahead and cast it to the
2588   // right prototype.
2589   llvm::Type *dtorTy =
2590     llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
2591 
2592   // Preserve address space of addr.
2593   auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
2594   auto AddrInt8PtrTy =
2595       AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;
2596 
2597   // Create a variable that binds the atexit to this shared object.
2598   llvm::Constant *handle =
2599       CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
2600   auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
2601   GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
2602 
2603   // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2604   llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
2605   llvm::FunctionType *atexitTy =
2606     llvm::FunctionType::get(CGF.IntTy, paramTys, false);
2607 
2608   // Fetch the actual function.
2609   llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
2610   if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
2611     fn->setDoesNotThrow();
2612 
2613   if (!addr)
2614     // addr is null when we are trying to register a dtor annotated with
2615     // __attribute__((destructor)) in a constructor function. Using null here is
2616     // okay because this argument is just passed back to the destructor
2617     // function.
2618     addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
2619 
2620   llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(
2621                              cast<llvm::Constant>(dtor.getCallee()), dtorTy),
2622                          llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
2623                          handle};
2624   CGF.EmitNounwindRuntimeCall(atexit, args);
2625 }
2626 
2627 static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
2628                                                    StringRef FnName) {
2629   // Create a function that registers/unregisters destructors that have the same
2630   // priority.
2631   llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
2632   llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
2633       FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());
2634 
2635   return GlobalInitOrCleanupFn;
2636 }
2637 
2638 void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
2639   for (const auto &I : DtorsUsingAtExit) {
2640     int Priority = I.first;
2641     std::string GlobalCleanupFnName =
2642         std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);
2643 
2644     llvm::Function *GlobalCleanupFn =
2645         createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);
2646 
2647     CodeGenFunction CGF(*this);
2648     CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn,
2649                       getTypes().arrangeNullaryFunction(), FunctionArgList(),
2650                       SourceLocation(), SourceLocation());
2651     auto AL = ApplyDebugLocation::CreateArtificial(CGF);
2652 
2653     // Get the destructor function type, void(*)(void).
2654     llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);
2655     llvm::Type *dtorTy = dtorFuncTy->getPointerTo();
2656 
2657     // Destructor functions are run/unregistered in non-ascending
2658     // order of their priorities.
2659     const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2660     auto itv = Dtors.rbegin();
2661     while (itv != Dtors.rend()) {
2662       llvm::Function *Dtor = *itv;
2663 
2664       // We're assuming that the destructor function is something we can
2665       // reasonably call with the correct CC.  Go ahead and cast it to the
2666       // right prototype.
2667       llvm::Constant *dtor = llvm::ConstantExpr::getBitCast(Dtor, dtorTy);
2668       llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtor);
2669       llvm::Value *NeedsDestruct =
2670           CGF.Builder.CreateIsNull(V, "needs_destruct");
2671 
2672       llvm::BasicBlock *DestructCallBlock =
2673           CGF.createBasicBlock("destruct.call");
2674       llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
2675           (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
2676       // Check if unatexit returns a value of 0. If it does, jump to
2677       // DestructCallBlock, otherwise jump to EndBlock directly.
2678       CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
2679 
2680       CGF.EmitBlock(DestructCallBlock);
2681 
2682       // Emit the call to casted Dtor.
2683       llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, dtor);
2684       // Make sure the call and the callee agree on calling convention.
2685       CI->setCallingConv(Dtor->getCallingConv());
2686 
2687       CGF.EmitBlock(EndBlock);
2688 
2689       itv++;
2690     }
2691 
2692     CGF.FinishFunction();
2693     AddGlobalDtor(GlobalCleanupFn, Priority);
2694   }
2695 }
2696 
2697 void CodeGenModule::registerGlobalDtorsWithAtExit() {
2698   for (const auto &I : DtorsUsingAtExit) {
2699     int Priority = I.first;
2700     std::string GlobalInitFnName =
2701         std::string("__GLOBAL_init_") + llvm::to_string(Priority);
2702     llvm::Function *GlobalInitFn =
2703         createGlobalInitOrCleanupFn(*this, GlobalInitFnName);
2704 
2705     CodeGenFunction CGF(*this);
2706     CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn,
2707                       getTypes().arrangeNullaryFunction(), FunctionArgList(),
2708                       SourceLocation(), SourceLocation());
2709     auto AL = ApplyDebugLocation::CreateArtificial(CGF);
2710 
2711     // Since constructor functions are run in non-descending order of their
2712     // priorities, destructors are registered in non-descending order of their
2713     // priorities, and since destructor functions are run in the reverse order
2714     // of their registration, destructor functions are run in non-ascending
2715     // order of their priorities.
2716     const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2717     for (auto *Dtor : Dtors) {
2718       // Register the destructor function calling __cxa_atexit if it is
2719       // available. Otherwise fall back on calling atexit.
2720       if (getCodeGenOpts().CXAAtExit) {
2721         emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
2722       } else {
2723         // Get the destructor function type, void(*)(void).
2724         llvm::Type *dtorTy =
2725             llvm::FunctionType::get(CGF.VoidTy, false)->getPointerTo();
2726 
2727         // We're assuming that the destructor function is something we can
2728         // reasonably call with the correct CC.  Go ahead and cast it to the
2729         // right prototype.
2730         CGF.registerGlobalDtorWithAtExit(
2731             llvm::ConstantExpr::getBitCast(Dtor, dtorTy));
2732       }
2733     }
2734 
2735     CGF.FinishFunction();
2736     AddGlobalCtor(GlobalInitFn, Priority, nullptr);
2737   }
2738 
2739   if (getCXXABI().useSinitAndSterm())
2740     unregisterGlobalDtorsWithUnAtExit();
2741 }
2742 
2743 /// Register a global destructor as best as we know how.
2744 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2745                                        llvm::FunctionCallee dtor,
2746                                        llvm::Constant *addr) {
2747   if (D.isNoDestroy(CGM.getContext()))
2748     return;
2749 
2750   // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2751   // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
2752   // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2753   // We can always use __cxa_thread_atexit.
2754   if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2755     return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2756 
2757   // In Apple kexts, we want to add a global destructor entry.
2758   // FIXME: shouldn't this be guarded by some variable?
2759   if (CGM.getLangOpts().AppleKext) {
2760     // Generate a global destructor entry.
2761     return CGM.AddCXXDtorEntry(dtor, addr);
2762   }
2763 
2764   CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2765 }
2766 
2767 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2768                                        CodeGen::CodeGenModule &CGM) {
2769   assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2770   // Darwin prefers to have references to thread local variables to go through
2771   // the thread wrapper instead of directly referencing the backing variable.
2772   return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2773          CGM.getTarget().getTriple().isOSDarwin();
2774 }
2775 
2776 /// Get the appropriate linkage for the wrapper function. This is essentially
2777 /// the weak form of the variable's linkage; every translation unit which needs
2778 /// the wrapper emits a copy, and we want the linker to merge them.
2779 static llvm::GlobalValue::LinkageTypes
2780 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2781   llvm::GlobalValue::LinkageTypes VarLinkage =
2782       CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
2783 
2784   // For internal linkage variables, we don't need an external or weak wrapper.
2785   if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2786     return VarLinkage;
2787 
2788   // If the thread wrapper is replaceable, give it appropriate linkage.
2789   if (isThreadWrapperReplaceable(VD, CGM))
2790     if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2791         !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2792       return VarLinkage;
2793   return llvm::GlobalValue::WeakODRLinkage;
2794 }
2795 
2796 llvm::Function *
2797 ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
2798                                              llvm::Value *Val) {
2799   // Mangle the name for the thread_local wrapper function.
2800   SmallString<256> WrapperName;
2801   {
2802     llvm::raw_svector_ostream Out(WrapperName);
2803     getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2804   }
2805 
2806   // FIXME: If VD is a definition, we should regenerate the function attributes
2807   // before returning.
2808   if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2809     return cast<llvm::Function>(V);
2810 
2811   QualType RetQT = VD->getType();
2812   if (RetQT->isReferenceType())
2813     RetQT = RetQT.getNonReferenceType();
2814 
2815   const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2816       getContext().getPointerType(RetQT), FunctionArgList());
2817 
2818   llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
2819   llvm::Function *Wrapper =
2820       llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2821                              WrapperName.str(), &CGM.getModule());
2822 
2823   if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
2824     Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));
2825 
2826   CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false);
2827 
2828   // Always resolve references to the wrapper at link time.
2829   if (!Wrapper->hasLocalLinkage())
2830     if (!isThreadWrapperReplaceable(VD, CGM) ||
2831         llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
2832         llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
2833         VD->getVisibility() == HiddenVisibility)
2834       Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
2835 
2836   if (isThreadWrapperReplaceable(VD, CGM)) {
2837     Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2838     Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
2839   }
2840 
2841   ThreadWrappers.push_back({VD, Wrapper});
2842   return Wrapper;
2843 }
2844 
2845 void ItaniumCXXABI::EmitThreadLocalInitFuncs(
2846     CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
2847     ArrayRef<llvm::Function *> CXXThreadLocalInits,
2848     ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
2849   llvm::Function *InitFunc = nullptr;
2850 
2851   // Separate initializers into those with ordered (or partially-ordered)
2852   // initialization and those with unordered initialization.
2853   llvm::SmallVector<llvm::Function *, 8> OrderedInits;
2854   llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
2855   for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
2856     if (isTemplateInstantiation(
2857             CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
2858       UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
2859           CXXThreadLocalInits[I];
2860     else
2861       OrderedInits.push_back(CXXThreadLocalInits[I]);
2862   }
2863 
2864   if (!OrderedInits.empty()) {
2865     // Generate a guarded initialization function.
2866     llvm::FunctionType *FTy =
2867         llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
2868     const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2869     InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
2870                                                      SourceLocation(),
2871                                                      /*TLS=*/true);
2872     llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
2873         CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2874         llvm::GlobalVariable::InternalLinkage,
2875         llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
2876     Guard->setThreadLocal(true);
2877     Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());
2878 
2879     CharUnits GuardAlign = CharUnits::One();
2880     Guard->setAlignment(GuardAlign.getAsAlign());
2881 
2882     CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
2883         InitFunc, OrderedInits, ConstantAddress(Guard, GuardAlign));
2884     // On Darwin platforms, use CXX_FAST_TLS calling convention.
2885     if (CGM.getTarget().getTriple().isOSDarwin()) {
2886       InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2887       InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
2888     }
2889   }
2890 
2891   // Create declarations for thread wrappers for all thread-local variables
2892   // with non-discardable definitions in this translation unit.
2893   for (const VarDecl *VD : CXXThreadLocals) {
2894     if (VD->hasDefinition() &&
2895         !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
2896       llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
2897       getOrCreateThreadLocalWrapper(VD, GV);
2898     }
2899   }
2900 
2901   // Emit all referenced thread wrappers.
2902   for (auto VDAndWrapper : ThreadWrappers) {
2903     const VarDecl *VD = VDAndWrapper.first;
2904     llvm::GlobalVariable *Var =
2905         cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
2906     llvm::Function *Wrapper = VDAndWrapper.second;
2907 
2908     // Some targets require that all access to thread local variables go through
2909     // the thread wrapper.  This means that we cannot attempt to create a thread
2910     // wrapper or a thread helper.
2911     if (!VD->hasDefinition()) {
2912       if (isThreadWrapperReplaceable(VD, CGM)) {
2913         Wrapper->setLinkage(llvm::Function::ExternalLinkage);
2914         continue;
2915       }
2916 
2917       // If this isn't a TU in which this variable is defined, the thread
2918       // wrapper is discardable.
2919       if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
2920         Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
2921     }
2922 
2923     CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);
2924 
2925     // Mangle the name for the thread_local initialization function.
2926     SmallString<256> InitFnName;
2927     {
2928       llvm::raw_svector_ostream Out(InitFnName);
2929       getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
2930     }
2931 
2932     llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);
2933 
2934     // If we have a definition for the variable, emit the initialization
2935     // function as an alias to the global Init function (if any). Otherwise,
2936     // produce a declaration of the initialization function.
2937     llvm::GlobalValue *Init = nullptr;
2938     bool InitIsInitFunc = false;
2939     bool HasConstantInitialization = false;
2940     if (!usesThreadWrapperFunction(VD)) {
2941       HasConstantInitialization = true;
2942     } else if (VD->hasDefinition()) {
2943       InitIsInitFunc = true;
2944       llvm::Function *InitFuncToUse = InitFunc;
2945       if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
2946         InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
2947       if (InitFuncToUse)
2948         Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
2949                                          InitFuncToUse);
2950     } else {
2951       // Emit a weak global function referring to the initialization function.
2952       // This function will not exist if the TU defining the thread_local
2953       // variable in question does not need any dynamic initialization for
2954       // its thread_local variables.
2955       Init = llvm::Function::Create(InitFnTy,
2956                                     llvm::GlobalVariable::ExternalWeakLinkage,
2957                                     InitFnName.str(), &CGM.getModule());
2958       const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2959       CGM.SetLLVMFunctionAttributes(
2960           GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false);
2961     }
2962 
2963     if (Init) {
2964       Init->setVisibility(Var->getVisibility());
2965       // Don't mark an extern_weak function DSO local on windows.
2966       if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
2967         Init->setDSOLocal(Var->isDSOLocal());
2968     }
2969 
2970     llvm::LLVMContext &Context = CGM.getModule().getContext();
2971 
2972     // The linker on AIX is not happy with missing weak symbols.  However,
2973     // other TUs will not know whether the initialization routine exists
2974     // so create an empty, init function to satisfy the linker.
2975     // This is needed whenever a thread wrapper function is not used, and
2976     // also when the symbol is weak.
2977     if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
2978         isEmittedWithConstantInitializer(VD, true) &&
2979         !mayNeedDestruction(VD)) {
2980       // Init should be null.  If it were non-null, then the logic above would
2981       // either be defining the function to be an alias or declaring the
2982       // function with the expectation that the definition of the variable
2983       // is elsewhere.
2984       assert(Init == nullptr && "Expected Init to be null.");
2985 
2986       llvm::Function *Func = llvm::Function::Create(
2987           InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule());
2988       const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2989       CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
2990                                     cast<llvm::Function>(Func),
2991                                     /*IsThunk=*/false);
2992       // Create a function body that just returns
2993       llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func);
2994       CGBuilderTy Builder(CGM, Entry);
2995       Builder.CreateRetVoid();
2996     }
2997 
2998     llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
2999     CGBuilderTy Builder(CGM, Entry);
3000     if (HasConstantInitialization) {
3001       // No dynamic initialization to invoke.
3002     } else if (InitIsInitFunc) {
3003       if (Init) {
3004         llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
3005         if (isThreadWrapperReplaceable(VD, CGM)) {
3006           CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
3007           llvm::Function *Fn =
3008               cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
3009           Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
3010         }
3011       }
3012     } else if (CGM.getTriple().isOSAIX()) {
3013       // On AIX, except if constinit and also neither of class type or of
3014       // (possibly multi-dimensional) array of class type, thread_local vars
3015       // will have init routines regardless of whether they are
3016       // const-initialized.  Since the routine is guaranteed to exist, we can
3017       // unconditionally call it without testing for its existance.  This
3018       // avoids potentially unresolved weak symbols which the AIX linker
3019       // isn't happy with.
3020       Builder.CreateCall(InitFnTy, Init);
3021     } else {
3022       // Don't know whether we have an init function. Call it if it exists.
3023       llvm::Value *Have = Builder.CreateIsNotNull(Init);
3024       llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
3025       llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
3026       Builder.CreateCondBr(Have, InitBB, ExitBB);
3027 
3028       Builder.SetInsertPoint(InitBB);
3029       Builder.CreateCall(InitFnTy, Init);
3030       Builder.CreateBr(ExitBB);
3031 
3032       Builder.SetInsertPoint(ExitBB);
3033     }
3034 
3035     // For a reference, the result of the wrapper function is a pointer to
3036     // the referenced object.
3037     llvm::Value *Val = Var;
3038     if (VD->getType()->isReferenceType()) {
3039       CharUnits Align = CGM.getContext().getDeclAlign(VD);
3040       Val = Builder.CreateAlignedLoad(Var->getValueType(), Var, Align);
3041     }
3042     if (Val->getType() != Wrapper->getReturnType())
3043       Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
3044           Val, Wrapper->getReturnType(), "");
3045     Builder.CreateRet(Val);
3046   }
3047 }
3048 
3049 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
3050                                                    const VarDecl *VD,
3051                                                    QualType LValType) {
3052   llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
3053   llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3054 
3055   llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
3056   CallVal->setCallingConv(Wrapper->getCallingConv());
3057 
3058   LValue LV;
3059   if (VD->getType()->isReferenceType())
3060     LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
3061   else
3062     LV = CGF.MakeAddrLValue(CallVal, LValType,
3063                             CGF.getContext().getDeclAlign(VD));
3064   // FIXME: need setObjCGCLValueClass?
3065   return LV;
3066 }
3067 
3068 /// Return whether the given global decl needs a VTT parameter, which it does
3069 /// if it's a base constructor or destructor with virtual bases.
3070 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3071   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
3072 
3073   // We don't have any virtual bases, just return early.
3074   if (!MD->getParent()->getNumVBases())
3075     return false;
3076 
3077   // Check if we have a base constructor.
3078   if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
3079     return true;
3080 
3081   // Check if we have a base destructor.
3082   if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
3083     return true;
3084 
3085   return false;
3086 }
3087 
3088 namespace {
3089 class ItaniumRTTIBuilder {
3090   CodeGenModule &CGM;  // Per-module state.
3091   llvm::LLVMContext &VMContext;
3092   const ItaniumCXXABI &CXXABI;  // Per-module state.
3093 
3094   /// Fields - The fields of the RTTI descriptor currently being built.
3095   SmallVector<llvm::Constant *, 16> Fields;
3096 
3097   /// GetAddrOfTypeName - Returns the mangled type name of the given type.
3098   llvm::GlobalVariable *
3099   GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
3100 
3101   /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
3102   /// descriptor of the given type.
3103   llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
3104 
3105   /// BuildVTablePointer - Build the vtable pointer for the given type.
3106   void BuildVTablePointer(const Type *Ty);
3107 
3108   /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3109   /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3110   void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
3111 
3112   /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3113   /// classes with bases that do not satisfy the abi::__si_class_type_info
3114   /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
3115   void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
3116 
3117   /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
3118   /// for pointer types.
3119   void BuildPointerTypeInfo(QualType PointeeTy);
3120 
3121   /// BuildObjCObjectTypeInfo - Build the appropriate kind of
3122   /// type_info for an object type.
3123   void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
3124 
3125   /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3126   /// struct, used for member pointer types.
3127   void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
3128 
3129 public:
3130   ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
3131       : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
3132 
3133   // Pointer type info flags.
3134   enum {
3135     /// PTI_Const - Type has const qualifier.
3136     PTI_Const = 0x1,
3137 
3138     /// PTI_Volatile - Type has volatile qualifier.
3139     PTI_Volatile = 0x2,
3140 
3141     /// PTI_Restrict - Type has restrict qualifier.
3142     PTI_Restrict = 0x4,
3143 
3144     /// PTI_Incomplete - Type is incomplete.
3145     PTI_Incomplete = 0x8,
3146 
3147     /// PTI_ContainingClassIncomplete - Containing class is incomplete.
3148     /// (in pointer to member).
3149     PTI_ContainingClassIncomplete = 0x10,
3150 
3151     /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
3152     //PTI_TransactionSafe = 0x20,
3153 
3154     /// PTI_Noexcept - Pointee is noexcept function (C++1z).
3155     PTI_Noexcept = 0x40,
3156   };
3157 
3158   // VMI type info flags.
3159   enum {
3160     /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
3161     VMI_NonDiamondRepeat = 0x1,
3162 
3163     /// VMI_DiamondShaped - Class is diamond shaped.
3164     VMI_DiamondShaped = 0x2
3165   };
3166 
3167   // Base class type info flags.
3168   enum {
3169     /// BCTI_Virtual - Base class is virtual.
3170     BCTI_Virtual = 0x1,
3171 
3172     /// BCTI_Public - Base class is public.
3173     BCTI_Public = 0x2
3174   };
3175 
3176   /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
3177   /// link to an existing RTTI descriptor if one already exists.
3178   llvm::Constant *BuildTypeInfo(QualType Ty);
3179 
3180   /// BuildTypeInfo - Build the RTTI type info struct for the given type.
3181   llvm::Constant *BuildTypeInfo(
3182       QualType Ty,
3183       llvm::GlobalVariable::LinkageTypes Linkage,
3184       llvm::GlobalValue::VisibilityTypes Visibility,
3185       llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
3186 };
3187 }
3188 
3189 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3190     QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3191   SmallString<256> Name;
3192   llvm::raw_svector_ostream Out(Name);
3193   CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
3194 
3195   // We know that the mangled name of the type starts at index 4 of the
3196   // mangled name of the typename, so we can just index into it in order to
3197   // get the mangled name of the type.
3198   llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
3199                                                             Name.substr(4));
3200   auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
3201 
3202   llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3203       Name, Init->getType(), Linkage, Align.getQuantity());
3204 
3205   GV->setInitializer(Init);
3206 
3207   return GV;
3208 }
3209 
3210 llvm::Constant *
3211 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
3212   // Mangle the RTTI name.
3213   SmallString<256> Name;
3214   llvm::raw_svector_ostream Out(Name);
3215   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3216 
3217   // Look for an existing global.
3218   llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
3219 
3220   if (!GV) {
3221     // Create a new global variable.
3222     // Note for the future: If we would ever like to do deferred emission of
3223     // RTTI, check if emitting vtables opportunistically need any adjustment.
3224 
3225     GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
3226                                   /*isConstant=*/true,
3227                                   llvm::GlobalValue::ExternalLinkage, nullptr,
3228                                   Name);
3229     const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3230     CGM.setGVProperties(GV, RD);
3231     // Import the typeinfo symbol when all non-inline virtual methods are
3232     // imported.
3233     if (CGM.getTarget().hasPS4DLLImportExport()) {
3234       if (RD && CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD)) {
3235         GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
3236         CGM.setDSOLocal(GV);
3237       }
3238     }
3239   }
3240 
3241   return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3242 }
3243 
3244 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
3245 /// info for that type is defined in the standard library.
3246 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
3247   // Itanium C++ ABI 2.9.2:
3248   //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
3249   //   the run-time support library. Specifically, the run-time support
3250   //   library should contain type_info objects for the types X, X* and
3251   //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
3252   //   unsigned char, signed char, short, unsigned short, int, unsigned int,
3253   //   long, unsigned long, long long, unsigned long long, float, double,
3254   //   long double, char16_t, char32_t, and the IEEE 754r decimal and
3255   //   half-precision floating point types.
3256   //
3257   // GCC also emits RTTI for __int128.
3258   // FIXME: We do not emit RTTI information for decimal types here.
3259 
3260   // Types added here must also be added to EmitFundamentalRTTIDescriptors.
3261   switch (Ty->getKind()) {
3262     case BuiltinType::Void:
3263     case BuiltinType::NullPtr:
3264     case BuiltinType::Bool:
3265     case BuiltinType::WChar_S:
3266     case BuiltinType::WChar_U:
3267     case BuiltinType::Char_U:
3268     case BuiltinType::Char_S:
3269     case BuiltinType::UChar:
3270     case BuiltinType::SChar:
3271     case BuiltinType::Short:
3272     case BuiltinType::UShort:
3273     case BuiltinType::Int:
3274     case BuiltinType::UInt:
3275     case BuiltinType::Long:
3276     case BuiltinType::ULong:
3277     case BuiltinType::LongLong:
3278     case BuiltinType::ULongLong:
3279     case BuiltinType::Half:
3280     case BuiltinType::Float:
3281     case BuiltinType::Double:
3282     case BuiltinType::LongDouble:
3283     case BuiltinType::Float16:
3284     case BuiltinType::Float128:
3285     case BuiltinType::Ibm128:
3286     case BuiltinType::Char8:
3287     case BuiltinType::Char16:
3288     case BuiltinType::Char32:
3289     case BuiltinType::Int128:
3290     case BuiltinType::UInt128:
3291       return true;
3292 
3293 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3294     case BuiltinType::Id:
3295 #include "clang/Basic/OpenCLImageTypes.def"
3296 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3297     case BuiltinType::Id:
3298 #include "clang/Basic/OpenCLExtensionTypes.def"
3299     case BuiltinType::OCLSampler:
3300     case BuiltinType::OCLEvent:
3301     case BuiltinType::OCLClkEvent:
3302     case BuiltinType::OCLQueue:
3303     case BuiltinType::OCLReserveID:
3304 #define SVE_TYPE(Name, Id, SingletonId) \
3305     case BuiltinType::Id:
3306 #include "clang/Basic/AArch64SVEACLETypes.def"
3307 #define PPC_VECTOR_TYPE(Name, Id, Size) \
3308     case BuiltinType::Id:
3309 #include "clang/Basic/PPCTypes.def"
3310 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3311 #include "clang/Basic/RISCVVTypes.def"
3312     case BuiltinType::ShortAccum:
3313     case BuiltinType::Accum:
3314     case BuiltinType::LongAccum:
3315     case BuiltinType::UShortAccum:
3316     case BuiltinType::UAccum:
3317     case BuiltinType::ULongAccum:
3318     case BuiltinType::ShortFract:
3319     case BuiltinType::Fract:
3320     case BuiltinType::LongFract:
3321     case BuiltinType::UShortFract:
3322     case BuiltinType::UFract:
3323     case BuiltinType::ULongFract:
3324     case BuiltinType::SatShortAccum:
3325     case BuiltinType::SatAccum:
3326     case BuiltinType::SatLongAccum:
3327     case BuiltinType::SatUShortAccum:
3328     case BuiltinType::SatUAccum:
3329     case BuiltinType::SatULongAccum:
3330     case BuiltinType::SatShortFract:
3331     case BuiltinType::SatFract:
3332     case BuiltinType::SatLongFract:
3333     case BuiltinType::SatUShortFract:
3334     case BuiltinType::SatUFract:
3335     case BuiltinType::SatULongFract:
3336     case BuiltinType::BFloat16:
3337       return false;
3338 
3339     case BuiltinType::Dependent:
3340 #define BUILTIN_TYPE(Id, SingletonId)
3341 #define PLACEHOLDER_TYPE(Id, SingletonId) \
3342     case BuiltinType::Id:
3343 #include "clang/AST/BuiltinTypes.def"
3344       llvm_unreachable("asking for RRTI for a placeholder type!");
3345 
3346     case BuiltinType::ObjCId:
3347     case BuiltinType::ObjCClass:
3348     case BuiltinType::ObjCSel:
3349       llvm_unreachable("FIXME: Objective-C types are unsupported!");
3350   }
3351 
3352   llvm_unreachable("Invalid BuiltinType Kind!");
3353 }
3354 
3355 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3356   QualType PointeeTy = PointerTy->getPointeeType();
3357   const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3358   if (!BuiltinTy)
3359     return false;
3360 
3361   // Check the qualifiers.
3362   Qualifiers Quals = PointeeTy.getQualifiers();
3363   Quals.removeConst();
3364 
3365   if (!Quals.empty())
3366     return false;
3367 
3368   return TypeInfoIsInStandardLibrary(BuiltinTy);
3369 }
3370 
3371 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
3372 /// information for the given type exists in the standard library.
3373 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3374   // Type info for builtin types is defined in the standard library.
3375   if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3376     return TypeInfoIsInStandardLibrary(BuiltinTy);
3377 
3378   // Type info for some pointer types to builtin types is defined in the
3379   // standard library.
3380   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3381     return TypeInfoIsInStandardLibrary(PointerTy);
3382 
3383   return false;
3384 }
3385 
3386 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3387 /// the given type exists somewhere else, and that we should not emit the type
3388 /// information in this translation unit.  Assumes that it is not a
3389 /// standard-library type.
3390 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3391                                             QualType Ty) {
3392   ASTContext &Context = CGM.getContext();
3393 
3394   // If RTTI is disabled, assume it might be disabled in the
3395   // translation unit that defines any potential key function, too.
3396   if (!Context.getLangOpts().RTTI) return false;
3397 
3398   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3399     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3400     if (!RD->hasDefinition())
3401       return false;
3402 
3403     if (!RD->isDynamicClass())
3404       return false;
3405 
3406     // FIXME: this may need to be reconsidered if the key function
3407     // changes.
3408     // N.B. We must always emit the RTTI data ourselves if there exists a key
3409     // function.
3410     bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3411 
3412     // Don't import the RTTI but emit it locally.
3413     if (CGM.getTriple().isWindowsGNUEnvironment())
3414       return false;
3415 
3416     if (CGM.getVTables().isVTableExternal(RD)) {
3417       if (CGM.getTarget().hasPS4DLLImportExport())
3418         return true;
3419 
3420       return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3421                  ? false
3422                  : true;
3423     }
3424     if (IsDLLImport)
3425       return true;
3426   }
3427 
3428   return false;
3429 }
3430 
3431 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
3432 static bool IsIncompleteClassType(const RecordType *RecordTy) {
3433   return !RecordTy->getDecl()->isCompleteDefinition();
3434 }
3435 
3436 /// ContainsIncompleteClassType - Returns whether the given type contains an
3437 /// incomplete class type. This is true if
3438 ///
3439 ///   * The given type is an incomplete class type.
3440 ///   * The given type is a pointer type whose pointee type contains an
3441 ///     incomplete class type.
3442 ///   * The given type is a member pointer type whose class is an incomplete
3443 ///     class type.
3444 ///   * The given type is a member pointer type whoise pointee type contains an
3445 ///     incomplete class type.
3446 /// is an indirect or direct pointer to an incomplete class type.
3447 static bool ContainsIncompleteClassType(QualType Ty) {
3448   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3449     if (IsIncompleteClassType(RecordTy))
3450       return true;
3451   }
3452 
3453   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3454     return ContainsIncompleteClassType(PointerTy->getPointeeType());
3455 
3456   if (const MemberPointerType *MemberPointerTy =
3457       dyn_cast<MemberPointerType>(Ty)) {
3458     // Check if the class type is incomplete.
3459     const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3460     if (IsIncompleteClassType(ClassType))
3461       return true;
3462 
3463     return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3464   }
3465 
3466   return false;
3467 }
3468 
3469 // CanUseSingleInheritance - Return whether the given record decl has a "single,
3470 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
3471 // iff the base is)", according to Itanium C++ ABI, 2.95p6b.
3472 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3473   // Check the number of bases.
3474   if (RD->getNumBases() != 1)
3475     return false;
3476 
3477   // Get the base.
3478   CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3479 
3480   // Check that the base is not virtual.
3481   if (Base->isVirtual())
3482     return false;
3483 
3484   // Check that the base is public.
3485   if (Base->getAccessSpecifier() != AS_public)
3486     return false;
3487 
3488   // Check that the class is dynamic iff the base is.
3489   auto *BaseDecl =
3490       cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3491   if (!BaseDecl->isEmpty() &&
3492       BaseDecl->isDynamicClass() != RD->isDynamicClass())
3493     return false;
3494 
3495   return true;
3496 }
3497 
3498 void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
3499   // abi::__class_type_info.
3500   static const char * const ClassTypeInfo =
3501     "_ZTVN10__cxxabiv117__class_type_infoE";
3502   // abi::__si_class_type_info.
3503   static const char * const SIClassTypeInfo =
3504     "_ZTVN10__cxxabiv120__si_class_type_infoE";
3505   // abi::__vmi_class_type_info.
3506   static const char * const VMIClassTypeInfo =
3507     "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
3508 
3509   const char *VTableName = nullptr;
3510 
3511   switch (Ty->getTypeClass()) {
3512 #define TYPE(Class, Base)
3513 #define ABSTRACT_TYPE(Class, Base)
3514 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3515 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3516 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3517 #include "clang/AST/TypeNodes.inc"
3518     llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3519 
3520   case Type::LValueReference:
3521   case Type::RValueReference:
3522     llvm_unreachable("References shouldn't get here");
3523 
3524   case Type::Auto:
3525   case Type::DeducedTemplateSpecialization:
3526     llvm_unreachable("Undeduced type shouldn't get here");
3527 
3528   case Type::Pipe:
3529     llvm_unreachable("Pipe types shouldn't get here");
3530 
3531   case Type::Builtin:
3532   case Type::ExtInt:
3533   // GCC treats vector and complex types as fundamental types.
3534   case Type::Vector:
3535   case Type::ExtVector:
3536   case Type::ConstantMatrix:
3537   case Type::Complex:
3538   case Type::Atomic:
3539   // FIXME: GCC treats block pointers as fundamental types?!
3540   case Type::BlockPointer:
3541     // abi::__fundamental_type_info.
3542     VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
3543     break;
3544 
3545   case Type::ConstantArray:
3546   case Type::IncompleteArray:
3547   case Type::VariableArray:
3548     // abi::__array_type_info.
3549     VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
3550     break;
3551 
3552   case Type::FunctionNoProto:
3553   case Type::FunctionProto:
3554     // abi::__function_type_info.
3555     VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
3556     break;
3557 
3558   case Type::Enum:
3559     // abi::__enum_type_info.
3560     VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
3561     break;
3562 
3563   case Type::Record: {
3564     const CXXRecordDecl *RD =
3565       cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3566 
3567     if (!RD->hasDefinition() || !RD->getNumBases()) {
3568       VTableName = ClassTypeInfo;
3569     } else if (CanUseSingleInheritance(RD)) {
3570       VTableName = SIClassTypeInfo;
3571     } else {
3572       VTableName = VMIClassTypeInfo;
3573     }
3574 
3575     break;
3576   }
3577 
3578   case Type::ObjCObject:
3579     // Ignore protocol qualifiers.
3580     Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
3581 
3582     // Handle id and Class.
3583     if (isa<BuiltinType>(Ty)) {
3584       VTableName = ClassTypeInfo;
3585       break;
3586     }
3587 
3588     assert(isa<ObjCInterfaceType>(Ty));
3589     LLVM_FALLTHROUGH;
3590 
3591   case Type::ObjCInterface:
3592     if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
3593       VTableName = SIClassTypeInfo;
3594     } else {
3595       VTableName = ClassTypeInfo;
3596     }
3597     break;
3598 
3599   case Type::ObjCObjectPointer:
3600   case Type::Pointer:
3601     // abi::__pointer_type_info.
3602     VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
3603     break;
3604 
3605   case Type::MemberPointer:
3606     // abi::__pointer_to_member_type_info.
3607     VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
3608     break;
3609   }
3610 
3611   llvm::Constant *VTable = nullptr;
3612 
3613   // Check if the alias exists. If it doesn't, then get or create the global.
3614   if (CGM.getItaniumVTableContext().isRelativeLayout())
3615     VTable = CGM.getModule().getNamedAlias(VTableName);
3616   if (!VTable)
3617     VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
3618 
3619   CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
3620 
3621   llvm::Type *PtrDiffTy =
3622       CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
3623 
3624   // The vtable address point is 2.
3625   if (CGM.getItaniumVTableContext().isRelativeLayout()) {
3626     // The vtable address point is 8 bytes after its start:
3627     // 4 for the offset to top + 4 for the relative offset to rtti.
3628     llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
3629     VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3630     VTable =
3631         llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
3632   } else {
3633     llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
3634     VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable,
3635                                                           Two);
3636   }
3637   VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3638 
3639   Fields.push_back(VTable);
3640 }
3641 
3642 /// Return the linkage that the type info and type info name constants
3643 /// should have for the given type.
3644 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3645                                                              QualType Ty) {
3646   // Itanium C++ ABI 2.9.5p7:
3647   //   In addition, it and all of the intermediate abi::__pointer_type_info
3648   //   structs in the chain down to the abi::__class_type_info for the
3649   //   incomplete class type must be prevented from resolving to the
3650   //   corresponding type_info structs for the complete class type, possibly
3651   //   by making them local static objects. Finally, a dummy class RTTI is
3652   //   generated for the incomplete type that will not resolve to the final
3653   //   complete class RTTI (because the latter need not exist), possibly by
3654   //   making it a local static object.
3655   if (ContainsIncompleteClassType(Ty))
3656     return llvm::GlobalValue::InternalLinkage;
3657 
3658   switch (Ty->getLinkage()) {
3659   case NoLinkage:
3660   case InternalLinkage:
3661   case UniqueExternalLinkage:
3662     return llvm::GlobalValue::InternalLinkage;
3663 
3664   case VisibleNoLinkage:
3665   case ModuleInternalLinkage:
3666   case ModuleLinkage:
3667   case ExternalLinkage:
3668     // RTTI is not enabled, which means that this type info struct is going
3669     // to be used for exception handling. Give it linkonce_odr linkage.
3670     if (!CGM.getLangOpts().RTTI)
3671       return llvm::GlobalValue::LinkOnceODRLinkage;
3672 
3673     if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
3674       const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
3675       if (RD->hasAttr<WeakAttr>())
3676         return llvm::GlobalValue::WeakODRLinkage;
3677       if (CGM.getTriple().isWindowsItaniumEnvironment())
3678         if (RD->hasAttr<DLLImportAttr>() &&
3679             ShouldUseExternalRTTIDescriptor(CGM, Ty))
3680           return llvm::GlobalValue::ExternalLinkage;
3681       // MinGW always uses LinkOnceODRLinkage for type info.
3682       if (RD->isDynamicClass() &&
3683           !CGM.getContext()
3684                .getTargetInfo()
3685                .getTriple()
3686                .isWindowsGNUEnvironment())
3687         return CGM.getVTableLinkage(RD);
3688     }
3689 
3690     return llvm::GlobalValue::LinkOnceODRLinkage;
3691   }
3692 
3693   llvm_unreachable("Invalid linkage!");
3694 }
3695 
3696 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
3697   // We want to operate on the canonical type.
3698   Ty = Ty.getCanonicalType();
3699 
3700   // Check if we've already emitted an RTTI descriptor for this type.
3701   SmallString<256> Name;
3702   llvm::raw_svector_ostream Out(Name);
3703   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3704 
3705   llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3706   if (OldGV && !OldGV->isDeclaration()) {
3707     assert(!OldGV->hasAvailableExternallyLinkage() &&
3708            "available_externally typeinfos not yet implemented");
3709 
3710     return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
3711   }
3712 
3713   // Check if there is already an external RTTI descriptor for this type.
3714   if (IsStandardLibraryRTTIDescriptor(Ty) ||
3715       ShouldUseExternalRTTIDescriptor(CGM, Ty))
3716     return GetAddrOfExternalRTTIDescriptor(Ty);
3717 
3718   // Emit the standard library with external linkage.
3719   llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
3720 
3721   // Give the type_info object and name the formal visibility of the
3722   // type itself.
3723   llvm::GlobalValue::VisibilityTypes llvmVisibility;
3724   if (llvm::GlobalValue::isLocalLinkage(Linkage))
3725     // If the linkage is local, only default visibility makes sense.
3726     llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3727   else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
3728            ItaniumCXXABI::RUK_NonUniqueHidden)
3729     llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3730   else
3731     llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3732 
3733   llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3734       llvm::GlobalValue::DefaultStorageClass;
3735   if (CGM.getTriple().isWindowsItaniumEnvironment()) {
3736     auto RD = Ty->getAsCXXRecordDecl();
3737     if (RD && RD->hasAttr<DLLExportAttr>())
3738       DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
3739   }
3740 
3741   return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
3742 }
3743 
3744 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
3745       QualType Ty,
3746       llvm::GlobalVariable::LinkageTypes Linkage,
3747       llvm::GlobalValue::VisibilityTypes Visibility,
3748       llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
3749   // Add the vtable pointer.
3750   BuildVTablePointer(cast<Type>(Ty));
3751 
3752   // And the name.
3753   llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
3754   llvm::Constant *TypeNameField;
3755 
3756   // If we're supposed to demote the visibility, be sure to set a flag
3757   // to use a string comparison for type_info comparisons.
3758   ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
3759       CXXABI.classifyRTTIUniqueness(Ty, Linkage);
3760   if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
3761     // The flag is the sign bit, which on ARM64 is defined to be clear
3762     // for global pointers.  This is very ARM64-specific.
3763     TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
3764     llvm::Constant *flag =
3765         llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
3766     TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
3767     TypeNameField =
3768         llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
3769   } else {
3770     TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
3771   }
3772   Fields.push_back(TypeNameField);
3773 
3774   switch (Ty->getTypeClass()) {
3775 #define TYPE(Class, Base)
3776 #define ABSTRACT_TYPE(Class, Base)
3777 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3778 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3779 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3780 #include "clang/AST/TypeNodes.inc"
3781     llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3782 
3783   // GCC treats vector types as fundamental types.
3784   case Type::Builtin:
3785   case Type::Vector:
3786   case Type::ExtVector:
3787   case Type::ConstantMatrix:
3788   case Type::Complex:
3789   case Type::BlockPointer:
3790     // Itanium C++ ABI 2.9.5p4:
3791     // abi::__fundamental_type_info adds no data members to std::type_info.
3792     break;
3793 
3794   case Type::LValueReference:
3795   case Type::RValueReference:
3796     llvm_unreachable("References shouldn't get here");
3797 
3798   case Type::Auto:
3799   case Type::DeducedTemplateSpecialization:
3800     llvm_unreachable("Undeduced type shouldn't get here");
3801 
3802   case Type::Pipe:
3803     break;
3804 
3805   case Type::ExtInt:
3806     break;
3807 
3808   case Type::ConstantArray:
3809   case Type::IncompleteArray:
3810   case Type::VariableArray:
3811     // Itanium C++ ABI 2.9.5p5:
3812     // abi::__array_type_info adds no data members to std::type_info.
3813     break;
3814 
3815   case Type::FunctionNoProto:
3816   case Type::FunctionProto:
3817     // Itanium C++ ABI 2.9.5p5:
3818     // abi::__function_type_info adds no data members to std::type_info.
3819     break;
3820 
3821   case Type::Enum:
3822     // Itanium C++ ABI 2.9.5p5:
3823     // abi::__enum_type_info adds no data members to std::type_info.
3824     break;
3825 
3826   case Type::Record: {
3827     const CXXRecordDecl *RD =
3828       cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3829     if (!RD->hasDefinition() || !RD->getNumBases()) {
3830       // We don't need to emit any fields.
3831       break;
3832     }
3833 
3834     if (CanUseSingleInheritance(RD))
3835       BuildSIClassTypeInfo(RD);
3836     else
3837       BuildVMIClassTypeInfo(RD);
3838 
3839     break;
3840   }
3841 
3842   case Type::ObjCObject:
3843   case Type::ObjCInterface:
3844     BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
3845     break;
3846 
3847   case Type::ObjCObjectPointer:
3848     BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
3849     break;
3850 
3851   case Type::Pointer:
3852     BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
3853     break;
3854 
3855   case Type::MemberPointer:
3856     BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
3857     break;
3858 
3859   case Type::Atomic:
3860     // No fields, at least for the moment.
3861     break;
3862   }
3863 
3864   llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
3865 
3866   SmallString<256> Name;
3867   llvm::raw_svector_ostream Out(Name);
3868   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3869   llvm::Module &M = CGM.getModule();
3870   llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
3871   llvm::GlobalVariable *GV =
3872       new llvm::GlobalVariable(M, Init->getType(),
3873                                /*isConstant=*/true, Linkage, Init, Name);
3874 
3875   // Export the typeinfo in the same circumstances as the vtable is exported.
3876   auto GVDLLStorageClass = DLLStorageClass;
3877   if (CGM.getTarget().hasPS4DLLImportExport()) {
3878     if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3879       const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3880       if (RD->hasAttr<DLLExportAttr>() ||
3881           CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD)) {
3882         GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
3883       }
3884     }
3885   }
3886 
3887   // If there's already an old global variable, replace it with the new one.
3888   if (OldGV) {
3889     GV->takeName(OldGV);
3890     llvm::Constant *NewPtr =
3891       llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3892     OldGV->replaceAllUsesWith(NewPtr);
3893     OldGV->eraseFromParent();
3894   }
3895 
3896   if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3897     GV->setComdat(M.getOrInsertComdat(GV->getName()));
3898 
3899   CharUnits Align =
3900       CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0));
3901   GV->setAlignment(Align.getAsAlign());
3902 
3903   // The Itanium ABI specifies that type_info objects must be globally
3904   // unique, with one exception: if the type is an incomplete class
3905   // type or a (possibly indirect) pointer to one.  That exception
3906   // affects the general case of comparing type_info objects produced
3907   // by the typeid operator, which is why the comparison operators on
3908   // std::type_info generally use the type_info name pointers instead
3909   // of the object addresses.  However, the language's built-in uses
3910   // of RTTI generally require class types to be complete, even when
3911   // manipulating pointers to those class types.  This allows the
3912   // implementation of dynamic_cast to rely on address equality tests,
3913   // which is much faster.
3914 
3915   // All of this is to say that it's important that both the type_info
3916   // object and the type_info name be uniqued when weakly emitted.
3917 
3918   TypeName->setVisibility(Visibility);
3919   CGM.setDSOLocal(TypeName);
3920 
3921   GV->setVisibility(Visibility);
3922   CGM.setDSOLocal(GV);
3923 
3924   TypeName->setDLLStorageClass(DLLStorageClass);
3925   GV->setDLLStorageClass(CGM.getTarget().hasPS4DLLImportExport()
3926                              ? GVDLLStorageClass
3927                              : DLLStorageClass);
3928 
3929   TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3930   GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3931 
3932   return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3933 }
3934 
3935 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3936 /// for the given Objective-C object type.
3937 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3938   // Drop qualifiers.
3939   const Type *T = OT->getBaseType().getTypePtr();
3940   assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3941 
3942   // The builtin types are abi::__class_type_infos and don't require
3943   // extra fields.
3944   if (isa<BuiltinType>(T)) return;
3945 
3946   ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3947   ObjCInterfaceDecl *Super = Class->getSuperClass();
3948 
3949   // Root classes are also __class_type_info.
3950   if (!Super) return;
3951 
3952   QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3953 
3954   // Everything else is single inheritance.
3955   llvm::Constant *BaseTypeInfo =
3956       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3957   Fields.push_back(BaseTypeInfo);
3958 }
3959 
3960 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3961 /// inheritance, according to the Itanium C++ ABI, 2.95p6b.
3962 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3963   // Itanium C++ ABI 2.9.5p6b:
3964   // It adds to abi::__class_type_info a single member pointing to the
3965   // type_info structure for the base type,
3966   llvm::Constant *BaseTypeInfo =
3967     ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3968   Fields.push_back(BaseTypeInfo);
3969 }
3970 
3971 namespace {
3972   /// SeenBases - Contains virtual and non-virtual bases seen when traversing
3973   /// a class hierarchy.
3974   struct SeenBases {
3975     llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
3976     llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
3977   };
3978 }
3979 
3980 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3981 /// abi::__vmi_class_type_info.
3982 ///
3983 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3984                                              SeenBases &Bases) {
3985 
3986   unsigned Flags = 0;
3987 
3988   auto *BaseDecl =
3989       cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3990 
3991   if (Base->isVirtual()) {
3992     // Mark the virtual base as seen.
3993     if (!Bases.VirtualBases.insert(BaseDecl).second) {
3994       // If this virtual base has been seen before, then the class is diamond
3995       // shaped.
3996       Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3997     } else {
3998       if (Bases.NonVirtualBases.count(BaseDecl))
3999         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4000     }
4001   } else {
4002     // Mark the non-virtual base as seen.
4003     if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
4004       // If this non-virtual base has been seen before, then the class has non-
4005       // diamond shaped repeated inheritance.
4006       Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4007     } else {
4008       if (Bases.VirtualBases.count(BaseDecl))
4009         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4010     }
4011   }
4012 
4013   // Walk all bases.
4014   for (const auto &I : BaseDecl->bases())
4015     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
4016 
4017   return Flags;
4018 }
4019 
4020 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
4021   unsigned Flags = 0;
4022   SeenBases Bases;
4023 
4024   // Walk all bases.
4025   for (const auto &I : RD->bases())
4026     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
4027 
4028   return Flags;
4029 }
4030 
4031 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
4032 /// classes with bases that do not satisfy the abi::__si_class_type_info
4033 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
4034 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
4035   llvm::Type *UnsignedIntLTy =
4036     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4037 
4038   // Itanium C++ ABI 2.9.5p6c:
4039   //   __flags is a word with flags describing details about the class
4040   //   structure, which may be referenced by using the __flags_masks
4041   //   enumeration. These flags refer to both direct and indirect bases.
4042   unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
4043   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4044 
4045   // Itanium C++ ABI 2.9.5p6c:
4046   //   __base_count is a word with the number of direct proper base class
4047   //   descriptions that follow.
4048   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
4049 
4050   if (!RD->getNumBases())
4051     return;
4052 
4053   // Now add the base class descriptions.
4054 
4055   // Itanium C++ ABI 2.9.5p6c:
4056   //   __base_info[] is an array of base class descriptions -- one for every
4057   //   direct proper base. Each description is of the type:
4058   //
4059   //   struct abi::__base_class_type_info {
4060   //   public:
4061   //     const __class_type_info *__base_type;
4062   //     long __offset_flags;
4063   //
4064   //     enum __offset_flags_masks {
4065   //       __virtual_mask = 0x1,
4066   //       __public_mask = 0x2,
4067   //       __offset_shift = 8
4068   //     };
4069   //   };
4070 
4071   // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
4072   // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
4073   // LLP64 platforms.
4074   // FIXME: Consider updating libc++abi to match, and extend this logic to all
4075   // LLP64 platforms.
4076   QualType OffsetFlagsTy = CGM.getContext().LongTy;
4077   const TargetInfo &TI = CGM.getContext().getTargetInfo();
4078   if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
4079     OffsetFlagsTy = CGM.getContext().LongLongTy;
4080   llvm::Type *OffsetFlagsLTy =
4081       CGM.getTypes().ConvertType(OffsetFlagsTy);
4082 
4083   for (const auto &Base : RD->bases()) {
4084     // The __base_type member points to the RTTI for the base type.
4085     Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
4086 
4087     auto *BaseDecl =
4088         cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
4089 
4090     int64_t OffsetFlags = 0;
4091 
4092     // All but the lower 8 bits of __offset_flags are a signed offset.
4093     // For a non-virtual base, this is the offset in the object of the base
4094     // subobject. For a virtual base, this is the offset in the virtual table of
4095     // the virtual base offset for the virtual base referenced (negative).
4096     CharUnits Offset;
4097     if (Base.isVirtual())
4098       Offset =
4099         CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
4100     else {
4101       const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
4102       Offset = Layout.getBaseClassOffset(BaseDecl);
4103     };
4104 
4105     OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
4106 
4107     // The low-order byte of __offset_flags contains flags, as given by the
4108     // masks from the enumeration __offset_flags_masks.
4109     if (Base.isVirtual())
4110       OffsetFlags |= BCTI_Virtual;
4111     if (Base.getAccessSpecifier() == AS_public)
4112       OffsetFlags |= BCTI_Public;
4113 
4114     Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
4115   }
4116 }
4117 
4118 /// Compute the flags for a __pbase_type_info, and remove the corresponding
4119 /// pieces from \p Type.
4120 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
4121   unsigned Flags = 0;
4122 
4123   if (Type.isConstQualified())
4124     Flags |= ItaniumRTTIBuilder::PTI_Const;
4125   if (Type.isVolatileQualified())
4126     Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4127   if (Type.isRestrictQualified())
4128     Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4129   Type = Type.getUnqualifiedType();
4130 
4131   // Itanium C++ ABI 2.9.5p7:
4132   //   When the abi::__pbase_type_info is for a direct or indirect pointer to an
4133   //   incomplete class type, the incomplete target type flag is set.
4134   if (ContainsIncompleteClassType(Type))
4135     Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4136 
4137   if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4138     if (Proto->isNothrow()) {
4139       Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
4140       Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
4141     }
4142   }
4143 
4144   return Flags;
4145 }
4146 
4147 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4148 /// used for pointer types.
4149 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4150   // Itanium C++ ABI 2.9.5p7:
4151   //   __flags is a flag word describing the cv-qualification and other
4152   //   attributes of the type pointed to
4153   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4154 
4155   llvm::Type *UnsignedIntLTy =
4156     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4157   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4158 
4159   // Itanium C++ ABI 2.9.5p7:
4160   //  __pointee is a pointer to the std::type_info derivation for the
4161   //  unqualified type being pointed to.
4162   llvm::Constant *PointeeTypeInfo =
4163       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4164   Fields.push_back(PointeeTypeInfo);
4165 }
4166 
4167 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4168 /// struct, used for member pointer types.
4169 void
4170 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4171   QualType PointeeTy = Ty->getPointeeType();
4172 
4173   // Itanium C++ ABI 2.9.5p7:
4174   //   __flags is a flag word describing the cv-qualification and other
4175   //   attributes of the type pointed to.
4176   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4177 
4178   const RecordType *ClassType = cast<RecordType>(Ty->getClass());
4179   if (IsIncompleteClassType(ClassType))
4180     Flags |= PTI_ContainingClassIncomplete;
4181 
4182   llvm::Type *UnsignedIntLTy =
4183     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4184   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4185 
4186   // Itanium C++ ABI 2.9.5p7:
4187   //   __pointee is a pointer to the std::type_info derivation for the
4188   //   unqualified type being pointed to.
4189   llvm::Constant *PointeeTypeInfo =
4190       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4191   Fields.push_back(PointeeTypeInfo);
4192 
4193   // Itanium C++ ABI 2.9.5p9:
4194   //   __context is a pointer to an abi::__class_type_info corresponding to the
4195   //   class type containing the member pointed to
4196   //   (e.g., the "A" in "int A::*").
4197   Fields.push_back(
4198       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
4199 }
4200 
4201 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4202   return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
4203 }
4204 
4205 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
4206   // Types added here must also be added to TypeInfoIsInStandardLibrary.
4207   QualType FundamentalTypes[] = {
4208       getContext().VoidTy,             getContext().NullPtrTy,
4209       getContext().BoolTy,             getContext().WCharTy,
4210       getContext().CharTy,             getContext().UnsignedCharTy,
4211       getContext().SignedCharTy,       getContext().ShortTy,
4212       getContext().UnsignedShortTy,    getContext().IntTy,
4213       getContext().UnsignedIntTy,      getContext().LongTy,
4214       getContext().UnsignedLongTy,     getContext().LongLongTy,
4215       getContext().UnsignedLongLongTy, getContext().Int128Ty,
4216       getContext().UnsignedInt128Ty,   getContext().HalfTy,
4217       getContext().FloatTy,            getContext().DoubleTy,
4218       getContext().LongDoubleTy,       getContext().Float128Ty,
4219       getContext().Char8Ty,            getContext().Char16Ty,
4220       getContext().Char32Ty
4221   };
4222   llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4223       RD->hasAttr<DLLExportAttr>()
4224       ? llvm::GlobalValue::DLLExportStorageClass
4225       : llvm::GlobalValue::DefaultStorageClass;
4226   llvm::GlobalValue::VisibilityTypes Visibility =
4227       CodeGenModule::GetLLVMVisibility(RD->getVisibility());
4228   for (const QualType &FundamentalType : FundamentalTypes) {
4229     QualType PointerType = getContext().getPointerType(FundamentalType);
4230     QualType PointerTypeConst = getContext().getPointerType(
4231         FundamentalType.withConst());
4232     for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
4233       ItaniumRTTIBuilder(*this).BuildTypeInfo(
4234           Type, llvm::GlobalValue::ExternalLinkage,
4235           Visibility, DLLStorageClass);
4236   }
4237 }
4238 
4239 /// What sort of uniqueness rules should we use for the RTTI for the
4240 /// given type?
4241 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4242     QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4243   if (shouldRTTIBeUnique())
4244     return RUK_Unique;
4245 
4246   // It's only necessary for linkonce_odr or weak_odr linkage.
4247   if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4248       Linkage != llvm::GlobalValue::WeakODRLinkage)
4249     return RUK_Unique;
4250 
4251   // It's only necessary with default visibility.
4252   if (CanTy->getVisibility() != DefaultVisibility)
4253     return RUK_Unique;
4254 
4255   // If we're not required to publish this symbol, hide it.
4256   if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4257     return RUK_NonUniqueHidden;
4258 
4259   // If we're required to publish this symbol, as we might be under an
4260   // explicit instantiation, leave it with default visibility but
4261   // enable string-comparisons.
4262   assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4263   return RUK_NonUniqueVisible;
4264 }
4265 
4266 // Find out how to codegen the complete destructor and constructor
4267 namespace {
4268 enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
4269 }
4270 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4271                                        const CXXMethodDecl *MD) {
4272   if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4273     return StructorCodegen::Emit;
4274 
4275   // The complete and base structors are not equivalent if there are any virtual
4276   // bases, so emit separate functions.
4277   if (MD->getParent()->getNumVBases())
4278     return StructorCodegen::Emit;
4279 
4280   GlobalDecl AliasDecl;
4281   if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
4282     AliasDecl = GlobalDecl(DD, Dtor_Complete);
4283   } else {
4284     const auto *CD = cast<CXXConstructorDecl>(MD);
4285     AliasDecl = GlobalDecl(CD, Ctor_Complete);
4286   }
4287   llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4288 
4289   if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4290     return StructorCodegen::RAUW;
4291 
4292   // FIXME: Should we allow available_externally aliases?
4293   if (!llvm::GlobalAlias::isValidLinkage(Linkage))
4294     return StructorCodegen::RAUW;
4295 
4296   if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4297     // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4298     if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4299         CGM.getTarget().getTriple().isOSBinFormatWasm())
4300       return StructorCodegen::COMDAT;
4301     return StructorCodegen::Emit;
4302   }
4303 
4304   return StructorCodegen::Alias;
4305 }
4306 
4307 static void emitConstructorDestructorAlias(CodeGenModule &CGM,
4308                                            GlobalDecl AliasDecl,
4309                                            GlobalDecl TargetDecl) {
4310   llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4311 
4312   StringRef MangledName = CGM.getMangledName(AliasDecl);
4313   llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
4314   if (Entry && !Entry->isDeclaration())
4315     return;
4316 
4317   auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
4318 
4319   // Create the alias with no name.
4320   auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
4321 
4322   // Constructors and destructors are always unnamed_addr.
4323   Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4324 
4325   // Switch any previous uses to the alias.
4326   if (Entry) {
4327     assert(Entry->getType() == Aliasee->getType() &&
4328            "declaration exists with different type");
4329     Alias->takeName(Entry);
4330     Entry->replaceAllUsesWith(Alias);
4331     Entry->eraseFromParent();
4332   } else {
4333     Alias->setName(MangledName);
4334   }
4335 
4336   // Finally, set up the alias with its proper name and attributes.
4337   CGM.SetCommonAttributes(AliasDecl, Alias);
4338 }
4339 
4340 void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
4341   auto *MD = cast<CXXMethodDecl>(GD.getDecl());
4342   auto *CD = dyn_cast<CXXConstructorDecl>(MD);
4343   const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
4344 
4345   StructorCodegen CGType = getCodegenToUse(CGM, MD);
4346 
4347   if (CD ? GD.getCtorType() == Ctor_Complete
4348          : GD.getDtorType() == Dtor_Complete) {
4349     GlobalDecl BaseDecl;
4350     if (CD)
4351       BaseDecl = GD.getWithCtorType(Ctor_Base);
4352     else
4353       BaseDecl = GD.getWithDtorType(Dtor_Base);
4354 
4355     if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
4356       emitConstructorDestructorAlias(CGM, GD, BaseDecl);
4357       return;
4358     }
4359 
4360     if (CGType == StructorCodegen::RAUW) {
4361       StringRef MangledName = CGM.getMangledName(GD);
4362       auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
4363       CGM.addReplacement(MangledName, Aliasee);
4364       return;
4365     }
4366   }
4367 
4368   // The base destructor is equivalent to the base destructor of its
4369   // base class if there is exactly one non-virtual base class with a
4370   // non-trivial destructor, there are no fields with a non-trivial
4371   // destructor, and the body of the destructor is trivial.
4372   if (DD && GD.getDtorType() == Dtor_Base &&
4373       CGType != StructorCodegen::COMDAT &&
4374       !CGM.TryEmitBaseDestructorAsAlias(DD))
4375     return;
4376 
4377   // FIXME: The deleting destructor is equivalent to the selected operator
4378   // delete if:
4379   //  * either the delete is a destroying operator delete or the destructor
4380   //    would be trivial if it weren't virtual,
4381   //  * the conversion from the 'this' parameter to the first parameter of the
4382   //    destructor is equivalent to a bitcast,
4383   //  * the destructor does not have an implicit "this" return, and
4384   //  * the operator delete has the same calling convention and IR function type
4385   //    as the destructor.
4386   // In such cases we should try to emit the deleting dtor as an alias to the
4387   // selected 'operator delete'.
4388 
4389   llvm::Function *Fn = CGM.codegenCXXStructor(GD);
4390 
4391   if (CGType == StructorCodegen::COMDAT) {
4392     SmallString<256> Buffer;
4393     llvm::raw_svector_ostream Out(Buffer);
4394     if (DD)
4395       getMangleContext().mangleCXXDtorComdat(DD, Out);
4396     else
4397       getMangleContext().mangleCXXCtorComdat(CD, Out);
4398     llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
4399     Fn->setComdat(C);
4400   } else {
4401     CGM.maybeSetTrivialComdat(*MD, *Fn);
4402   }
4403 }
4404 
4405 static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4406   // void *__cxa_begin_catch(void*);
4407   llvm::FunctionType *FTy = llvm::FunctionType::get(
4408       CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4409 
4410   return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4411 }
4412 
4413 static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4414   // void __cxa_end_catch();
4415   llvm::FunctionType *FTy =
4416       llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4417 
4418   return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4419 }
4420 
4421 static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4422   // void *__cxa_get_exception_ptr(void*);
4423   llvm::FunctionType *FTy = llvm::FunctionType::get(
4424       CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4425 
4426   return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4427 }
4428 
4429 namespace {
4430   /// A cleanup to call __cxa_end_catch.  In many cases, the caught
4431   /// exception type lets us state definitively that the thrown exception
4432   /// type does not have a destructor.  In particular:
4433   ///   - Catch-alls tell us nothing, so we have to conservatively
4434   ///     assume that the thrown exception might have a destructor.
4435   ///   - Catches by reference behave according to their base types.
4436   ///   - Catches of non-record types will only trigger for exceptions
4437   ///     of non-record types, which never have destructors.
4438   ///   - Catches of record types can trigger for arbitrary subclasses
4439   ///     of the caught type, so we have to assume the actual thrown
4440   ///     exception type might have a throwing destructor, even if the
4441   ///     caught type's destructor is trivial or nothrow.
4442   struct CallEndCatch final : EHScopeStack::Cleanup {
4443     CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4444     bool MightThrow;
4445 
4446     void Emit(CodeGenFunction &CGF, Flags flags) override {
4447       if (!MightThrow) {
4448         CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
4449         return;
4450       }
4451 
4452       CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
4453     }
4454   };
4455 }
4456 
4457 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
4458 /// __cxa_end_catch.
4459 ///
4460 /// \param EndMightThrow - true if __cxa_end_catch might throw
4461 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4462                                    llvm::Value *Exn,
4463                                    bool EndMightThrow) {
4464   llvm::CallInst *call =
4465     CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
4466 
4467   CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
4468 
4469   return call;
4470 }
4471 
4472 /// A "special initializer" callback for initializing a catch
4473 /// parameter during catch initialization.
4474 static void InitCatchParam(CodeGenFunction &CGF,
4475                            const VarDecl &CatchParam,
4476                            Address ParamAddr,
4477                            SourceLocation Loc) {
4478   // Load the exception from where the landing pad saved it.
4479   llvm::Value *Exn = CGF.getExceptionFromSlot();
4480 
4481   CanQualType CatchType =
4482     CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
4483   llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
4484 
4485   // If we're catching by reference, we can just cast the object
4486   // pointer to the appropriate pointer.
4487   if (isa<ReferenceType>(CatchType)) {
4488     QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
4489     bool EndCatchMightThrow = CaughtType->isRecordType();
4490 
4491     // __cxa_begin_catch returns the adjusted object pointer.
4492     llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
4493 
4494     // We have no way to tell the personality function that we're
4495     // catching by reference, so if we're catching a pointer,
4496     // __cxa_begin_catch will actually return that pointer by value.
4497     if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
4498       QualType PointeeType = PT->getPointeeType();
4499 
4500       // When catching by reference, generally we should just ignore
4501       // this by-value pointer and use the exception object instead.
4502       if (!PointeeType->isRecordType()) {
4503 
4504         // Exn points to the struct _Unwind_Exception header, which
4505         // we have to skip past in order to reach the exception data.
4506         unsigned HeaderSize =
4507           CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
4508         AdjustedExn =
4509             CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize);
4510 
4511       // However, if we're catching a pointer-to-record type that won't
4512       // work, because the personality function might have adjusted
4513       // the pointer.  There's actually no way for us to fully satisfy
4514       // the language/ABI contract here:  we can't use Exn because it
4515       // might have the wrong adjustment, but we can't use the by-value
4516       // pointer because it's off by a level of abstraction.
4517       //
4518       // The current solution is to dump the adjusted pointer into an
4519       // alloca, which breaks language semantics (because changing the
4520       // pointer doesn't change the exception) but at least works.
4521       // The better solution would be to filter out non-exact matches
4522       // and rethrow them, but this is tricky because the rethrow
4523       // really needs to be catchable by other sites at this landing
4524       // pad.  The best solution is to fix the personality function.
4525       } else {
4526         // Pull the pointer for the reference type off.
4527         llvm::Type *PtrTy =
4528           cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
4529 
4530         // Create the temporary and write the adjusted pointer into it.
4531         Address ExnPtrTmp =
4532           CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
4533         llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4534         CGF.Builder.CreateStore(Casted, ExnPtrTmp);
4535 
4536         // Bind the reference to the temporary.
4537         AdjustedExn = ExnPtrTmp.getPointer();
4538       }
4539     }
4540 
4541     llvm::Value *ExnCast =
4542       CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
4543     CGF.Builder.CreateStore(ExnCast, ParamAddr);
4544     return;
4545   }
4546 
4547   // Scalars and complexes.
4548   TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
4549   if (TEK != TEK_Aggregate) {
4550     llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
4551 
4552     // If the catch type is a pointer type, __cxa_begin_catch returns
4553     // the pointer by value.
4554     if (CatchType->hasPointerRepresentation()) {
4555       llvm::Value *CastExn =
4556         CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
4557 
4558       switch (CatchType.getQualifiers().getObjCLifetime()) {
4559       case Qualifiers::OCL_Strong:
4560         CastExn = CGF.EmitARCRetainNonBlock(CastExn);
4561         LLVM_FALLTHROUGH;
4562 
4563       case Qualifiers::OCL_None:
4564       case Qualifiers::OCL_ExplicitNone:
4565       case Qualifiers::OCL_Autoreleasing:
4566         CGF.Builder.CreateStore(CastExn, ParamAddr);
4567         return;
4568 
4569       case Qualifiers::OCL_Weak:
4570         CGF.EmitARCInitWeak(ParamAddr, CastExn);
4571         return;
4572       }
4573       llvm_unreachable("bad ownership qualifier!");
4574     }
4575 
4576     // Otherwise, it returns a pointer into the exception object.
4577 
4578     llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
4579     llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4580 
4581     LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
4582     LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
4583     switch (TEK) {
4584     case TEK_Complex:
4585       CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
4586                              /*init*/ true);
4587       return;
4588     case TEK_Scalar: {
4589       llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
4590       CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
4591       return;
4592     }
4593     case TEK_Aggregate:
4594       llvm_unreachable("evaluation kind filtered out!");
4595     }
4596     llvm_unreachable("bad evaluation kind");
4597   }
4598 
4599   assert(isa<RecordType>(CatchType) && "unexpected catch type!");
4600   auto catchRD = CatchType->getAsCXXRecordDecl();
4601   CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
4602 
4603   llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
4604 
4605   // Check for a copy expression.  If we don't have a copy expression,
4606   // that means a trivial copy is okay.
4607   const Expr *copyExpr = CatchParam.getInit();
4608   if (!copyExpr) {
4609     llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
4610     Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4611                         caughtExnAlignment);
4612     LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
4613     LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
4614     CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
4615     return;
4616   }
4617 
4618   // We have to call __cxa_get_exception_ptr to get the adjusted
4619   // pointer before copying.
4620   llvm::CallInst *rawAdjustedExn =
4621     CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
4622 
4623   // Cast that to the appropriate type.
4624   Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4625                       caughtExnAlignment);
4626 
4627   // The copy expression is defined in terms of an OpaqueValueExpr.
4628   // Find it and map it to the adjusted expression.
4629   CodeGenFunction::OpaqueValueMapping
4630     opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
4631            CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
4632 
4633   // Call the copy ctor in a terminate scope.
4634   CGF.EHStack.pushTerminate();
4635 
4636   // Perform the copy construction.
4637   CGF.EmitAggExpr(copyExpr,
4638                   AggValueSlot::forAddr(ParamAddr, Qualifiers(),
4639                                         AggValueSlot::IsNotDestructed,
4640                                         AggValueSlot::DoesNotNeedGCBarriers,
4641                                         AggValueSlot::IsNotAliased,
4642                                         AggValueSlot::DoesNotOverlap));
4643 
4644   // Leave the terminate scope.
4645   CGF.EHStack.popTerminate();
4646 
4647   // Undo the opaque value mapping.
4648   opaque.pop();
4649 
4650   // Finally we can call __cxa_begin_catch.
4651   CallBeginCatch(CGF, Exn, true);
4652 }
4653 
4654 /// Begins a catch statement by initializing the catch variable and
4655 /// calling __cxa_begin_catch.
4656 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4657                                    const CXXCatchStmt *S) {
4658   // We have to be very careful with the ordering of cleanups here:
4659   //   C++ [except.throw]p4:
4660   //     The destruction [of the exception temporary] occurs
4661   //     immediately after the destruction of the object declared in
4662   //     the exception-declaration in the handler.
4663   //
4664   // So the precise ordering is:
4665   //   1.  Construct catch variable.
4666   //   2.  __cxa_begin_catch
4667   //   3.  Enter __cxa_end_catch cleanup
4668   //   4.  Enter dtor cleanup
4669   //
4670   // We do this by using a slightly abnormal initialization process.
4671   // Delegation sequence:
4672   //   - ExitCXXTryStmt opens a RunCleanupsScope
4673   //     - EmitAutoVarAlloca creates the variable and debug info
4674   //       - InitCatchParam initializes the variable from the exception
4675   //       - CallBeginCatch calls __cxa_begin_catch
4676   //       - CallBeginCatch enters the __cxa_end_catch cleanup
4677   //     - EmitAutoVarCleanups enters the variable destructor cleanup
4678   //   - EmitCXXTryStmt emits the code for the catch body
4679   //   - EmitCXXTryStmt close the RunCleanupsScope
4680 
4681   VarDecl *CatchParam = S->getExceptionDecl();
4682   if (!CatchParam) {
4683     llvm::Value *Exn = CGF.getExceptionFromSlot();
4684     CallBeginCatch(CGF, Exn, true);
4685     return;
4686   }
4687 
4688   // Emit the local.
4689   CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
4690   InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
4691   CGF.EmitAutoVarCleanups(var);
4692 }
4693 
4694 /// Get or define the following function:
4695 ///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
4696 /// This code is used only in C++.
4697 static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
4698   llvm::FunctionType *fnTy =
4699     llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4700   llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
4701       fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
4702   llvm::Function *fn =
4703       cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
4704   if (fn->empty()) {
4705     fn->setDoesNotThrow();
4706     fn->setDoesNotReturn();
4707 
4708     // What we really want is to massively penalize inlining without
4709     // forbidding it completely.  The difference between that and
4710     // 'noinline' is negligible.
4711     fn->addFnAttr(llvm::Attribute::NoInline);
4712 
4713     // Allow this function to be shared across translation units, but
4714     // we don't want it to turn into an exported symbol.
4715     fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
4716     fn->setVisibility(llvm::Function::HiddenVisibility);
4717     if (CGM.supportsCOMDAT())
4718       fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
4719 
4720     // Set up the function.
4721     llvm::BasicBlock *entry =
4722         llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
4723     CGBuilderTy builder(CGM, entry);
4724 
4725     // Pull the exception pointer out of the parameter list.
4726     llvm::Value *exn = &*fn->arg_begin();
4727 
4728     // Call __cxa_begin_catch(exn).
4729     llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
4730     catchCall->setDoesNotThrow();
4731     catchCall->setCallingConv(CGM.getRuntimeCC());
4732 
4733     // Call std::terminate().
4734     llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
4735     termCall->setDoesNotThrow();
4736     termCall->setDoesNotReturn();
4737     termCall->setCallingConv(CGM.getRuntimeCC());
4738 
4739     // std::terminate cannot return.
4740     builder.CreateUnreachable();
4741   }
4742   return fnRef;
4743 }
4744 
4745 llvm::CallInst *
4746 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4747                                                    llvm::Value *Exn) {
4748   // In C++, we want to call __cxa_begin_catch() before terminating.
4749   if (Exn) {
4750     assert(CGF.CGM.getLangOpts().CPlusPlus);
4751     return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4752   }
4753   return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
4754 }
4755 
4756 std::pair<llvm::Value *, const CXXRecordDecl *>
4757 ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4758                              const CXXRecordDecl *RD) {
4759   return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
4760 }
4761 
4762 void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4763                                        const CXXCatchStmt *C) {
4764   if (CGF.getTarget().hasFeature("exception-handling"))
4765     CGF.EHStack.pushCleanup<CatchRetScope>(
4766         NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
4767   ItaniumCXXABI::emitBeginCatch(CGF, C);
4768 }
4769 
4770 llvm::CallInst *
4771 WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4772                                                        llvm::Value *Exn) {
4773   // Itanium ABI calls __clang_call_terminate(), which __cxa_begin_catch() on
4774   // the violating exception to mark it handled, but it is currently hard to do
4775   // with wasm EH instruction structure with catch/catch_all, we just call
4776   // std::terminate and ignore the violating exception as in CGCXXABI.
4777   // TODO Consider code transformation that makes calling __clang_call_terminate
4778   // possible.
4779   return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
4780 }
4781 
4782 /// Register a global destructor as best as we know how.
4783 void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
4784                                   llvm::FunctionCallee Dtor,
4785                                   llvm::Constant *Addr) {
4786   if (D.getTLSKind() != VarDecl::TLS_None) {
4787     // atexit routine expects "int(*)(int,...)"
4788     llvm::FunctionType *FTy =
4789         llvm::FunctionType::get(CGM.IntTy, CGM.IntTy, true);
4790     llvm::PointerType *FpTy = FTy->getPointerTo();
4791 
4792     // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
4793     llvm::FunctionType *AtExitTy =
4794         llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, FpTy}, true);
4795 
4796     // Fetch the actual function.
4797     llvm::FunctionCallee AtExit =
4798         CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np");
4799 
4800     // Create __dtor function for the var decl.
4801     llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit);
4802 
4803     // Register above __dtor with atexit().
4804     // First param is flags and must be 0, second param is function ptr
4805     llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy);
4806     CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub});
4807 
4808     // Cannot unregister TLS __dtor so done
4809     return;
4810   }
4811 
4812   // Create __dtor function for the var decl.
4813   llvm::Function *DtorStub = CGF.createAtExitStub(D, Dtor, Addr);
4814 
4815   // Register above __dtor with atexit().
4816   CGF.registerGlobalDtorWithAtExit(DtorStub);
4817 
4818   // Emit __finalize function to unregister __dtor and (as appropriate) call
4819   // __dtor.
4820   emitCXXStermFinalizer(D, DtorStub, Addr);
4821 }
4822 
4823 void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
4824                                      llvm::Constant *addr) {
4825   llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
4826   SmallString<256> FnName;
4827   {
4828     llvm::raw_svector_ostream Out(FnName);
4829     getMangleContext().mangleDynamicStermFinalizer(&D, Out);
4830   }
4831 
4832   // Create the finalization action associated with a variable.
4833   const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
4834   llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
4835       FTy, FnName.str(), FI, D.getLocation());
4836 
4837   CodeGenFunction CGF(CGM);
4838 
4839   CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
4840                     FunctionArgList(), D.getLocation(),
4841                     D.getInit()->getExprLoc());
4842 
4843   // The unatexit subroutine unregisters __dtor functions that were previously
4844   // registered by the atexit subroutine. If the referenced function is found,
4845   // the unatexit returns a value of 0, meaning that the cleanup is still
4846   // pending (and we should call the __dtor function).
4847   llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);
4848 
4849   llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");
4850 
4851   llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
4852   llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");
4853 
4854   // Check if unatexit returns a value of 0. If it does, jump to
4855   // DestructCallBlock, otherwise jump to EndBlock directly.
4856   CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
4857 
4858   CGF.EmitBlock(DestructCallBlock);
4859 
4860   // Emit the call to dtorStub.
4861   llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);
4862 
4863   // Make sure the call and the callee agree on calling convention.
4864   CI->setCallingConv(dtorStub->getCallingConv());
4865 
4866   CGF.EmitBlock(EndBlock);
4867 
4868   CGF.FinishFunction();
4869 
4870   if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
4871     CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
4872                                              IPA->getPriority());
4873   } else if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
4874              getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
4875     // According to C++ [basic.start.init]p2, class template static data
4876     // members (i.e., implicitly or explicitly instantiated specializations)
4877     // have unordered initialization. As a consequence, we can put them into
4878     // their own llvm.global_dtors entry.
4879     CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
4880   } else {
4881     CGM.AddCXXStermFinalizerEntry(StermFinalizer);
4882   }
4883 }
4884