xref: /freebsd/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp (revision 9dba64be9536c28e4800e06512b7f29b43ade345)
1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This provides C++ code generation targeting the Itanium C++ ABI.  The class
10 // in this file generates structures that follow the Itanium C++ ABI, which is
11 // documented at:
12 //  http://www.codesourcery.com/public/cxx-abi/abi.html
13 //  http://www.codesourcery.com/public/cxx-abi/abi-eh.html
14 //
15 // It also supports the closely-related ARM ABI, documented at:
16 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
17 //
18 //===----------------------------------------------------------------------===//
19 
20 #include "CGCXXABI.h"
21 #include "CGCleanup.h"
22 #include "CGRecordLayout.h"
23 #include "CGVTables.h"
24 #include "CodeGenFunction.h"
25 #include "CodeGenModule.h"
26 #include "TargetInfo.h"
27 #include "clang/CodeGen/ConstantInitBuilder.h"
28 #include "clang/AST/Mangle.h"
29 #include "clang/AST/Type.h"
30 #include "clang/AST/StmtCXX.h"
31 #include "llvm/IR/DataLayout.h"
32 #include "llvm/IR/GlobalValue.h"
33 #include "llvm/IR/Instructions.h"
34 #include "llvm/IR/Intrinsics.h"
35 #include "llvm/IR/Value.h"
36 #include "llvm/Support/ScopedPrinter.h"
37 
38 using namespace clang;
39 using namespace CodeGen;
40 
41 namespace {
42 class ItaniumCXXABI : public CodeGen::CGCXXABI {
43   /// VTables - All the vtables which have been defined.
44   llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
45 
46   /// All the thread wrapper functions that have been used.
47   llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
48       ThreadWrappers;
49 
50 protected:
51   bool UseARMMethodPtrABI;
52   bool UseARMGuardVarABI;
53   bool Use32BitVTableOffsetABI;
54 
55   ItaniumMangleContext &getMangleContext() {
56     return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
57   }
58 
59 public:
60   ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
61                 bool UseARMMethodPtrABI = false,
62                 bool UseARMGuardVarABI = false) :
63     CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
64     UseARMGuardVarABI(UseARMGuardVarABI),
65     Use32BitVTableOffsetABI(false) { }
66 
67   bool classifyReturnType(CGFunctionInfo &FI) const override;
68 
69   RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
70     // If C++ prohibits us from making a copy, pass by address.
71     if (!RD->canPassInRegisters())
72       return RAA_Indirect;
73     return RAA_Default;
74   }
75 
76   bool isThisCompleteObject(GlobalDecl GD) const override {
77     // The Itanium ABI has separate complete-object vs.  base-object
78     // variants of both constructors and destructors.
79     if (isa<CXXDestructorDecl>(GD.getDecl())) {
80       switch (GD.getDtorType()) {
81       case Dtor_Complete:
82       case Dtor_Deleting:
83         return true;
84 
85       case Dtor_Base:
86         return false;
87 
88       case Dtor_Comdat:
89         llvm_unreachable("emitting dtor comdat as function?");
90       }
91       llvm_unreachable("bad dtor kind");
92     }
93     if (isa<CXXConstructorDecl>(GD.getDecl())) {
94       switch (GD.getCtorType()) {
95       case Ctor_Complete:
96         return true;
97 
98       case Ctor_Base:
99         return false;
100 
101       case Ctor_CopyingClosure:
102       case Ctor_DefaultClosure:
103         llvm_unreachable("closure ctors in Itanium ABI?");
104 
105       case Ctor_Comdat:
106         llvm_unreachable("emitting ctor comdat as function?");
107       }
108       llvm_unreachable("bad dtor kind");
109     }
110 
111     // No other kinds.
112     return false;
113   }
114 
115   bool isZeroInitializable(const MemberPointerType *MPT) override;
116 
117   llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
118 
119   CGCallee
120     EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
121                                     const Expr *E,
122                                     Address This,
123                                     llvm::Value *&ThisPtrForCall,
124                                     llvm::Value *MemFnPtr,
125                                     const MemberPointerType *MPT) override;
126 
127   llvm::Value *
128     EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
129                                  Address Base,
130                                  llvm::Value *MemPtr,
131                                  const MemberPointerType *MPT) override;
132 
133   llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
134                                            const CastExpr *E,
135                                            llvm::Value *Src) override;
136   llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
137                                               llvm::Constant *Src) override;
138 
139   llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
140 
141   llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
142   llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
143                                         CharUnits offset) override;
144   llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
145   llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
146                                      CharUnits ThisAdjustment);
147 
148   llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
149                                            llvm::Value *L, llvm::Value *R,
150                                            const MemberPointerType *MPT,
151                                            bool Inequality) override;
152 
153   llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
154                                          llvm::Value *Addr,
155                                          const MemberPointerType *MPT) override;
156 
157   void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
158                                Address Ptr, QualType ElementType,
159                                const CXXDestructorDecl *Dtor) override;
160 
161   void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
162   void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
163 
164   void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
165 
166   llvm::CallInst *
167   emitTerminateForUnexpectedException(CodeGenFunction &CGF,
168                                       llvm::Value *Exn) override;
169 
170   void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
171   llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
172   CatchTypeInfo
173   getAddrOfCXXCatchHandlerType(QualType Ty,
174                                QualType CatchHandlerType) override {
175     return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
176   }
177 
178   bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
179   void EmitBadTypeidCall(CodeGenFunction &CGF) override;
180   llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
181                           Address ThisPtr,
182                           llvm::Type *StdTypeInfoPtrTy) override;
183 
184   bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
185                                           QualType SrcRecordTy) override;
186 
187   llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
188                                    QualType SrcRecordTy, QualType DestTy,
189                                    QualType DestRecordTy,
190                                    llvm::BasicBlock *CastEnd) override;
191 
192   llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
193                                      QualType SrcRecordTy,
194                                      QualType DestTy) override;
195 
196   bool EmitBadCastCall(CodeGenFunction &CGF) override;
197 
198   llvm::Value *
199     GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
200                               const CXXRecordDecl *ClassDecl,
201                               const CXXRecordDecl *BaseClassDecl) override;
202 
203   void EmitCXXConstructors(const CXXConstructorDecl *D) override;
204 
205   AddedStructorArgs
206   buildStructorSignature(GlobalDecl GD,
207                          SmallVectorImpl<CanQualType> &ArgTys) override;
208 
209   bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
210                               CXXDtorType DT) const override {
211     // Itanium does not emit any destructor variant as an inline thunk.
212     // Delegating may occur as an optimization, but all variants are either
213     // emitted with external linkage or as linkonce if they are inline and used.
214     return false;
215   }
216 
217   void EmitCXXDestructors(const CXXDestructorDecl *D) override;
218 
219   void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
220                                  FunctionArgList &Params) override;
221 
222   void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
223 
224   AddedStructorArgs
225   addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
226                              CXXCtorType Type, bool ForVirtualBase,
227                              bool Delegating, CallArgList &Args) override;
228 
229   void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
230                           CXXDtorType Type, bool ForVirtualBase,
231                           bool Delegating, Address This,
232                           QualType ThisTy) override;
233 
234   void emitVTableDefinitions(CodeGenVTables &CGVT,
235                              const CXXRecordDecl *RD) override;
236 
237   bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
238                                            CodeGenFunction::VPtr Vptr) override;
239 
240   bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
241     return true;
242   }
243 
244   llvm::Constant *
245   getVTableAddressPoint(BaseSubobject Base,
246                         const CXXRecordDecl *VTableClass) override;
247 
248   llvm::Value *getVTableAddressPointInStructor(
249       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
250       BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
251 
252   llvm::Value *getVTableAddressPointInStructorWithVTT(
253       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
254       BaseSubobject Base, const CXXRecordDecl *NearestVBase);
255 
256   llvm::Constant *
257   getVTableAddressPointForConstExpr(BaseSubobject Base,
258                                     const CXXRecordDecl *VTableClass) override;
259 
260   llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
261                                         CharUnits VPtrOffset) override;
262 
263   CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
264                                      Address This, llvm::Type *Ty,
265                                      SourceLocation Loc) override;
266 
267   llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
268                                          const CXXDestructorDecl *Dtor,
269                                          CXXDtorType DtorType, Address This,
270                                          DeleteOrMemberCallExpr E) override;
271 
272   void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
273 
274   bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
275   bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
276 
277   void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
278                        bool ReturnAdjustment) override {
279     // Allow inlining of thunks by emitting them with available_externally
280     // linkage together with vtables when needed.
281     if (ForVTable && !Thunk->hasLocalLinkage())
282       Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
283     CGM.setGVProperties(Thunk, GD);
284   }
285 
286   bool exportThunk() override { return true; }
287 
288   llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
289                                      const ThisAdjustment &TA) override;
290 
291   llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
292                                        const ReturnAdjustment &RA) override;
293 
294   size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
295                               FunctionArgList &Args) const override {
296     assert(!Args.empty() && "expected the arglist to not be empty!");
297     return Args.size() - 1;
298   }
299 
300   StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
301   StringRef GetDeletedVirtualCallName() override
302     { return "__cxa_deleted_virtual"; }
303 
304   CharUnits getArrayCookieSizeImpl(QualType elementType) override;
305   Address InitializeArrayCookie(CodeGenFunction &CGF,
306                                 Address NewPtr,
307                                 llvm::Value *NumElements,
308                                 const CXXNewExpr *expr,
309                                 QualType ElementType) override;
310   llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
311                                    Address allocPtr,
312                                    CharUnits cookieSize) override;
313 
314   void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
315                        llvm::GlobalVariable *DeclPtr,
316                        bool PerformInit) override;
317   void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
318                           llvm::FunctionCallee dtor,
319                           llvm::Constant *addr) override;
320 
321   llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
322                                                 llvm::Value *Val);
323   void EmitThreadLocalInitFuncs(
324       CodeGenModule &CGM,
325       ArrayRef<const VarDecl *> CXXThreadLocals,
326       ArrayRef<llvm::Function *> CXXThreadLocalInits,
327       ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
328 
329   /// Determine whether we will definitely emit this variable with a constant
330   /// initializer, either because the language semantics demand it or because
331   /// we know that the initializer is a constant.
332   bool isEmittedWithConstantInitializer(const VarDecl *VD) const {
333     VD = VD->getMostRecentDecl();
334     if (VD->hasAttr<ConstInitAttr>())
335       return true;
336 
337     // All later checks examine the initializer specified on the variable. If
338     // the variable is weak, such examination would not be correct.
339     if (VD->isWeak() || VD->hasAttr<SelectAnyAttr>())
340       return false;
341 
342     const VarDecl *InitDecl = VD->getInitializingDeclaration();
343     if (!InitDecl)
344       return false;
345 
346     // If there's no initializer to run, this is constant initialization.
347     if (!InitDecl->hasInit())
348       return true;
349 
350     // If we have the only definition, we don't need a thread wrapper if we
351     // will emit the value as a constant.
352     if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD)))
353       return !VD->needsDestruction(getContext()) && InitDecl->evaluateValue();
354 
355     // Otherwise, we need a thread wrapper unless we know that every
356     // translation unit will emit the value as a constant. We rely on
357     // ICE-ness not varying between translation units, which isn't actually
358     // guaranteed by the standard but is necessary for sanity.
359     return InitDecl->isInitKnownICE() && InitDecl->isInitICE();
360   }
361 
362   bool usesThreadWrapperFunction(const VarDecl *VD) const override {
363     return !isEmittedWithConstantInitializer(VD) ||
364            VD->needsDestruction(getContext());
365   }
366   LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
367                                       QualType LValType) override;
368 
369   bool NeedsVTTParameter(GlobalDecl GD) override;
370 
371   /**************************** RTTI Uniqueness ******************************/
372 
373 protected:
374   /// Returns true if the ABI requires RTTI type_info objects to be unique
375   /// across a program.
376   virtual bool shouldRTTIBeUnique() const { return true; }
377 
378 public:
379   /// What sort of unique-RTTI behavior should we use?
380   enum RTTIUniquenessKind {
381     /// We are guaranteeing, or need to guarantee, that the RTTI string
382     /// is unique.
383     RUK_Unique,
384 
385     /// We are not guaranteeing uniqueness for the RTTI string, so we
386     /// can demote to hidden visibility but must use string comparisons.
387     RUK_NonUniqueHidden,
388 
389     /// We are not guaranteeing uniqueness for the RTTI string, so we
390     /// have to use string comparisons, but we also have to emit it with
391     /// non-hidden visibility.
392     RUK_NonUniqueVisible
393   };
394 
395   /// Return the required visibility status for the given type and linkage in
396   /// the current ABI.
397   RTTIUniquenessKind
398   classifyRTTIUniqueness(QualType CanTy,
399                          llvm::GlobalValue::LinkageTypes Linkage) const;
400   friend class ItaniumRTTIBuilder;
401 
402   void emitCXXStructor(GlobalDecl GD) override;
403 
404   std::pair<llvm::Value *, const CXXRecordDecl *>
405   LoadVTablePtr(CodeGenFunction &CGF, Address This,
406                 const CXXRecordDecl *RD) override;
407 
408  private:
409    bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
410      const auto &VtableLayout =
411          CGM.getItaniumVTableContext().getVTableLayout(RD);
412 
413      for (const auto &VtableComponent : VtableLayout.vtable_components()) {
414        // Skip empty slot.
415        if (!VtableComponent.isUsedFunctionPointerKind())
416          continue;
417 
418        const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
419        if (!Method->getCanonicalDecl()->isInlined())
420          continue;
421 
422        StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
423        auto *Entry = CGM.GetGlobalValue(Name);
424        // This checks if virtual inline function has already been emitted.
425        // Note that it is possible that this inline function would be emitted
426        // after trying to emit vtable speculatively. Because of this we do
427        // an extra pass after emitting all deferred vtables to find and emit
428        // these vtables opportunistically.
429        if (!Entry || Entry->isDeclaration())
430          return true;
431      }
432      return false;
433   }
434 
435   bool isVTableHidden(const CXXRecordDecl *RD) const {
436     const auto &VtableLayout =
437             CGM.getItaniumVTableContext().getVTableLayout(RD);
438 
439     for (const auto &VtableComponent : VtableLayout.vtable_components()) {
440       if (VtableComponent.isRTTIKind()) {
441         const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
442         if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
443           return true;
444       } else if (VtableComponent.isUsedFunctionPointerKind()) {
445         const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
446         if (Method->getVisibility() == Visibility::HiddenVisibility &&
447             !Method->isDefined())
448           return true;
449       }
450     }
451     return false;
452   }
453 };
454 
/// C++ ABI variant used on 32-bit ARM-family targets.  Enables the
/// ARM-style member-pointer and guard-variable encodings, makes
/// constructors and non-deleting destructors return 'this', and
/// overrides the array-cookie hooks and thunk-return emission
/// (definitions appear later in this file).
class ARMCXXABI : public ItaniumCXXABI {
public:
  ARMCXXABI(CodeGen::CodeGenModule &CGM) :
    ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                  /*UseARMGuardVarABI=*/true) {}

  // Constructors, and destructors other than the deleting variant,
  // return 'this' in the ARM ABI.
  bool HasThisReturn(GlobalDecl GD) const override {
    return (isa<CXXConstructorDecl>(GD.getDecl()) || (
              isa<CXXDestructorDecl>(GD.getDecl()) &&
              GD.getDtorType() != Dtor_Deleting));
  }

  void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
                           QualType ResTy) override;

  // ARM uses its own array-cookie layout (see the ARM C++ ABI document
  // referenced in the file header).
  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
  Address InitializeArrayCookie(CodeGenFunction &CGF,
                                Address NewPtr,
                                llvm::Value *NumElements,
                                const CXXNewExpr *expr,
                                QualType ElementType) override;
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
                                   CharUnits cookieSize) override;
};
479 
/// C++ ABI variant for 64-bit Apple ARM platforms: the ARM ABI plus
/// 32-bit vtable offsets in member function pointers and non-unique RTTI.
class iOS64CXXABI : public ARMCXXABI {
public:
  iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
    // Only the low 32 bits of a virtual member function pointer's vtable
    // offset are significant on this target.
    Use32BitVTableOffsetABI = true;
  }

  // ARM64 libraries are prepared for non-unique RTTI.
  bool shouldRTTIBeUnique() const override { return false; }
};
489 
/// C++ ABI variant for WebAssembly targets.  Reuses the ARM-style
/// member-pointer and guard-variable encodings, makes structors return
/// 'this', and provides its own emitBeginCatch lowering (defined later
/// in this file).
class WebAssemblyCXXABI final : public ItaniumCXXABI {
public:
  explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                      /*UseARMGuardVarABI=*/true) {}
  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;

private:
  // Like ARM: constructors and non-deleting destructors return 'this'.
  bool HasThisReturn(GlobalDecl GD) const override {
    return isa<CXXConstructorDecl>(GD.getDecl()) ||
           (isa<CXXDestructorDecl>(GD.getDecl()) &&
            GD.getDtorType() != Dtor_Deleting);
  }
  // Calls through a mismatched function type are not permitted on this
  // target.
  bool canCallMismatchedFunctionType() const override { return false; }
};
505 }
506 
507 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
508   switch (CGM.getTarget().getCXXABI().getKind()) {
509   // For IR-generation purposes, there's no significant difference
510   // between the ARM and iOS ABIs.
511   case TargetCXXABI::GenericARM:
512   case TargetCXXABI::iOS:
513   case TargetCXXABI::WatchOS:
514     return new ARMCXXABI(CGM);
515 
516   case TargetCXXABI::iOS64:
517     return new iOS64CXXABI(CGM);
518 
519   // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
520   // include the other 32-bit ARM oddities: constructor/destructor return values
521   // and array cookies.
522   case TargetCXXABI::GenericAArch64:
523     return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
524                              /*UseARMGuardVarABI=*/true);
525 
526   case TargetCXXABI::GenericMIPS:
527     return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
528 
529   case TargetCXXABI::WebAssembly:
530     return new WebAssemblyCXXABI(CGM);
531 
532   case TargetCXXABI::GenericItanium:
533     if (CGM.getContext().getTargetInfo().getTriple().getArch()
534         == llvm::Triple::le32) {
535       // For PNaCl, use ARM-style method pointers so that PNaCl code
536       // does not assume anything about the alignment of function
537       // pointers.
538       return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
539     }
540     return new ItaniumCXXABI(CGM);
541 
542   case TargetCXXABI::Microsoft:
543     llvm_unreachable("Microsoft ABI is not Itanium-based");
544   }
545   llvm_unreachable("bad ABI kind");
546 }
547 
548 llvm::Type *
549 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
550   if (MPT->isMemberDataPointer())
551     return CGM.PtrDiffTy;
552   return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
553 }
554 
555 /// In the Itanium and ARM ABIs, method pointers have the form:
556 ///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
557 ///
558 /// In the Itanium ABI:
559 ///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
560 ///  - the this-adjustment is (memptr.adj)
561 ///  - the virtual offset is (memptr.ptr - 1)
562 ///
563 /// In the ARM ABI:
564 ///  - method pointers are virtual if (memptr.adj & 1) is nonzero
565 ///  - the this-adjustment is (memptr.adj >> 1)
566 ///  - the virtual offset is (memptr.ptr)
567 /// ARM uses 'adj' for the virtual flag because Thumb functions
568 /// may be only single-byte aligned.
569 ///
570 /// If the member is virtual, the adjusted 'this' pointer points
571 /// to a vtable pointer from which the virtual offset is applied.
572 ///
573 /// If the member is non-virtual, memptr.ptr is the address of
574 /// the function to call.
575 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
576     CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
577     llvm::Value *&ThisPtrForCall,
578     llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
579   CGBuilderTy &Builder = CGF.Builder;
580 
581   const FunctionProtoType *FPT =
582     MPT->getPointeeType()->getAs<FunctionProtoType>();
583   auto *RD =
584       cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
585 
586   llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
587       CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
588 
589   llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
590 
591   llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
592   llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
593   llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
594 
595   // Extract memptr.adj, which is in the second field.
596   llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
597 
598   // Compute the true adjustment.
599   llvm::Value *Adj = RawAdj;
600   if (UseARMMethodPtrABI)
601     Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
602 
603   // Apply the adjustment and cast back to the original struct type
604   // for consistency.
605   llvm::Value *This = ThisAddr.getPointer();
606   llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
607   Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
608   This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
609   ThisPtrForCall = This;
610 
611   // Load the function pointer.
612   llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
613 
614   // If the LSB in the function pointer is 1, the function pointer points to
615   // a virtual function.
616   llvm::Value *IsVirtual;
617   if (UseARMMethodPtrABI)
618     IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
619   else
620     IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
621   IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
622   Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
623 
624   // In the virtual path, the adjustment left 'This' pointing to the
625   // vtable of the correct base subobject.  The "function pointer" is an
626   // offset within the vtable (+1 for the virtual flag on non-ARM).
627   CGF.EmitBlock(FnVirtual);
628 
629   // Cast the adjusted this to a pointer to vtable pointer and load.
630   llvm::Type *VTableTy = Builder.getInt8PtrTy();
631   CharUnits VTablePtrAlign =
632     CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
633                                       CGF.getPointerAlign());
634   llvm::Value *VTable =
635     CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
636 
637   // Apply the offset.
638   // On ARM64, to reserve extra space in virtual member function pointers,
639   // we only pay attention to the low 32 bits of the offset.
640   llvm::Value *VTableOffset = FnAsInt;
641   if (!UseARMMethodPtrABI)
642     VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
643   if (Use32BitVTableOffsetABI) {
644     VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
645     VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
646   }
647 
648   // Check the address of the function pointer if CFI on member function
649   // pointers is enabled.
650   llvm::Constant *CheckSourceLocation;
651   llvm::Constant *CheckTypeDesc;
652   bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
653                             CGM.HasHiddenLTOVisibility(RD);
654   bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
655                            CGM.HasHiddenLTOVisibility(RD);
656   llvm::Value *VirtualFn = nullptr;
657 
658   {
659     CodeGenFunction::SanitizerScope SanScope(&CGF);
660     llvm::Value *TypeId = nullptr;
661     llvm::Value *CheckResult = nullptr;
662 
663     if (ShouldEmitCFICheck || ShouldEmitVFEInfo) {
664       // If doing CFI or VFE, we will need the metadata node to check against.
665       llvm::Metadata *MD =
666           CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
667       TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
668     }
669 
670     llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
671 
672     if (ShouldEmitVFEInfo) {
673       // If doing VFE, load from the vtable with a type.checked.load intrinsic
674       // call. Note that we use the GEP to calculate the address to load from
675       // and pass 0 as the offset to the intrinsic. This is because every
676       // vtable slot of the correct type is marked with matching metadata, and
677       // we know that the load must be from one of these slots.
678       llvm::Value *CheckedLoad = Builder.CreateCall(
679           CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
680           {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
681       CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
682       VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
683       VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(),
684                                         "memptr.virtualfn");
685     } else {
686       // When not doing VFE, emit a normal load, as it allows more
687       // optimisations than type.checked.load.
688       if (ShouldEmitCFICheck) {
689         CheckResult = Builder.CreateCall(
690             CGM.getIntrinsic(llvm::Intrinsic::type_test),
691             {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
692       }
693       VFPAddr =
694           Builder.CreateBitCast(VFPAddr, FTy->getPointerTo()->getPointerTo());
695       VirtualFn = Builder.CreateAlignedLoad(VFPAddr, CGF.getPointerAlign(),
696                                             "memptr.virtualfn");
697     }
698     assert(VirtualFn && "Virtual fuction pointer not created!");
699     assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || CheckResult) &&
700            "Check result required but not created!");
701 
702     if (ShouldEmitCFICheck) {
703       // If doing CFI, emit the check.
704       CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
705       CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
706       llvm::Constant *StaticData[] = {
707           llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
708           CheckSourceLocation,
709           CheckTypeDesc,
710       };
711 
712       if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
713         CGF.EmitTrapCheck(CheckResult);
714       } else {
715         llvm::Value *AllVtables = llvm::MetadataAsValue::get(
716             CGM.getLLVMContext(),
717             llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
718         llvm::Value *ValidVtable = Builder.CreateCall(
719             CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
720         CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
721                       SanitizerHandler::CFICheckFail, StaticData,
722                       {VTable, ValidVtable});
723       }
724 
725       FnVirtual = Builder.GetInsertBlock();
726     }
727   } // End of sanitizer scope
728 
729   CGF.EmitBranch(FnEnd);
730 
731   // In the non-virtual path, the function pointer is actually a
732   // function pointer.
733   CGF.EmitBlock(FnNonVirtual);
734   llvm::Value *NonVirtualFn =
735     Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
736 
737   // Check the function pointer if CFI on member function pointers is enabled.
738   if (ShouldEmitCFICheck) {
739     CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
740     if (RD->hasDefinition()) {
741       CodeGenFunction::SanitizerScope SanScope(&CGF);
742 
743       llvm::Constant *StaticData[] = {
744           llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
745           CheckSourceLocation,
746           CheckTypeDesc,
747       };
748 
749       llvm::Value *Bit = Builder.getFalse();
750       llvm::Value *CastedNonVirtualFn =
751           Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
752       for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
753         llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
754             getContext().getMemberPointerType(
755                 MPT->getPointeeType(),
756                 getContext().getRecordType(Base).getTypePtr()));
757         llvm::Value *TypeId =
758             llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
759 
760         llvm::Value *TypeTest =
761             Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
762                                {CastedNonVirtualFn, TypeId});
763         Bit = Builder.CreateOr(Bit, TypeTest);
764       }
765 
766       CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
767                     SanitizerHandler::CFICheckFail, StaticData,
768                     {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
769 
770       FnNonVirtual = Builder.GetInsertBlock();
771     }
772   }
773 
774   // We're done.
775   CGF.EmitBlock(FnEnd);
776   llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
777   CalleePtr->addIncoming(VirtualFn, FnVirtual);
778   CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
779 
780   CGCallee Callee(FPT, CalleePtr);
781   return Callee;
782 }
783 
784 /// Compute an l-value by applying the given pointer-to-member to a
785 /// base object.
786 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
787     CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
788     const MemberPointerType *MPT) {
789   assert(MemPtr->getType() == CGM.PtrDiffTy);
790 
791   CGBuilderTy &Builder = CGF.Builder;
792 
793   // Cast to char*.
794   Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
795 
796   // Apply the offset, which we assume is non-null.
797   llvm::Value *Addr =
798     Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset");
799 
800   // Cast the address to the appropriate pointer type, adopting the
801   // address space of the base pointer.
802   llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
803                             ->getPointerTo(Base.getAddressSpace());
804   return Builder.CreateBitCast(Addr, PType);
805 }
806 
/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
/// conversion.
///
/// Bitcast conversions are always a no-op under Itanium.
///
/// Obligatory offset/adjustment diagram:
///         <-- offset -->          <-- adjustment -->
///   |--------------------------|----------------------|--------------------|
///   ^Derived address point     ^Base address point    ^Member address point
///
/// So when converting a base member pointer to a derived member pointer,
/// we add the offset to the adjustment because the address point has
/// decreased;  and conversely, when converting a derived MP to a base MP
/// we subtract the offset from the adjustment because the address point
/// has increased.
///
/// The standard forbids (at compile time) conversion to and from
/// virtual bases, which is why we don't have to consider them here.
///
/// The standard forbids (at run time) casting a derived MP to a base
/// MP when the derived MP does not point to a member of the base.
/// This is why -1 is a reasonable choice for null data member
/// pointers.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // Use constant emission if we can.
  if (isa<llvm::Constant>(src))
    return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));

  // A null adjustment means the representation is unchanged by the cast.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  CGBuilderTy &Builder = CGF.Builder;
  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    // Compute the adjusted offset unconditionally ...
    llvm::Value *dst;
    if (isDerivedToBase)
      dst = Builder.CreateNSWSub(src, adj, "adj");
    else
      dst = Builder.CreateNSWAdd(src, adj, "adj");

    // ... then select the source back if it was the null value (-1),
    // which conversions must preserve.
    llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
    llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
    return Builder.CreateSelect(isNull, src, dst);
  }

  // The this-adjustment is left-shifted by 1 on ARM, because there the
  // low bit of adj is the "is virtual" discriminator.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  // Member function pointers: rewrite only the adj field of the
  // {ptr, adj} pair; a null function pointer (ptr == 0) stays null
  // regardless of its adjustment.
  llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
  llvm::Value *dstAdj;
  if (isDerivedToBase)
    dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
  else
    dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");

  return Builder.CreateInsertValue(src, dstAdj, 1);
}
885 
886 llvm::Constant *
887 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
888                                            llvm::Constant *src) {
889   assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
890          E->getCastKind() == CK_BaseToDerivedMemberPointer ||
891          E->getCastKind() == CK_ReinterpretMemberPointer);
892 
893   // Under Itanium, reinterprets don't require any additional processing.
894   if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
895 
896   // If the adjustment is trivial, we don't need to do anything.
897   llvm::Constant *adj = getMemberPointerAdjustment(E);
898   if (!adj) return src;
899 
900   bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
901 
902   const MemberPointerType *destTy =
903     E->getType()->castAs<MemberPointerType>();
904 
905   // For member data pointers, this is just a matter of adding the
906   // offset if the source is non-null.
907   if (destTy->isMemberDataPointer()) {
908     // null maps to null.
909     if (src->isAllOnesValue()) return src;
910 
911     if (isDerivedToBase)
912       return llvm::ConstantExpr::getNSWSub(src, adj);
913     else
914       return llvm::ConstantExpr::getNSWAdd(src, adj);
915   }
916 
917   // The this-adjustment is left-shifted by 1 on ARM.
918   if (UseARMMethodPtrABI) {
919     uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
920     offset <<= 1;
921     adj = llvm::ConstantInt::get(adj->getType(), offset);
922   }
923 
924   llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
925   llvm::Constant *dstAdj;
926   if (isDerivedToBase)
927     dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
928   else
929     dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
930 
931   return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
932 }
933 
934 llvm::Constant *
935 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
936   // Itanium C++ ABI 2.3:
937   //   A NULL pointer is represented as -1.
938   if (MPT->isMemberDataPointer())
939     return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
940 
941   llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
942   llvm::Constant *Values[2] = { Zero, Zero };
943   return llvm::ConstantStruct::getAnon(Values);
944 }
945 
946 llvm::Constant *
947 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
948                                      CharUnits offset) {
949   // Itanium C++ ABI 2.3:
950   //   A pointer to data member is an offset from the base address of
951   //   the class object containing it, represented as a ptrdiff_t
952   return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
953 }
954 
955 llvm::Constant *
956 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
957   return BuildMemberPointer(MD, CharUnits::Zero());
958 }
959 
/// Build the constant {ptr, adj} pair for a pointer to the member
/// function MD, applying the given this-adjustment (in bytes).
llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");

  CodeGenTypes &Types = CGM.getTypes();

  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);

    // Convert the vtable slot index into a byte offset.
    const ASTContext &Context = getContext();
    CharUnits PointerWidth =
      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
    uint64_t VTableOffset = (Index * PointerWidth.getQuantity());

    if (UseARMMethodPtrABI) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         2 * ThisAdjustment.getQuantity() + 1);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);

    // Non-virtual: ptr holds the function address. ARM doubles the
    // adjustment so the low discriminator bit of adj stays clear.
    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                       (UseARMMethodPtrABI ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}
1017 
1018 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
1019                                                  QualType MPType) {
1020   const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
1021   const ValueDecl *MPD = MP.getMemberPointerDecl();
1022   if (!MPD)
1023     return EmitNullMemberPointer(MPT);
1024 
1025   CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);
1026 
1027   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
1028     return BuildMemberPointer(MD, ThisAdjustment);
1029 
1030   CharUnits FieldOffset =
1031     getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
1032   return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
1033 }
1034 
/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  // Pick the predicate and connectives. For inequality we emit the
  // De Morgan dual of the equality formula, which is why And and Or
  // are deliberately swapped below.
  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr.  This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null.  ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj.  If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}
1112 
1113 llvm::Value *
1114 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1115                                           llvm::Value *MemPtr,
1116                                           const MemberPointerType *MPT) {
1117   CGBuilderTy &Builder = CGF.Builder;
1118 
1119   /// For member data pointers, this is just a check against -1.
1120   if (MPT->isMemberDataPointer()) {
1121     assert(MemPtr->getType() == CGM.PtrDiffTy);
1122     llvm::Value *NegativeOne =
1123       llvm::Constant::getAllOnesValue(MemPtr->getType());
1124     return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
1125   }
1126 
1127   // In Itanium, a member function pointer is not null if 'ptr' is not null.
1128   llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
1129 
1130   llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
1131   llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
1132 
1133   // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1134   // (the virtual bit) is set.
1135   if (UseARMMethodPtrABI) {
1136     llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
1137     llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
1138     llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
1139     llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
1140                                                   "memptr.isvirtual");
1141     Result = Builder.CreateOr(Result, IsVirtual);
1142   }
1143 
1144   return Result;
1145 }
1146 
1147 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1148   const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1149   if (!RD)
1150     return false;
1151 
1152   // If C++ prohibits us from making a copy, return by address.
1153   if (!RD->canPassInRegisters()) {
1154     auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1155     FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1156     return true;
1157   }
1158   return false;
1159 }
1160 
1161 /// The Itanium ABI requires non-zero initialization only for data
1162 /// member pointers, for which '0' is a valid offset.
1163 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1164   return MPT->isMemberFunctionPointer();
1165 }
1166 
/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
///
/// Emits a virtual destructor call for a delete-expression, plus the
/// global operator delete call (as an EH-safe cleanup) when a
/// ::delete was written.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            Address Ptr,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
    auto *ClassDecl =
        cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
    llvm::Value *VTable =
        CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);

    // Track back to entry -2 and pull out the offset there.
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        VTable, -2, "complete-offset.ptr");
    llvm::Value *Offset =
      CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());

    // Apply the offset.
    llvm::Value *CompletePtr =
      CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
    CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws.
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
                                    ElementType);
  }

  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  // For ::delete, only the complete-object destructor runs here (the
  // deallocation is handled by the cleanup pushed above); otherwise
  // the deleting destructor both destroys and deallocates.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);

  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
}
1210 
1211 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1212   // void __cxa_rethrow();
1213 
1214   llvm::FunctionType *FTy =
1215     llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1216 
1217   llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1218 
1219   if (isNoReturn)
1220     CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1221   else
1222     CGF.EmitRuntimeCallOrInvoke(Fn);
1223 }
1224 
1225 static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1226   // void *__cxa_allocate_exception(size_t thrown_size);
1227 
1228   llvm::FunctionType *FTy =
1229     llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1230 
1231   return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1232 }
1233 
1234 static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1235   // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1236   //                  void (*dest) (void *));
1237 
1238   llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1239   llvm::FunctionType *FTy =
1240     llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1241 
1242   return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1243 }
1244 
/// Lower 'throw <expr>' into the __cxa_allocate_exception /
/// __cxa_throw sequence of the Itanium EH ABI.
void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
  QualType ThrowType = E->getSubExpr()->getType();
  // Now allocate the exception object.
  llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
  uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();

  llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
      AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");

  // Construct the thrown value directly into the allocated slot.
  CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
  CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));

  // Now throw the exception.
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
                                                         /*ForEH=*/true);

  // The address of the destructor.  If the exception type has a
  // trivial destructor (or isn't a record), we just pass null.
  llvm::Constant *Dtor = nullptr;
  if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
    CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!Record->hasTrivialDestructor()) {
      CXXDestructorDecl *DtorD = Record->getDestructor();
      Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
      Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
    }
  }
  if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);

  // __cxa_throw transfers control to the unwinder and never returns.
  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
  CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
}
1278 
1279 static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1280   // void *__dynamic_cast(const void *sub,
1281   //                      const abi::__class_type_info *src,
1282   //                      const abi::__class_type_info *dst,
1283   //                      std::ptrdiff_t src2dst_offset);
1284 
1285   llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1286   llvm::Type *PtrDiffTy =
1287     CGF.ConvertType(CGF.getContext().getPointerDiffType());
1288 
1289   llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1290 
1291   llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1292 
1293   // Mark the function as nounwind readonly.
1294   llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1295                                             llvm::Attribute::ReadOnly };
1296   llvm::AttributeList Attrs = llvm::AttributeList::get(
1297       CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1298 
1299   return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1300 }
1301 
1302 static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1303   // void __cxa_bad_cast();
1304   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1305   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1306 }
1307 
/// Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7]
///
/// The hint is either the static byte offset of Src within Dst (when
/// Src is a unique public non-virtual base) or a negative sentinel:
/// -1 = no hint, -2 = Src is not a public base of Dst, -3 = Src is a
/// public base more than once but never a virtual base.
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation below and
  // return that Src is not a public base of Dst.  Record all inheritance paths.
  if (!Dst->isDerivedFrom(Src, Paths))
    return CharUnits::fromQuantity(-2ULL);

  unsigned NumPublicPaths = 0;
  CharUnits Offset;

  // Now walk all possible inheritance paths.
  for (const CXXBasePath &Path : Paths) {
    if (Path.Access != AS_public)  // Ignore non-public inheritance.
      continue;

    ++NumPublicPaths;

    for (const CXXBasePathElement &PathElement : Path) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: no hint.
      if (PathElement.Base->isVirtual())
        return CharUnits::fromQuantity(-1ULL);

      // Offsets only matter when there is exactly one public path, so
      // once a second path is seen, keep scanning for virtual bases
      // but stop accumulating.
      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
        continue;

      // Accumulate the base class offsets.
      const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
      Offset += L.getBaseClassOffset(
          PathElement.Base->getType()->getAsCXXRecordDecl());
    }
  }

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(-2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(-3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
  return Offset;
}
1359 
1360 static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1361   // void __cxa_bad_typeid();
1362   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1363 
1364   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1365 }
1366 
1367 bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1368                                               QualType SrcRecordTy) {
1369   return IsDeref;
1370 }
1371 
1372 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1373   llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1374   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1375   Call->setDoesNotReturn();
1376   CGF.Builder.CreateUnreachable();
1377 }
1378 
1379 llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1380                                        QualType SrcRecordTy,
1381                                        Address ThisPtr,
1382                                        llvm::Type *StdTypeInfoPtrTy) {
1383   auto *ClassDecl =
1384       cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1385   llvm::Value *Value =
1386       CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
1387 
1388   // Load the type info.
1389   Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
1390   return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
1391 }
1392 
1393 bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1394                                                        QualType SrcRecordTy) {
1395   return SrcIsPtr;
1396 }
1397 
/// Emit a call to the __dynamic_cast runtime function, plus the
/// bad_cast path required when the destination is a reference type.
llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  // The runtime identifies source and destination classes by RTTI.
  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint = llvm::ConstantInt::get(
      PtrDiffLTy,
      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);

  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
        CGF.createBasicBlock("dynamic_cast.bad_cast");

    // __dynamic_cast returns null on failure; in that case branch to
    // the throwing block, otherwise continue to CastEnd.
    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}
1440 
/// Emit dynamic_cast<void*>: adjust the pointer to the most-derived
/// (complete) object using the offset-to-top slot of the vtable.
llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  Address ThisAddr,
                                                  QualType SrcRecordTy,
                                                  QualType DestTy) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  // Get the vtable pointer.
  llvm::Value *VTable = CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(),
      ClassDecl);

  // Get the offset-to-top from the vtable.
  // The Itanium ABI stores it at vtable entry -2.
  llvm::Value *OffsetToTop =
      CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
  OffsetToTop =
    CGF.Builder.CreateAlignedLoad(OffsetToTop, CGF.getPointerAlign(),
                                  "offset.to.top");

  // Finally, add the offset to the pointer.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

  return CGF.Builder.CreateBitCast(Value, DestLTy);
}
1469 
1470 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1471   llvm::FunctionCallee Fn = getBadCastFn(CGF);
1472   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1473   Call->setDoesNotReturn();
1474   CGF.Builder.CreateUnreachable();
1475   return true;
1476 }
1477 
1478 llvm::Value *
1479 ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1480                                          Address This,
1481                                          const CXXRecordDecl *ClassDecl,
1482                                          const CXXRecordDecl *BaseClassDecl) {
1483   llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1484   CharUnits VBaseOffsetOffset =
1485       CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1486                                                                BaseClassDecl);
1487 
1488   llvm::Value *VBaseOffsetPtr =
1489     CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
1490                                    "vbase.offset.ptr");
1491   VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
1492                                              CGM.PtrDiffTy->getPointerTo());
1493 
1494   llvm::Value *VBaseOffset =
1495     CGF.Builder.CreateAlignedLoad(VBaseOffsetPtr, CGF.getPointerAlign(),
1496                                   "vbase.offset");
1497 
1498   return VBaseOffset;
1499 }
1500 
1501 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1502   // Just make sure we're in sync with TargetCXXABI.
1503   assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1504 
1505   // The constructor used for constructing this as a base class;
1506   // ignores virtual bases.
1507   CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1508 
1509   // The constructor used for constructing this as a complete class;
1510   // constructs the virtual bases, then calls the base constructor.
1511   if (!D->getParent()->isAbstract()) {
1512     // We don't need to emit the complete ctor if the class is abstract.
1513     CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1514   }
1515 }
1516 
1517 CGCXXABI::AddedStructorArgs
1518 ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1519                                       SmallVectorImpl<CanQualType> &ArgTys) {
1520   ASTContext &Context = getContext();
1521 
1522   // All parameters are already in place except VTT, which goes after 'this'.
1523   // These are Clang types, so we don't need to worry about sret yet.
1524 
1525   // Check if we need to add a VTT parameter (which has type void **).
1526   if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1527                                              : GD.getDtorType() == Dtor_Base) &&
1528       cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1529     ArgTys.insert(ArgTys.begin() + 1,
1530                   Context.getPointerType(Context.VoidPtrTy));
1531     return AddedStructorArgs::prefix(1);
1532   }
1533   return AddedStructorArgs{};
1534 }
1535 
1536 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1537   // The destructor used for destructing this as a base class; ignores
1538   // virtual bases.
1539   CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1540 
1541   // The destructor used for destructing this as a most-derived class;
1542   // call the base destructor and then destructs any virtual bases.
1543   CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1544 
1545   // The destructor in a virtual table is always a 'deleting'
1546   // destructor, which calls the complete destructor and then uses the
1547   // appropriate operator delete.
1548   if (D->isVirtual())
1549     CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1550 }
1551 
void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
                                              QualType &ResTy,
                                              FunctionArgList &Params) {
  // Add the implicit VTT parameter (if any) to the structor currently being
  // emitted. ResTy is left untouched: the Itanium ABI never changes a
  // structor's return type here.
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));

  // Check if we need a VTT parameter as well.
  if (NeedsVTTParameter(CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    // The VTT has type void** and goes right after 'this', matching the
    // argument order produced by buildStructorSignature.
    QualType T = Context.getPointerType(Context.VoidPtrTy);
    auto *VTTDecl = ImplicitParamDecl::Create(
        Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
        T, ImplicitParamDecl::CXXVTT);
    Params.insert(Params.begin() + 1, VTTDecl);
    // Remember the decl so EmitInstanceFunctionProlog can load its value.
    getStructorImplicitParamDecl(CGF) = VTTDecl;
  }
}
1571 
void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  // Naked functions have no prolog.
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
    return;

  /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
  /// adjustments are required, because they are all handled by thunks.
  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));

  /// Initialize the 'vtt' slot if needed (the parameter is set up by
  /// addImplicitStructorParams for base-object structors).
  if (getStructorImplicitParamDecl(CGF)) {
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
  }

  /// If this is a function that the ABI specifies returns 'this', initialize
  /// the return slot to 'this' at the start of the function.
  ///
  /// Unlike the setting of return types, this is done within the ABI
  /// implementation instead of by clients of CGCXXABI because:
  /// 1) getThisValue is currently protected
  /// 2) in theory, an ABI could implement 'this' returns some other way;
  ///    HasThisReturn only specifies a contract, not the implementation
  if (HasThisReturn(CGF.CurGD))
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}
1598 
1599 CGCXXABI::AddedStructorArgs ItaniumCXXABI::addImplicitConstructorArgs(
1600     CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1601     bool ForVirtualBase, bool Delegating, CallArgList &Args) {
1602   if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1603     return AddedStructorArgs{};
1604 
1605   // Insert the implicit 'vtt' argument as the second argument.
1606   llvm::Value *VTT =
1607       CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1608   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1609   Args.insert(Args.begin() + 1, CallArg(RValue::get(VTT), VTTTy));
1610   return AddedStructorArgs::prefix(1);  // Added one arg.
1611 }
1612 
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
                                       const CXXDestructorDecl *DD,
                                       CXXDtorType Type, bool ForVirtualBase,
                                       bool Delegating, Address This,
                                       QualType ThisTy) {
  // Emit a direct (non-virtual) call to the given destructor variant,
  // passing a VTT when GetVTTParameter produces one for this call.
  GlobalDecl GD(DD, Type);
  llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);

  CGCallee Callee;
  if (getContext().getLangOpts().AppleKext &&
      Type != Dtor_Base && DD->isVirtual())
    // kext mode routes complete/deleting destructor calls through the
    // vtable even for otherwise-direct calls.
    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
  else
    Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);

  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
                            nullptr);
}
1632 
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  // Emit the vtable group for RD (vtables plus RTTI references), unless it
  // already has an initializer from an earlier emission.
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  ConstantInitBuilder Builder(CGM);
  auto Components = Builder.beginStruct();
  CGVT.createVTableInitializer(Components, VTLayout, RTTI);
  Components.finishAndSetAsInitializer(VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  // Deduplicate weak vtables across TUs via a COMDAT keyed on the mangled
  // vtable name.
  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  // Set the right visibility.
  CGM.setGVProperties(VTable, RD);

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors(RD);

  // Attach vtable type metadata (used for CFI / whole-program
  // devirtualization), but only on real definitions.
  if (!VTable->isDeclarationForLinker())
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
}
1674 
1675 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1676     CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1677   if (Vptr.NearestVBase == nullptr)
1678     return false;
1679   return NeedsVTTParameter(CGF.CurGD);
1680 }
1681 
1682 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1683     CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1684     const CXXRecordDecl *NearestVBase) {
1685 
1686   if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1687       NeedsVTTParameter(CGF.CurGD)) {
1688     return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1689                                                   NearestVBase);
1690   }
1691   return getVTableAddressPoint(Base, VTableClass);
1692 }
1693 
llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  // Compute a constant pointer to the address point for Base within
  // VTableClass's vtable group.
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());

  // Find the appropriate vtable within the vtable group, and the address point
  // within that vtable.
  VTableLayout::AddressPointLocation AddressPoint =
      CGM.getItaniumVTableContext()
          .getVTableLayout(VTableClass)
          .getAddressPoint(Base);
  // GEP indices: 0 (through the global), vtable-within-group, then the
  // component index of the address point inside that vtable.
  llvm::Value *Indices[] = {
    llvm::ConstantInt::get(CGM.Int32Ty, 0),
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
  };

  // InRangeIndex=1 promises LLVM that accesses stay within the selected
  // vtable of the group, enabling global splitting optimizations.
  return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
                                              Indices, /*InBounds=*/true,
                                              /*InRangeIndex=*/1);
}
1715 
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {
  // During construction/destruction, vtable pointers for subobjects with
  // virtual bases come from the VTT passed to the structor.
  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");

  // Get the secondary vpointer index.
  uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

  /// Load the VTT.
  llvm::Value *VTT = CGF.LoadCXXVTT();
  // Step to the entry for this subobject (index 0 needs no GEP).
  if (VirtualPointerIndex)
    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);

  // And load the address point from the VTT.
  return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
}
1734 
llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
    BaseSubobject Base, const CXXRecordDecl *VTableClass) {
  // Constant-expression contexts never involve construction vtables, so the
  // plain address point is always the right answer.
  return getVTableAddressPoint(Base, VTableClass);
}
1739 
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  // Return (creating on first use) the global for RD's vtable group. The
  // result may be only a declaration; emitVTableDefinitions provides the
  // initializer.
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);

  // Use pointer alignment for the vtable. Otherwise we would align them based
  // on the size of the initializer which doesn't make sense as only single
  // values are read.
  unsigned PAlign = CGM.getTarget().getPointerAlign(0);

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, VTableType, llvm::GlobalValue::ExternalLinkage,
      getContext().toCharUnitsFromBits(PAlign).getQuantity());
  // The vtable's address is not significant, so it may be merged with
  // identical globals.
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  CGM.setGVProperties(VTable, RD);

  return VTable;
}
1773 
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD,
                                                  Address This,
                                                  llvm::Type *Ty,
                                                  SourceLocation Loc) {
  // Load the function pointer for a virtual call to GD out of This's vtable.
  Ty = Ty->getPointerTo()->getPointerTo();
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(This, Ty, MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc;
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    // CFI: validate the vtable pointer and load the slot in one checked
    // operation; the offset is in bytes.
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable,
        VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncPtr =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
    auto *VFuncLoad =
        CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());

    // Add !invariant.load md to virtual function load to indicate that
    // function didn't change inside vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help in devirtualization because it will only matter if we will have 2
    // the same virtual function loads from the same vtable load, which won't
    // happen without enabled devirtualization with -fstrict-vtable-pointers.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers)
      VFuncLoad->setMetadata(
          llvm::LLVMContext::MD_invariant_load,
          llvm::MDNode::get(CGM.getLLVMContext(),
                            llvm::ArrayRef<llvm::Metadata *>()));
    VFunc = VFuncLoad;
  }

  CGCallee Callee(GD, VFunc);
  return Callee;
}
1815 
llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
    CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
    Address This, DeleteOrMemberCallExpr E) {
  // E is either the delete-expression or the explicit member call that
  // triggered this virtual destructor invocation; exactly one is set.
  auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
  auto *D = E.dyn_cast<const CXXDeleteExpr *>();
  assert((CE != nullptr) ^ (D != nullptr));
  assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);

  GlobalDecl GD(Dtor, DtorType);
  const CGFunctionInfo *FInfo =
      &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
  llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
  CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);

  // The type of the object being destroyed, taken from whichever
  // expression form we were given.
  QualType ThisTy;
  if (CE) {
    ThisTy = CE->getObjectType();
  } else {
    ThisTy = D->getDestroyedType();
  }

  // Virtual destructor calls never pass a VTT.
  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
                            QualType(), nullptr);
  return nullptr;
}
1842 
1843 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
1844   CodeGenVTables &VTables = CGM.getVTables();
1845   llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
1846   VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
1847 }
1848 
bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
    const CXXRecordDecl *RD) const {
  // Decide whether an available_externally copy of RD's vtable may be
  // emitted when RD is used as a base subobject.
  // We don't emit available_externally vtables if we are in -fapple-kext mode
  // because kext mode does not permit devirtualization.
  if (CGM.getLangOpts().AppleKext)
    return false;

  // If the vtable is hidden then it is not safe to emit an available_externally
  // copy of vtable.
  if (isVTableHidden(RD))
    return false;

  if (CGM.getCodeGenOpts().ForceEmitVTables)
    return true;

  // If we don't have any not emitted inline virtual function then we are safe
  // to emit an available_externally copy of vtable.
  // FIXME we can still emit a copy of the vtable if we
  // can emit definition of the inline functions.
  if (hasAnyUnusedVirtualInlineFunction(RD))
    return false;

  // For a class with virtual bases, we must also be able to speculatively
  // emit the VTT, because CodeGen doesn't have separate notions of "can emit
  // the vtable" and "can emit the VTT". For a base subobject, this means we
  // need to be able to emit non-virtual base vtables.
  if (RD->getNumVBases()) {
    for (const auto &B : RD->bases()) {
      auto *BRD = B.getType()->getAsCXXRecordDecl();
      assert(BRD && "no class for base specifier");
      // Virtual bases and non-dynamic bases contribute nothing here.
      if (B.isVirtual() || !BRD->isDynamicClass())
        continue;
      if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
        return false;
    }
  }

  return true;
}
1888 
1889 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
1890   if (!canSpeculativelyEmitVTableAsBaseClass(RD))
1891     return false;
1892 
1893   // For a complete-object vtable (or more specifically, for the VTT), we need
1894   // to be able to speculatively emit the vtables of all dynamic virtual bases.
1895   for (const auto &B : RD->vbases()) {
1896     auto *BRD = B.getType()->getAsCXXRecordDecl();
1897     assert(BRD && "no class for base specifier");
1898     if (!BRD->isDynamicClass())
1899       continue;
1900     if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
1901       return false;
1902   }
1903 
1904   return true;
1905 }
// Adjust a pointer by a constant non-virtual byte offset and/or a virtual
// offset loaded from the object's vtable (at byte offset VirtualAdjustment
// from the vptr). Handles both 'this' adjustments in thunks
// (IsReturnAdjustment == false, non-virtual part applied first) and
// covariant return adjustments (non-virtual part applied last).
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
                                          Address InitialPtr,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  // No adjustment at all: hand the pointer back untouched.
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.getPointer();

  // Work in terms of i8* so GEPs are byte offsets.
  Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
    V = CGF.Builder.CreateConstInBoundsByteGEP(V,
                              CharUnits::fromQuantity(NonVirtualAdjustment));
  }

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    llvm::Type *PtrDiffTy =
        CGF.ConvertType(CGF.getContext().getPointerDiffType());

    // Load the vptr, then the ptrdiff_t adjustment stored in the vtable.
    Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);

    llvm::Value *OffsetPtr =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);

    OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());

    // Load the adjustment offset from the vtable.
    llvm::Value *Offset =
      CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());

    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
  } else {
    ResultPtr = V.getPointer();
  }

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied second.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
                                                       NonVirtualAdjustment);
  }

  // Cast back to the original type.
  return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
}
1956 
llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
                                                  Address This,
                                                  const ThisAdjustment &TA) {
  // Thunk entry: adjust 'this' using the non-virtual offset and, if needed,
  // the vcall offset stored in the vtable.
  return performTypeAdjustment(CGF, This, TA.NonVirtual,
                               TA.Virtual.Itanium.VCallOffsetOffset,
                               /*IsReturnAdjustment=*/false);
}
1964 
llvm::Value *
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const ReturnAdjustment &RA) {
  // Covariant-return thunk exit: adjust the returned pointer using the
  // vbase offset (if any) and then the non-virtual offset.
  return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
                               RA.Virtual.Itanium.VBaseOffsetOffset,
                               /*IsReturnAdjustment=*/true);
}
1972 
void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
                                    RValue RV, QualType ResultType) {
  // Non-destructor thunks take the normal Itanium return path.
  if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
    return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);

  // Destructor thunks in the ARM ABI have indeterminate results.
  // Return an undef value of the slot's type rather than the incoming RV.
  llvm::Type *T = CGF.ReturnValue.getElementType();
  RValue Undef = RValue::get(llvm::UndefValue::get(T));
  return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
}
1983 
1984 /************************** Array allocation cookies **************************/
1985 
1986 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
1987   // The array cookie is a size_t; pad that up to the element alignment.
1988   // The cookie is actually right-justified in that space.
1989   return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
1990                   CGM.getContext().getTypeAlignInChars(elementType));
1991 }
1992 
Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                             Address NewPtr,
                                             llvm::Value *NumElements,
                                             const CXXNewExpr *expr,
                                             QualType ElementType) {
  // Write the array-new cookie (the element count, right-justified in the
  // cookie space) at the start of the allocation and return the adjusted
  // pointer to the first array element.
  assert(requiresArrayCookie(expr));

  unsigned AS = NewPtr.getAddressSpace();

  ASTContext &Ctx = getContext();
  CharUnits SizeSize = CGF.getSizeSize();

  // The size of the cookie.
  CharUnits CookieSize =
    std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));

  // Compute an offset to the cookie.
  Address CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);

  // Write the number of elements into the appropriate slot.
  Address NumElementsPtr =
      CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
  llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);

  // Handle the array cookie specially in ASan: poison the cookie so that
  // direct user access to it is diagnosed.
  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
      (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
       CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
    // The store to the CookiePtr does not need to be instrumented.
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
    llvm::FunctionCallee F =
        CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
    CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
  }

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
}
2038 
llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                Address allocPtr,
                                                CharUnits cookieSize) {
  // Read the element count back out of the array cookie written by
  // InitializeArrayCookie.
  // The element size is right-justified in the cookie.
  Address numElementsPtr = allocPtr;
  CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
  if (!numElementsOffset.isZero())
    numElementsPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);

  unsigned AS = allocPtr.getAddressSpace();
  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
  if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
    return CGF.Builder.CreateLoad(numElementsPtr);
  // In asan mode emit a function call instead of a regular load and let the
  // run-time deal with it: if the shadow is properly poisoned return the
  // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
  // We can't simply ignore this load using nosanitize metadata because
  // the metadata may be lost.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
  llvm::FunctionCallee F =
      CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
  return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
}
2064 
2065 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2066   // ARM says that the cookie is always:
2067   //   struct array_cookie {
2068   //     std::size_t element_size; // element_size != 0
2069   //     std::size_t element_count;
2070   //   };
2071   // But the base ABI doesn't give anything an alignment greater than
2072   // 8, so we can dismiss this as typical ABI-author blindness to
2073   // actual language complexity and round up to the element alignment.
2074   return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2075                   CGM.getContext().getTypeAlignInChars(elementType));
2076 }
2077 
Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                         Address newPtr,
                                         llvm::Value *numElements,
                                         const CXXNewExpr *expr,
                                         QualType elementType) {
  // Write the two-word ARM array cookie {element_size, element_count} and
  // return the adjusted pointer to the first array element.
  assert(requiresArrayCookie(expr));

  // The cookie is always at the start of the buffer.
  Address cookie = newPtr;

  // The first element is the element size.
  cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
  llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
                 getContext().getTypeSizeInChars(elementType).getQuantity());
  CGF.Builder.CreateStore(elementSize, cookie);

  // The second element is the element count.
  cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
  CGF.Builder.CreateStore(numElements, cookie);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely. The cookie may be bigger than two size_ts
  // if the element alignment requires it (see getArrayCookieSizeImpl).
  CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
  return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
}
2103 
2104 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2105                                             Address allocPtr,
2106                                             CharUnits cookieSize) {
2107   // The number of elements is at offset sizeof(size_t) relative to
2108   // the allocated pointer.
2109   Address numElementsPtr
2110     = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2111 
2112   numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2113   return CGF.Builder.CreateLoad(numElementsPtr);
2114 }
2115 
2116 /*********************** Static local initialization **************************/
2117 
2118 static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2119                                               llvm::PointerType *GuardPtrTy) {
2120   // int __cxa_guard_acquire(__guard *guard_object);
2121   llvm::FunctionType *FTy =
2122     llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2123                             GuardPtrTy, /*isVarArg=*/false);
2124   return CGM.CreateRuntimeFunction(
2125       FTy, "__cxa_guard_acquire",
2126       llvm::AttributeList::get(CGM.getLLVMContext(),
2127                                llvm::AttributeList::FunctionIndex,
2128                                llvm::Attribute::NoUnwind));
2129 }
2130 
2131 static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2132                                               llvm::PointerType *GuardPtrTy) {
2133   // void __cxa_guard_release(__guard *guard_object);
2134   llvm::FunctionType *FTy =
2135     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2136   return CGM.CreateRuntimeFunction(
2137       FTy, "__cxa_guard_release",
2138       llvm::AttributeList::get(CGM.getLLVMContext(),
2139                                llvm::AttributeList::FunctionIndex,
2140                                llvm::Attribute::NoUnwind));
2141 }
2142 
2143 static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2144                                             llvm::PointerType *GuardPtrTy) {
2145   // void __cxa_guard_abort(__guard *guard_object);
2146   llvm::FunctionType *FTy =
2147     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2148   return CGM.CreateRuntimeFunction(
2149       FTy, "__cxa_guard_abort",
2150       llvm::AttributeList::get(CGM.getLLVMContext(),
2151                                llvm::AttributeList::FunctionIndex,
2152                                llvm::Attribute::NoUnwind));
2153 }
2154 
namespace {
  /// Cleanup pushed while a guarded initializer runs: if the initializer
  /// throws, call __cxa_guard_abort on the guard variable so another
  /// thread (or a later pass over the same code) can retry initialization.
  struct CallGuardAbort final : EHScopeStack::Cleanup {
    llvm::GlobalVariable *Guard;
    CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // __cxa_guard_abort is declared nounwind, so a plain call suffices.
      CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
                                  Guard);
    }
  };
}
2166 
/// The ARM code here follows the Itanium code closely enough that we
/// just special-case it at particular places.
///
/// Emits the one-time (possibly thread-safe) initialization of a guarded
/// variable: a guard variable is created (or reused), its "initialized"
/// flag is tested, and the initializer for \p var is emitted on the
/// not-yet-initialized path, bracketed by __cxa_guard_acquire/release
/// when thread-safe statics are required.
///
/// \param D the variable being initialized.
/// \param var the LLVM global holding the variable's storage.
/// \param shouldPerformInit whether the initializer itself must be run
///        (as opposed to only registering a destructor).
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // Inline variables that weren't instantiated from variable templates have
  // partially-ordered initialization within their translation unit.
  bool NonTemplateInline =
      D.isInline() &&
      !isTemplateInstantiation(D.getTemplateSpecializationKind());

  // We only need to use thread-safe statics for local non-TLS variables and
  // inline variables; other global initialization is always single-threaded
  // or (through lazy dynamic loading in multiple threads) unsequenced.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    (D.isLocalVarDecl() || NonTemplateInline) &&
                    !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  CharUnits guardAlignment;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
    guardAlignment = CharUnits::One();
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    if (UseARMGuardVarABI) {
      guardTy = CGF.SizeTy;
      guardAlignment = CGF.getSizeAlign();
    } else {
      guardTy = CGF.Int64Ty;
      guardAlignment = CharUnits::fromQuantity(
                             CGM.getDataLayout().getABITypeAlignment(guardTy));
    }
  }
  llvm::PointerType *guardPtrTy = guardTy->getPointerTo();

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleStaticGuardVariable(&D, out);
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage and visibility from the guarded variable.
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(guardTy, 0),
                                     guardName.str());
    guard->setDSOLocal(var->isDSOLocal());
    guard->setVisibility(var->getVisibility());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());
    guard->setAlignment(guardAlignment.getAsAlign());

    // The ABI says: "It is suggested that it be emitted in the same COMDAT
    // group as the associated data object." In practice, this doesn't work for
    // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
    llvm::Comdat *C = var->getComdat();
    if (!D.isLocalVarDecl() && C &&
        (CGM.getTarget().getTriple().isOSBinFormatELF() ||
         CGM.getTarget().getTriple().isOSBinFormatWasm())) {
      guard->setComdat(C);
      // An inline variable's guard function is run from the per-TU
      // initialization function, not via a dedicated global ctor function, so
      // we can't put it in a comdat.
      if (!NonTemplateInline)
        CGF.CurFn->setComdat(C);
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
      guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
    }

    // Cache the guard so a second emission of this body reuses it.
    CGM.setStaticLocalDeclGuardAddress(&D, guard);
  }

  Address guardAddr = Address(guard, guardAlignment);

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //            __cxa_guard_abort (&obj_guard);
  //            throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }

  // Load the first byte of the guard variable.
  llvm::LoadInst *LI =
      Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));

  // Itanium ABI:
  //   An implementation supporting thread-safety on multiprocessor
  //   systems must also guarantee that references to the initialized
  //   object do not occur before the load of the initialization flag.
  //
  // In LLVM, we do this by marking the load Acquire.
  if (threadsafe)
    LI->setAtomic(llvm::AtomicOrdering::Acquire);

  // For ARM, we should only check the first bit, rather than the entire byte:
  //
  // ARM C++ ABI 3.2.3.1:
  //   To support the potential use of initialization guard variables
  //   as semaphores that are the target of ARM SWP and LDREX/STREX
  //   synchronizing instructions we define a static initialization
  //   guard variable to be a 4-byte aligned, 4-byte word with the
  //   following inline access protocol.
  //     #define INITIALIZED 1
  //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
  //       if (__cxa_guard_acquire(&obj_guard))
  //         ...
  //     }
  //
  // and similarly for ARM64:
  //
  // ARM64 C++ ABI 3.2.2:
  //   This ABI instead only specifies the value bit 0 of the static guard
  //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
  //   variable is not initialized and 1 when it is.
  llvm::Value *V =
      (UseARMGuardVarABI && !useInt8GuardVariable)
          ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
          : LI;
  llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");

  llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");

  // Check if the first byte of the guard variable is zero.
  CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
                               CodeGenFunction::GuardKind::VariableGuard, &D);

  CGF.EmitBlock(InitCheckBlock);

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.  A nonzero return means this thread won
    // the race and must perform the initialization.
    llvm::Value *V
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);

    CGF.EmitBlock(InitBlock);
  }

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release.  This cannot throw.
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
                                guardAddr.getPointer());
  } else {
    // Non-thread-safe path: mark the guard initialized with a plain store.
    Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);
  }

  CGF.EmitBlock(EndBlock);
}
2354 
/// Register a global destructor using __cxa_atexit.
///
/// Emits a call to __cxa_atexit (or, for TLS variables, __cxa_thread_atexit /
/// _tlv_atexit on Darwin) registering \p dtor to be invoked with argument
/// \p addr at program/thread exit, bound to this shared object via
/// __dso_handle.
///
/// \param dtor the destructor (or destructor stub) to register.
/// \param addr the object address passed back to \p dtor; may be null when
///        registering an __attribute__((destructor)) function.
/// \param TLS whether to use the thread-local registration entry point.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::FunctionCallee dtor,
                                        llvm::Constant *addr, bool TLS) {
  assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
         "__cxa_atexit is disabled");
  const char *Name = "__cxa_atexit";
  if (TLS) {
    // Darwin uses its own TLS-exit entry point; other Itanium targets use
    // the standard __cxa_thread_atexit.
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isOSDarwin() ?  "_tlv_atexit" : "__cxa_thread_atexit";
  }

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC.  Go ahead and cast it to the
  // right prototype.
  llvm::Type *dtorTy =
    llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();

  // Preserve address space of addr.
  auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
  auto AddrInt8PtrTy =
      AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
      CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
  auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
  llvm::FunctionType *atexitTy =
    llvm::FunctionType::get(CGF.IntTy, paramTys, false);

  // Fetch the actual function.
  llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
    fn->setDoesNotThrow();

  if (!addr)
    // addr is null when we are trying to register a dtor annotated with
    // __attribute__((destructor)) in a constructor function. Using null here is
    // okay because this argument is just passed back to the destructor
    // function.
    addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);

  llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(
                             cast<llvm::Constant>(dtor.getCallee()), dtorTy),
                         llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
                         handle};
  CGF.EmitNounwindRuntimeCall(atexit, args);
}
2407 
2408 void CodeGenModule::registerGlobalDtorsWithAtExit() {
2409   for (const auto I : DtorsUsingAtExit) {
2410     int Priority = I.first;
2411     const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2412 
2413     // Create a function that registers destructors that have the same priority.
2414     //
2415     // Since constructor functions are run in non-descending order of their
2416     // priorities, destructors are registered in non-descending order of their
2417     // priorities, and since destructor functions are run in the reverse order
2418     // of their registration, destructor functions are run in non-ascending
2419     // order of their priorities.
2420     CodeGenFunction CGF(*this);
2421     std::string GlobalInitFnName =
2422         std::string("__GLOBAL_init_") + llvm::to_string(Priority);
2423     llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
2424     llvm::Function *GlobalInitFn = CreateGlobalInitOrDestructFunction(
2425         FTy, GlobalInitFnName, getTypes().arrangeNullaryFunction(),
2426         SourceLocation());
2427     ASTContext &Ctx = getContext();
2428     QualType ReturnTy = Ctx.VoidTy;
2429     QualType FunctionTy = Ctx.getFunctionType(ReturnTy, llvm::None, {});
2430     FunctionDecl *FD = FunctionDecl::Create(
2431         Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
2432         &Ctx.Idents.get(GlobalInitFnName), FunctionTy, nullptr, SC_Static,
2433         false, false);
2434     CGF.StartFunction(GlobalDecl(FD), ReturnTy, GlobalInitFn,
2435                       getTypes().arrangeNullaryFunction(), FunctionArgList(),
2436                       SourceLocation(), SourceLocation());
2437 
2438     for (auto *Dtor : Dtors) {
2439       // Register the destructor function calling __cxa_atexit if it is
2440       // available. Otherwise fall back on calling atexit.
2441       if (getCodeGenOpts().CXAAtExit)
2442         emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
2443       else
2444         CGF.registerGlobalDtorWithAtExit(Dtor);
2445     }
2446 
2447     CGF.FinishFunction();
2448     AddGlobalCtor(GlobalInitFn, Priority, nullptr);
2449   }
2450 }
2451 
2452 /// Register a global destructor as best as we know how.
2453 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2454                                        llvm::FunctionCallee dtor,
2455                                        llvm::Constant *addr) {
2456   if (D.isNoDestroy(CGM.getContext()))
2457     return;
2458 
2459   // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2460   // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
2461   // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2462   // We can always use __cxa_thread_atexit.
2463   if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2464     return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2465 
2466   // In Apple kexts, we want to add a global destructor entry.
2467   // FIXME: shouldn't this be guarded by some variable?
2468   if (CGM.getLangOpts().AppleKext) {
2469     // Generate a global destructor entry.
2470     return CGM.AddCXXDtorEntry(dtor, addr);
2471   }
2472 
2473   CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2474 }
2475 
2476 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2477                                        CodeGen::CodeGenModule &CGM) {
2478   assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2479   // Darwin prefers to have references to thread local variables to go through
2480   // the thread wrapper instead of directly referencing the backing variable.
2481   return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2482          CGM.getTarget().getTriple().isOSDarwin();
2483 }
2484 
2485 /// Get the appropriate linkage for the wrapper function. This is essentially
2486 /// the weak form of the variable's linkage; every translation unit which needs
2487 /// the wrapper emits a copy, and we want the linker to merge them.
2488 static llvm::GlobalValue::LinkageTypes
2489 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2490   llvm::GlobalValue::LinkageTypes VarLinkage =
2491       CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
2492 
2493   // For internal linkage variables, we don't need an external or weak wrapper.
2494   if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2495     return VarLinkage;
2496 
2497   // If the thread wrapper is replaceable, give it appropriate linkage.
2498   if (isThreadWrapperReplaceable(VD, CGM))
2499     if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2500         !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2501       return VarLinkage;
2502   return llvm::GlobalValue::WeakODRLinkage;
2503 }
2504 
/// Return the thread wrapper function for \p VD, creating a declaration
/// with the appropriate linkage, visibility, and calling convention if one
/// does not already exist in the module.  The wrapper returns a pointer to
/// the (possibly lazily initialized) variable.
llvm::Function *
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                             llvm::Value *Val) {
  // Mangle the name for the thread_local wrapper function.
  SmallString<256> WrapperName;
  {
    llvm::raw_svector_ostream Out(WrapperName);
    getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
  }

  // FIXME: If VD is a definition, we should regenerate the function attributes
  // before returning.
  if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
    return cast<llvm::Function>(V);

  // The wrapper returns a pointer to the variable's type (for a reference,
  // a pointer to the referenced type).
  QualType RetQT = VD->getType();
  if (RetQT->isReferenceType())
    RetQT = RetQT.getNonReferenceType();

  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      getContext().getPointerType(RetQT), FunctionArgList());

  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Wrapper =
      llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
                             WrapperName.str(), &CGM.getModule());

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper);

  // Always resolve references to the wrapper at link time.
  if (!Wrapper->hasLocalLinkage())
    if (!isThreadWrapperReplaceable(VD, CGM) ||
        llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
        llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
        VD->getVisibility() == HiddenVisibility)
      Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // Replaceable wrappers (Darwin) use the CXX_FAST_TLS convention and
  // never unwind.
  if (isThreadWrapperReplaceable(VD, CGM)) {
    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
    Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
  }

  // Record the wrapper so EmitThreadLocalInitFuncs emits its body later.
  ThreadWrappers.push_back({VD, Wrapper});
  return Wrapper;
}
2550 
/// Emit the per-TU machinery for dynamically initialized thread_local
/// variables: a guarded __tls_init function for ordered initializers, a
/// per-variable init function (or alias) for unordered ones, and the body
/// of every thread wrapper recorded in ThreadWrappers.
void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;

  // Separate initializers into those with ordered (or partially-ordered)
  // initialization and those with unordered initialization.
  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
    // Template instantiations have unordered initialization; everything
    // else runs in declaration order from __tls_init.
    if (isTemplateInstantiation(
            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
          CXXThreadLocalInits[I];
    else
      OrderedInits.push_back(CXXThreadLocalInits[I]);
  }

  if (!OrderedInits.empty()) {
    // Generate a guarded initialization function.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
    InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init", FI,
                                                      SourceLocation(),
                                                      /*TLS=*/true);
    // One-byte thread-local guard so __tls_init runs at most once per thread.
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
    Guard->setThreadLocal(true);

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getAsAlign());

    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
        InitFunc, OrderedInits, ConstantAddress(Guard, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
    }
  }

  // Create declarations for thread wrappers for all thread-local variables
  // with non-discardable definitions in this translation unit.
  for (const VarDecl *VD : CXXThreadLocals) {
    if (VD->hasDefinition() &&
        !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
      llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
      getOrCreateThreadLocalWrapper(VD, GV);
    }
  }

  // Emit all referenced thread wrappers.
  for (auto VDAndWrapper : ThreadWrappers) {
    const VarDecl *VD = VDAndWrapper.first;
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
    llvm::Function *Wrapper = VDAndWrapper.second;

    // Some targets require that all access to thread local variables go through
    // the thread wrapper.  This means that we cannot attempt to create a thread
    // wrapper or a thread helper.
    if (!VD->hasDefinition()) {
      if (isThreadWrapperReplaceable(VD, CGM)) {
        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
        continue;
      }

      // If this isn't a TU in which this variable is defined, the thread
      // wrapper is discardable.
      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
    }

    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
    }

    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    bool HasConstantInitialization = false;
    if (!usesThreadWrapperFunction(VD)) {
      // The variable is constant-initialized: nothing to call at runtime.
      HasConstantInitialization = true;
    } else if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      llvm::Function *InitFuncToUse = InitFunc;
      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
      if (InitFuncToUse)
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
                                         InitFuncToUse);
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      Init = llvm::Function::Create(InitFnTy,
                                    llvm::GlobalVariable::ExternalWeakLinkage,
                                    InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
                                    cast<llvm::Function>(Init));
    }

    if (Init) {
      Init->setVisibility(Var->getVisibility());
      Init->setDSOLocal(Var->isDSOLocal());
    }

    // Build the wrapper body: call the init function (conditionally when
    // it is only weakly declared), then return the variable's address.
    llvm::LLVMContext &Context = CGM.getModule().getContext();
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
    CGBuilderTy Builder(CGM, Entry);
    if (HasConstantInitialization) {
      // No dynamic initialization to invoke.
    } else if (InitIsInitFunc) {
      if (Init) {
        llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
        if (isThreadWrapperReplaceable(VD, CGM)) {
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
          llvm::Function *Fn =
              cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
          Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
        }
      }
    } else {
      // Don't know whether we have an init function. Call it if it exists.
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      Builder.CreateCondBr(Have, InitBB, ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(InitFnTy, Init);
      Builder.CreateBr(ExitBB);

      Builder.SetInsertPoint(ExitBB);
    }

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Var;
    if (VD->getType()->isReferenceType()) {
      CharUnits Align = CGM.getContext().getDeclAlign(VD);
      Val = Builder.CreateAlignedLoad(Val, Align);
    }
    if (Val->getType() != Wrapper->getReturnType())
      Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
          Val, Wrapper->getReturnType(), "");
    Builder.CreateRet(Val);
  }
}
2715 
2716 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
2717                                                    const VarDecl *VD,
2718                                                    QualType LValType) {
2719   llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
2720   llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
2721 
2722   llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
2723   CallVal->setCallingConv(Wrapper->getCallingConv());
2724 
2725   LValue LV;
2726   if (VD->getType()->isReferenceType())
2727     LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
2728   else
2729     LV = CGF.MakeAddrLValue(CallVal, LValType,
2730                             CGF.getContext().getDeclAlign(VD));
2731   // FIXME: need setObjCGCLValueClass?
2732   return LV;
2733 }
2734 
2735 /// Return whether the given global decl needs a VTT parameter, which it does
2736 /// if it's a base constructor or destructor with virtual bases.
2737 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
2738   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
2739 
2740   // We don't have any virtual bases, just return early.
2741   if (!MD->getParent()->getNumVBases())
2742     return false;
2743 
2744   // Check if we have a base constructor.
2745   if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
2746     return true;
2747 
2748   // Check if we have a base destructor.
2749   if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
2750     return true;
2751 
2752   return false;
2753 }
2754 
namespace {
/// Builds Itanium-ABI RTTI descriptors (abi::__*_type_info objects) for a
/// module, accumulating the descriptor's constant fields in Fields.
class ItaniumRTTIBuilder {
  CodeGenModule &CGM;  // Per-module state.
  llvm::LLVMContext &VMContext;
  const ItaniumCXXABI &CXXABI;  // Per-module state.

  /// Fields - The fields of the RTTI descriptor currently being built.
  SmallVector<llvm::Constant *, 16> Fields;

  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
  llvm::GlobalVariable *
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);

  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
  /// descriptor of the given type.
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);

  /// BuildVTablePointer - Build the vtable pointer for the given type.
  void BuildVTablePointer(const Type *Ty);

  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
  /// classes with bases that do not satisfy the abi::__si_class_type_info
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
  /// for pointer types.
  void BuildPointerTypeInfo(QualType PointeeTy);

  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
  /// type_info for an object type.
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);

  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
  /// struct, used for member pointer types.
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);

public:
  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}

  // Pointer type info flags.
  enum {
    /// PTI_Const - Type has const qualifier.
    PTI_Const = 0x1,

    /// PTI_Volatile - Type has volatile qualifier.
    PTI_Volatile = 0x2,

    /// PTI_Restrict - Type has restrict qualifier.
    PTI_Restrict = 0x4,

    /// PTI_Incomplete - Type is incomplete.
    PTI_Incomplete = 0x8,

    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
    /// (in pointer to member).
    PTI_ContainingClassIncomplete = 0x10,

    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
    //PTI_TransactionSafe = 0x20,

    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
    PTI_Noexcept = 0x40,
  };

  // VMI type info flags.
  enum {
    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
    VMI_NonDiamondRepeat = 0x1,

    /// VMI_DiamondShaped - Class is diamond shaped.
    VMI_DiamondShaped = 0x2
  };

  // Base class type info flags.
  enum {
    /// BCTI_Virtual - Base class is virtual.
    BCTI_Virtual = 0x1,

    /// BCTI_Public - Base class is public.
    BCTI_Public = 0x2
  };

  /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
  /// link to an existing RTTI descriptor if one already exists.
  llvm::Constant *BuildTypeInfo(QualType Ty);

  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
  llvm::Constant *BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
};
}
2855 
2856 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
2857     QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
2858   SmallString<256> Name;
2859   llvm::raw_svector_ostream Out(Name);
2860   CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
2861 
2862   // We know that the mangled name of the type starts at index 4 of the
2863   // mangled name of the typename, so we can just index into it in order to
2864   // get the mangled name of the type.
2865   llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
2866                                                             Name.substr(4));
2867   auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
2868 
2869   llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
2870       Name, Init->getType(), Linkage, Align.getQuantity());
2871 
2872   GV->setInitializer(Init);
2873 
2874   return GV;
2875 }
2876 
2877 llvm::Constant *
2878 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
2879   // Mangle the RTTI name.
2880   SmallString<256> Name;
2881   llvm::raw_svector_ostream Out(Name);
2882   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
2883 
2884   // Look for an existing global.
2885   llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
2886 
2887   if (!GV) {
2888     // Create a new global variable.
2889     // Note for the future: If we would ever like to do deferred emission of
2890     // RTTI, check if emitting vtables opportunistically need any adjustment.
2891 
2892     GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
2893                                   /*isConstant=*/true,
2894                                   llvm::GlobalValue::ExternalLinkage, nullptr,
2895                                   Name);
2896     const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
2897     CGM.setGVProperties(GV, RD);
2898   }
2899 
2900   return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
2901 }
2902 
2903 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
2904 /// info for that type is defined in the standard library.
2905 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
2906   // Itanium C++ ABI 2.9.2:
2907   //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
2908   //   the run-time support library. Specifically, the run-time support
2909   //   library should contain type_info objects for the types X, X* and
2910   //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
2911   //   unsigned char, signed char, short, unsigned short, int, unsigned int,
2912   //   long, unsigned long, long long, unsigned long long, float, double,
2913   //   long double, char16_t, char32_t, and the IEEE 754r decimal and
2914   //   half-precision floating point types.
2915   //
2916   // GCC also emits RTTI for __int128.
2917   // FIXME: We do not emit RTTI information for decimal types here.
2918 
2919   // Types added here must also be added to EmitFundamentalRTTIDescriptors.
2920   switch (Ty->getKind()) {
2921     case BuiltinType::Void:
2922     case BuiltinType::NullPtr:
2923     case BuiltinType::Bool:
2924     case BuiltinType::WChar_S:
2925     case BuiltinType::WChar_U:
2926     case BuiltinType::Char_U:
2927     case BuiltinType::Char_S:
2928     case BuiltinType::UChar:
2929     case BuiltinType::SChar:
2930     case BuiltinType::Short:
2931     case BuiltinType::UShort:
2932     case BuiltinType::Int:
2933     case BuiltinType::UInt:
2934     case BuiltinType::Long:
2935     case BuiltinType::ULong:
2936     case BuiltinType::LongLong:
2937     case BuiltinType::ULongLong:
2938     case BuiltinType::Half:
2939     case BuiltinType::Float:
2940     case BuiltinType::Double:
2941     case BuiltinType::LongDouble:
2942     case BuiltinType::Float16:
2943     case BuiltinType::Float128:
2944     case BuiltinType::Char8:
2945     case BuiltinType::Char16:
2946     case BuiltinType::Char32:
2947     case BuiltinType::Int128:
2948     case BuiltinType::UInt128:
2949       return true;
2950 
2951 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2952     case BuiltinType::Id:
2953 #include "clang/Basic/OpenCLImageTypes.def"
2954 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
2955     case BuiltinType::Id:
2956 #include "clang/Basic/OpenCLExtensionTypes.def"
2957     case BuiltinType::OCLSampler:
2958     case BuiltinType::OCLEvent:
2959     case BuiltinType::OCLClkEvent:
2960     case BuiltinType::OCLQueue:
2961     case BuiltinType::OCLReserveID:
2962 #define SVE_TYPE(Name, Id, SingletonId) \
2963     case BuiltinType::Id:
2964 #include "clang/Basic/AArch64SVEACLETypes.def"
2965     case BuiltinType::ShortAccum:
2966     case BuiltinType::Accum:
2967     case BuiltinType::LongAccum:
2968     case BuiltinType::UShortAccum:
2969     case BuiltinType::UAccum:
2970     case BuiltinType::ULongAccum:
2971     case BuiltinType::ShortFract:
2972     case BuiltinType::Fract:
2973     case BuiltinType::LongFract:
2974     case BuiltinType::UShortFract:
2975     case BuiltinType::UFract:
2976     case BuiltinType::ULongFract:
2977     case BuiltinType::SatShortAccum:
2978     case BuiltinType::SatAccum:
2979     case BuiltinType::SatLongAccum:
2980     case BuiltinType::SatUShortAccum:
2981     case BuiltinType::SatUAccum:
2982     case BuiltinType::SatULongAccum:
2983     case BuiltinType::SatShortFract:
2984     case BuiltinType::SatFract:
2985     case BuiltinType::SatLongFract:
2986     case BuiltinType::SatUShortFract:
2987     case BuiltinType::SatUFract:
2988     case BuiltinType::SatULongFract:
2989       return false;
2990 
2991     case BuiltinType::Dependent:
2992 #define BUILTIN_TYPE(Id, SingletonId)
2993 #define PLACEHOLDER_TYPE(Id, SingletonId) \
2994     case BuiltinType::Id:
2995 #include "clang/AST/BuiltinTypes.def"
2996       llvm_unreachable("asking for RRTI for a placeholder type!");
2997 
2998     case BuiltinType::ObjCId:
2999     case BuiltinType::ObjCClass:
3000     case BuiltinType::ObjCSel:
3001       llvm_unreachable("FIXME: Objective-C types are unsupported!");
3002   }
3003 
3004   llvm_unreachable("Invalid BuiltinType Kind!");
3005 }
3006 
3007 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3008   QualType PointeeTy = PointerTy->getPointeeType();
3009   const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3010   if (!BuiltinTy)
3011     return false;
3012 
3013   // Check the qualifiers.
3014   Qualifiers Quals = PointeeTy.getQualifiers();
3015   Quals.removeConst();
3016 
3017   if (!Quals.empty())
3018     return false;
3019 
3020   return TypeInfoIsInStandardLibrary(BuiltinTy);
3021 }
3022 
3023 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
3024 /// information for the given type exists in the standard library.
3025 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3026   // Type info for builtin types is defined in the standard library.
3027   if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3028     return TypeInfoIsInStandardLibrary(BuiltinTy);
3029 
3030   // Type info for some pointer types to builtin types is defined in the
3031   // standard library.
3032   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3033     return TypeInfoIsInStandardLibrary(PointerTy);
3034 
3035   return false;
3036 }
3037 
3038 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3039 /// the given type exists somewhere else, and that we should not emit the type
3040 /// information in this translation unit.  Assumes that it is not a
3041 /// standard-library type.
3042 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3043                                             QualType Ty) {
3044   ASTContext &Context = CGM.getContext();
3045 
3046   // If RTTI is disabled, assume it might be disabled in the
3047   // translation unit that defines any potential key function, too.
3048   if (!Context.getLangOpts().RTTI) return false;
3049 
3050   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3051     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3052     if (!RD->hasDefinition())
3053       return false;
3054 
3055     if (!RD->isDynamicClass())
3056       return false;
3057 
3058     // FIXME: this may need to be reconsidered if the key function
3059     // changes.
3060     // N.B. We must always emit the RTTI data ourselves if there exists a key
3061     // function.
3062     bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3063 
3064     // Don't import the RTTI but emit it locally.
3065     if (CGM.getTriple().isWindowsGNUEnvironment())
3066       return false;
3067 
3068     if (CGM.getVTables().isVTableExternal(RD))
3069       return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3070                  ? false
3071                  : true;
3072 
3073     if (IsDLLImport)
3074       return true;
3075   }
3076 
3077   return false;
3078 }
3079 
3080 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
3081 static bool IsIncompleteClassType(const RecordType *RecordTy) {
3082   return !RecordTy->getDecl()->isCompleteDefinition();
3083 }
3084 
3085 /// ContainsIncompleteClassType - Returns whether the given type contains an
3086 /// incomplete class type. This is true if
3087 ///
3088 ///   * The given type is an incomplete class type.
3089 ///   * The given type is a pointer type whose pointee type contains an
3090 ///     incomplete class type.
3091 ///   * The given type is a member pointer type whose class is an incomplete
3092 ///     class type.
3093 ///   * The given type is a member pointer type whoise pointee type contains an
3094 ///     incomplete class type.
3095 /// is an indirect or direct pointer to an incomplete class type.
3096 static bool ContainsIncompleteClassType(QualType Ty) {
3097   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3098     if (IsIncompleteClassType(RecordTy))
3099       return true;
3100   }
3101 
3102   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3103     return ContainsIncompleteClassType(PointerTy->getPointeeType());
3104 
3105   if (const MemberPointerType *MemberPointerTy =
3106       dyn_cast<MemberPointerType>(Ty)) {
3107     // Check if the class type is incomplete.
3108     const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3109     if (IsIncompleteClassType(ClassType))
3110       return true;
3111 
3112     return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3113   }
3114 
3115   return false;
3116 }
3117 
3118 // CanUseSingleInheritance - Return whether the given record decl has a "single,
3119 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
3120 // iff the base is)", according to Itanium C++ ABI, 2.95p6b.
3121 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3122   // Check the number of bases.
3123   if (RD->getNumBases() != 1)
3124     return false;
3125 
3126   // Get the base.
3127   CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3128 
3129   // Check that the base is not virtual.
3130   if (Base->isVirtual())
3131     return false;
3132 
3133   // Check that the base is public.
3134   if (Base->getAccessSpecifier() != AS_public)
3135     return false;
3136 
3137   // Check that the class is dynamic iff the base is.
3138   auto *BaseDecl =
3139       cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3140   if (!BaseDecl->isEmpty() &&
3141       BaseDecl->isDynamicClass() != RD->isDynamicClass())
3142     return false;
3143 
3144   return true;
3145 }
3146 
/// BuildVTablePointer - Push the first field of the RTTI descriptor: the
/// vtable address point of the abi::*_type_info subclass that matches the
/// given (canonical) type.
void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
  // abi::__class_type_info.
  static const char * const ClassTypeInfo =
    "_ZTVN10__cxxabiv117__class_type_infoE";
  // abi::__si_class_type_info.
  static const char * const SIClassTypeInfo =
    "_ZTVN10__cxxabiv120__si_class_type_infoE";
  // abi::__vmi_class_type_info.
  static const char * const VMIClassTypeInfo =
    "_ZTVN10__cxxabiv121__vmi_class_type_infoE";

  const char *VTableName = nullptr;

  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    llvm_unreachable("Pipe types shouldn't get here");

  case Type::Builtin:
  // GCC treats vector and complex types as fundamental types.
  case Type::Vector:
  case Type::ExtVector:
  case Type::Complex:
  case Type::Atomic:
  // FIXME: GCC treats block pointers as fundamental types?!
  case Type::BlockPointer:
    // abi::__fundamental_type_info.
    VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // abi::__array_type_info.
    VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // abi::__function_type_info.
    VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
    break;

  case Type::Enum:
    // abi::__enum_type_info.
    VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());

    // Incomplete classes and classes without bases use the plain
    // __class_type_info; single public non-virtual inheritance uses
    // __si_class_type_info; everything else needs __vmi_class_type_info.
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      VTableName = ClassTypeInfo;
    } else if (CanUseSingleInheritance(RD)) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = VMIClassTypeInfo;
    }

    break;
  }

  case Type::ObjCObject:
    // Ignore protocol qualifiers.
    Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();

    // Handle id and Class.
    if (isa<BuiltinType>(Ty)) {
      VTableName = ClassTypeInfo;
      break;
    }

    assert(isa<ObjCInterfaceType>(Ty));
    LLVM_FALLTHROUGH;

  case Type::ObjCInterface:
    // Interfaces with a superclass look like single inheritance; root
    // classes are plain __class_type_info.
    if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = ClassTypeInfo;
    }
    break;

  case Type::ObjCObjectPointer:
  case Type::Pointer:
    // abi::__pointer_type_info.
    VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
    break;

  case Type::MemberPointer:
    // abi::__pointer_to_member_type_info.
    VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
    break;
  }

  llvm::Constant *VTable =
    CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
  CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));

  llvm::Type *PtrDiffTy =
    CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());

  // The vtable address point is 2.
  llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
  VTable =
      llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, Two);
  VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);

  Fields.push_back(VTable);
}
3273 
/// Return the linkage that the type info and type info name constants
/// should have for the given type.
static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
                                                             QualType Ty) {
  // Itanium C++ ABI 2.9.5p7:
  //   In addition, it and all of the intermediate abi::__pointer_type_info
  //   structs in the chain down to the abi::__class_type_info for the
  //   incomplete class type must be prevented from resolving to the
  //   corresponding type_info structs for the complete class type, possibly
  //   by making them local static objects. Finally, a dummy class RTTI is
  //   generated for the incomplete type that will not resolve to the final
  //   complete class RTTI (because the latter need not exist), possibly by
  //   making it a local static object.
  if (ContainsIncompleteClassType(Ty))
    return llvm::GlobalValue::InternalLinkage;

  switch (Ty->getLinkage()) {
  case NoLinkage:
  case InternalLinkage:
  case UniqueExternalLinkage:
    // Types without (visible) external linkage get internal-linkage RTTI.
    return llvm::GlobalValue::InternalLinkage;

  case VisibleNoLinkage:
  case ModuleInternalLinkage:
  case ModuleLinkage:
  case ExternalLinkage:
    // If RTTI is disabled, this type info struct is only going to be used
    // for exception handling. Give it linkonce_odr linkage.
    if (!CGM.getLangOpts().RTTI)
      return llvm::GlobalValue::LinkOnceODRLinkage;

    if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
      if (RD->hasAttr<WeakAttr>())
        return llvm::GlobalValue::WeakODRLinkage;
      if (CGM.getTriple().isWindowsItaniumEnvironment())
        if (RD->hasAttr<DLLImportAttr>() &&
            ShouldUseExternalRTTIDescriptor(CGM, Ty))
          return llvm::GlobalValue::ExternalLinkage;
      // MinGW always uses LinkOnceODRLinkage for type info.
      // Otherwise, keep the RTTI's linkage in sync with the class's vtable.
      if (RD->isDynamicClass() &&
          !CGM.getContext()
               .getTargetInfo()
               .getTriple()
               .isWindowsGNUEnvironment())
        return CGM.getVTableLinkage(RD);
    }

    return llvm::GlobalValue::LinkOnceODRLinkage;
  }

  llvm_unreachable("Invalid linkage!");
}
3327 
3328 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
3329   // We want to operate on the canonical type.
3330   Ty = Ty.getCanonicalType();
3331 
3332   // Check if we've already emitted an RTTI descriptor for this type.
3333   SmallString<256> Name;
3334   llvm::raw_svector_ostream Out(Name);
3335   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3336 
3337   llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3338   if (OldGV && !OldGV->isDeclaration()) {
3339     assert(!OldGV->hasAvailableExternallyLinkage() &&
3340            "available_externally typeinfos not yet implemented");
3341 
3342     return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
3343   }
3344 
3345   // Check if there is already an external RTTI descriptor for this type.
3346   if (IsStandardLibraryRTTIDescriptor(Ty) ||
3347       ShouldUseExternalRTTIDescriptor(CGM, Ty))
3348     return GetAddrOfExternalRTTIDescriptor(Ty);
3349 
3350   // Emit the standard library with external linkage.
3351   llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
3352 
3353   // Give the type_info object and name the formal visibility of the
3354   // type itself.
3355   llvm::GlobalValue::VisibilityTypes llvmVisibility;
3356   if (llvm::GlobalValue::isLocalLinkage(Linkage))
3357     // If the linkage is local, only default visibility makes sense.
3358     llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3359   else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
3360            ItaniumCXXABI::RUK_NonUniqueHidden)
3361     llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3362   else
3363     llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3364 
3365   llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3366       llvm::GlobalValue::DefaultStorageClass;
3367   if (CGM.getTriple().isWindowsItaniumEnvironment()) {
3368     auto RD = Ty->getAsCXXRecordDecl();
3369     if (RD && RD->hasAttr<DLLExportAttr>())
3370       DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
3371   }
3372 
3373   return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
3374 }
3375 
/// BuildTypeInfo - Build the RTTI type info struct for the given type with the
/// given linkage, visibility, and DLL storage class, emit it as a global
/// variable, and return it cast to i8*.
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
  // Add the vtable pointer.
  BuildVTablePointer(cast<Type>(Ty));

  // And the name.
  llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
  llvm::Constant *TypeNameField;

  // If we're supposed to demote the visibility, be sure to set a flag
  // to use a string comparison for type_info comparisons.
  ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
      CXXABI.classifyRTTIUniqueness(Ty, Linkage);
  if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
    // The flag is the sign bit, which on ARM64 is defined to be clear
    // for global pointers.  This is very ARM64-specific.
    TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
    llvm::Constant *flag =
        llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
    TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
    TypeNameField =
        llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
  } else {
    TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
  }
  Fields.push_back(TypeNameField);

  // Add any type-class-specific trailing fields.
  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  // GCC treats vector types as fundamental types.
  case Type::Builtin:
  case Type::Vector:
  case Type::ExtVector:
  case Type::Complex:
  case Type::BlockPointer:
    // Itanium C++ ABI 2.9.5p4:
    // abi::__fundamental_type_info adds no data members to std::type_info.
    break;

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    llvm_unreachable("Pipe type shouldn't get here");

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // Itanium C++ ABI 2.9.5p5:
    // abi::__array_type_info adds no data members to std::type_info.
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // Itanium C++ ABI 2.9.5p5:
    // abi::__function_type_info adds no data members to std::type_info.
    break;

  case Type::Enum:
    // Itanium C++ ABI 2.9.5p5:
    // abi::__enum_type_info adds no data members to std::type_info.
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      // We don't need to emit any fields.
      break;
    }

    // Single public non-virtual inheritance gets the compact
    // __si_class_type_info form; anything else gets __vmi_class_type_info.
    if (CanUseSingleInheritance(RD))
      BuildSIClassTypeInfo(RD);
    else
      BuildVMIClassTypeInfo(RD);

    break;
  }

  case Type::ObjCObject:
  case Type::ObjCInterface:
    BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
    break;

  case Type::ObjCObjectPointer:
    BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
    break;

  case Type::Pointer:
    BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
    break;

  case Type::MemberPointer:
    BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
    break;

  case Type::Atomic:
    // No fields, at least for the moment.
    break;
  }

  llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
  llvm::Module &M = CGM.getModule();
  llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
  llvm::GlobalVariable *GV =
      new llvm::GlobalVariable(M, Init->getType(),
                               /*isConstant=*/true, Linkage, Init, Name);

  // If there's already an old global variable, replace it with the new one.
  // The new one takes over the old one's name, and all uses of the old one
  // are redirected (via a bitcast to the old type) before it is erased.
  if (OldGV) {
    GV->takeName(OldGV);
    llvm::Constant *NewPtr =
      llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtr);
    OldGV->eraseFromParent();
  }

  if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(M.getOrInsertComdat(GV->getName()));

  CharUnits Align =
      CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0));
  GV->setAlignment(Align.getAsAlign());

  // The Itanium ABI specifies that type_info objects must be globally
  // unique, with one exception: if the type is an incomplete class
  // type or a (possibly indirect) pointer to one.  That exception
  // affects the general case of comparing type_info objects produced
  // by the typeid operator, which is why the comparison operators on
  // std::type_info generally use the type_info name pointers instead
  // of the object addresses.  However, the language's built-in uses
  // of RTTI generally require class types to be complete, even when
  // manipulating pointers to those class types.  This allows the
  // implementation of dynamic_cast to rely on address equality tests,
  // which is much faster.

  // All of this is to say that it's important that both the type_info
  // object and the type_info name be uniqued when weakly emitted.

  TypeName->setVisibility(Visibility);
  CGM.setDSOLocal(TypeName);

  GV->setVisibility(Visibility);
  CGM.setDSOLocal(GV);

  TypeName->setDLLStorageClass(DLLStorageClass);
  GV->setDLLStorageClass(DLLStorageClass);

  TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
  GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);

  return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
}
3548 
3549 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3550 /// for the given Objective-C object type.
3551 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3552   // Drop qualifiers.
3553   const Type *T = OT->getBaseType().getTypePtr();
3554   assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3555 
3556   // The builtin types are abi::__class_type_infos and don't require
3557   // extra fields.
3558   if (isa<BuiltinType>(T)) return;
3559 
3560   ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3561   ObjCInterfaceDecl *Super = Class->getSuperClass();
3562 
3563   // Root classes are also __class_type_info.
3564   if (!Super) return;
3565 
3566   QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3567 
3568   // Everything else is single inheritance.
3569   llvm::Constant *BaseTypeInfo =
3570       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3571   Fields.push_back(BaseTypeInfo);
3572 }
3573 
3574 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3575 /// inheritance, according to the Itanium C++ ABI, 2.95p6b.
3576 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3577   // Itanium C++ ABI 2.9.5p6b:
3578   // It adds to abi::__class_type_info a single member pointing to the
3579   // type_info structure for the base type,
3580   llvm::Constant *BaseTypeInfo =
3581     ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3582   Fields.push_back(BaseTypeInfo);
3583 }
3584 
namespace {
  /// SeenBases - Contains virtual and non-virtual bases seen when traversing
  /// a class hierarchy.
  struct SeenBases {
    // Classes that have appeared as a non-virtual base at least once.
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
    // Classes that have appeared as a virtual base at least once.
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
  };
}
3593 
3594 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3595 /// abi::__vmi_class_type_info.
3596 ///
3597 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3598                                              SeenBases &Bases) {
3599 
3600   unsigned Flags = 0;
3601 
3602   auto *BaseDecl =
3603       cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3604 
3605   if (Base->isVirtual()) {
3606     // Mark the virtual base as seen.
3607     if (!Bases.VirtualBases.insert(BaseDecl).second) {
3608       // If this virtual base has been seen before, then the class is diamond
3609       // shaped.
3610       Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3611     } else {
3612       if (Bases.NonVirtualBases.count(BaseDecl))
3613         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3614     }
3615   } else {
3616     // Mark the non-virtual base as seen.
3617     if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3618       // If this non-virtual base has been seen before, then the class has non-
3619       // diamond shaped repeated inheritance.
3620       Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3621     } else {
3622       if (Bases.VirtualBases.count(BaseDecl))
3623         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3624     }
3625   }
3626 
3627   // Walk all bases.
3628   for (const auto &I : BaseDecl->bases())
3629     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3630 
3631   return Flags;
3632 }
3633 
3634 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3635   unsigned Flags = 0;
3636   SeenBases Bases;
3637 
3638   // Walk all bases.
3639   for (const auto &I : RD->bases())
3640     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3641 
3642   return Flags;
3643 }
3644 
3645 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3646 /// classes with bases that do not satisfy the abi::__si_class_type_info
3647 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
3648 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
3649   llvm::Type *UnsignedIntLTy =
3650     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3651 
3652   // Itanium C++ ABI 2.9.5p6c:
3653   //   __flags is a word with flags describing details about the class
3654   //   structure, which may be referenced by using the __flags_masks
3655   //   enumeration. These flags refer to both direct and indirect bases.
3656   unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
3657   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3658 
3659   // Itanium C++ ABI 2.9.5p6c:
3660   //   __base_count is a word with the number of direct proper base class
3661   //   descriptions that follow.
3662   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
3663 
3664   if (!RD->getNumBases())
3665     return;
3666 
3667   // Now add the base class descriptions.
3668 
3669   // Itanium C++ ABI 2.9.5p6c:
3670   //   __base_info[] is an array of base class descriptions -- one for every
3671   //   direct proper base. Each description is of the type:
3672   //
3673   //   struct abi::__base_class_type_info {
3674   //   public:
3675   //     const __class_type_info *__base_type;
3676   //     long __offset_flags;
3677   //
3678   //     enum __offset_flags_masks {
3679   //       __virtual_mask = 0x1,
3680   //       __public_mask = 0x2,
3681   //       __offset_shift = 8
3682   //     };
3683   //   };
3684 
3685   // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
3686   // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
3687   // LLP64 platforms.
3688   // FIXME: Consider updating libc++abi to match, and extend this logic to all
3689   // LLP64 platforms.
3690   QualType OffsetFlagsTy = CGM.getContext().LongTy;
3691   const TargetInfo &TI = CGM.getContext().getTargetInfo();
3692   if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
3693     OffsetFlagsTy = CGM.getContext().LongLongTy;
3694   llvm::Type *OffsetFlagsLTy =
3695       CGM.getTypes().ConvertType(OffsetFlagsTy);
3696 
3697   for (const auto &Base : RD->bases()) {
3698     // The __base_type member points to the RTTI for the base type.
3699     Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
3700 
3701     auto *BaseDecl =
3702         cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
3703 
3704     int64_t OffsetFlags = 0;
3705 
3706     // All but the lower 8 bits of __offset_flags are a signed offset.
3707     // For a non-virtual base, this is the offset in the object of the base
3708     // subobject. For a virtual base, this is the offset in the virtual table of
3709     // the virtual base offset for the virtual base referenced (negative).
3710     CharUnits Offset;
3711     if (Base.isVirtual())
3712       Offset =
3713         CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
3714     else {
3715       const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
3716       Offset = Layout.getBaseClassOffset(BaseDecl);
3717     };
3718 
3719     OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
3720 
3721     // The low-order byte of __offset_flags contains flags, as given by the
3722     // masks from the enumeration __offset_flags_masks.
3723     if (Base.isVirtual())
3724       OffsetFlags |= BCTI_Virtual;
3725     if (Base.getAccessSpecifier() == AS_public)
3726       OffsetFlags |= BCTI_Public;
3727 
3728     Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
3729   }
3730 }
3731 
3732 /// Compute the flags for a __pbase_type_info, and remove the corresponding
3733 /// pieces from \p Type.
3734 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
3735   unsigned Flags = 0;
3736 
3737   if (Type.isConstQualified())
3738     Flags |= ItaniumRTTIBuilder::PTI_Const;
3739   if (Type.isVolatileQualified())
3740     Flags |= ItaniumRTTIBuilder::PTI_Volatile;
3741   if (Type.isRestrictQualified())
3742     Flags |= ItaniumRTTIBuilder::PTI_Restrict;
3743   Type = Type.getUnqualifiedType();
3744 
3745   // Itanium C++ ABI 2.9.5p7:
3746   //   When the abi::__pbase_type_info is for a direct or indirect pointer to an
3747   //   incomplete class type, the incomplete target type flag is set.
3748   if (ContainsIncompleteClassType(Type))
3749     Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
3750 
3751   if (auto *Proto = Type->getAs<FunctionProtoType>()) {
3752     if (Proto->isNothrow()) {
3753       Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
3754       Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
3755     }
3756   }
3757 
3758   return Flags;
3759 }
3760 
/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
/// used for pointer types.
void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
  // Itanium C++ ABI 2.9.5p7:
  //   __flags is a flag word describing the cv-qualification and other
  //   attributes of the type pointed to
  // Note: extractPBaseFlags also strips the flagged qualifiers (and any
  // noexcept) from PointeeTy, so the type_info built below is for the
  // unqualified pointee.
  unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);

  llvm::Type *UnsignedIntLTy =
    CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));

  // Itanium C++ ABI 2.9.5p7:
  //  __pointee is a pointer to the std::type_info derivation for the
  //  unqualified type being pointed to.
  llvm::Constant *PointeeTypeInfo =
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
  Fields.push_back(PointeeTypeInfo);
}
3780 
/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
/// struct, used for member pointer types.
void
ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
  QualType PointeeTy = Ty->getPointeeType();

  // Itanium C++ ABI 2.9.5p7:
  //   __flags is a flag word describing the cv-qualification and other
  //   attributes of the type pointed to.
  // Note: extractPBaseFlags also strips the flagged qualifiers from
  // PointeeTy before the pointee's type_info is built below.
  unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);

  // Additionally flag an incomplete containing class (the "A" in "int A::*").
  const RecordType *ClassType = cast<RecordType>(Ty->getClass());
  if (IsIncompleteClassType(ClassType))
    Flags |= PTI_ContainingClassIncomplete;

  llvm::Type *UnsignedIntLTy =
    CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));

  // Itanium C++ ABI 2.9.5p7:
  //   __pointee is a pointer to the std::type_info derivation for the
  //   unqualified type being pointed to.
  llvm::Constant *PointeeTypeInfo =
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
  Fields.push_back(PointeeTypeInfo);

  // Itanium C++ ABI 2.9.5p9:
  //   __context is a pointer to an abi::__class_type_info corresponding to the
  //   class type containing the member pointed to
  //   (e.g., the "A" in "int A::*").
  Fields.push_back(
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
}
3814 
/// Return the address of the RTTI descriptor (std::type_info object) for the
/// given type, building and emitting it if necessary.
llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
  return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
}
3818 
/// Emit RTTI descriptors, with external linkage, for every fundamental type
/// plus the pointer and const-pointer variants of each.  The DLL storage
/// class and visibility are taken from \p RD (presumably the standard
/// library class these descriptors are anchored to -- confirm with callers).
void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
  // Types added here must also be added to TypeInfoIsInStandardLibrary.
  QualType FundamentalTypes[] = {
      getContext().VoidTy,             getContext().NullPtrTy,
      getContext().BoolTy,             getContext().WCharTy,
      getContext().CharTy,             getContext().UnsignedCharTy,
      getContext().SignedCharTy,       getContext().ShortTy,
      getContext().UnsignedShortTy,    getContext().IntTy,
      getContext().UnsignedIntTy,      getContext().LongTy,
      getContext().UnsignedLongTy,     getContext().LongLongTy,
      getContext().UnsignedLongLongTy, getContext().Int128Ty,
      getContext().UnsignedInt128Ty,   getContext().HalfTy,
      getContext().FloatTy,            getContext().DoubleTy,
      getContext().LongDoubleTy,       getContext().Float128Ty,
      getContext().Char8Ty,            getContext().Char16Ty,
      getContext().Char32Ty
  };
  // Export the descriptors iff RD itself is dllexport.
  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
      RD->hasAttr<DLLExportAttr>()
      ? llvm::GlobalValue::DLLExportStorageClass
      : llvm::GlobalValue::DefaultStorageClass;
  llvm::GlobalValue::VisibilityTypes Visibility =
      CodeGenModule::GetLLVMVisibility(RD->getVisibility());
  // For each fundamental type T, emit type_info for T, T*, and const T*.
  for (const QualType &FundamentalType : FundamentalTypes) {
    QualType PointerType = getContext().getPointerType(FundamentalType);
    QualType PointerTypeConst = getContext().getPointerType(
        FundamentalType.withConst());
    for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
      ItaniumRTTIBuilder(*this).BuildTypeInfo(
          Type, llvm::GlobalValue::ExternalLinkage,
          Visibility, DLLStorageClass);
  }
}
3852 
3853 /// What sort of uniqueness rules should we use for the RTTI for the
3854 /// given type?
3855 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
3856     QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
3857   if (shouldRTTIBeUnique())
3858     return RUK_Unique;
3859 
3860   // It's only necessary for linkonce_odr or weak_odr linkage.
3861   if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
3862       Linkage != llvm::GlobalValue::WeakODRLinkage)
3863     return RUK_Unique;
3864 
3865   // It's only necessary with default visibility.
3866   if (CanTy->getVisibility() != DefaultVisibility)
3867     return RUK_Unique;
3868 
3869   // If we're not required to publish this symbol, hide it.
3870   if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
3871     return RUK_NonUniqueHidden;
3872 
3873   // If we're required to publish this symbol, as we might be under an
3874   // explicit instantiation, leave it with default visibility but
3875   // enable string-comparisons.
3876   assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
3877   return RUK_NonUniqueVisible;
3878 }
3879 
// Find out how to codegen the complete destructor and constructor
namespace {
// How a complete-object structor relates to its base-object variant:
//   Emit   - emit a separate definition for it.
//   RAUW   - replace all uses of the complete symbol with the base symbol.
//   Alias  - emit the complete variant as an alias of the base variant.
//   COMDAT - emit both variants grouped in a named COMDAT (C5/D5 mangling).
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
}
/// Decide how to emit the complete-object constructor/destructor of \p MD
/// relative to its base-object variant (see StructorCodegen).
static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
                                       const CXXMethodDecl *MD) {
  // Aliasing structors is opt-in via -mconstructor-aliases.
  if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
    return StructorCodegen::Emit;

  // The complete and base structors are not equivalent if there are any virtual
  // bases, so emit separate functions.
  if (MD->getParent()->getNumVBases())
    return StructorCodegen::Emit;

  // Compute the linkage the complete variant would get.
  GlobalDecl AliasDecl;
  if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
    AliasDecl = GlobalDecl(DD, Dtor_Complete);
  } else {
    const auto *CD = cast<CXXConstructorDecl>(MD);
    AliasDecl = GlobalDecl(CD, Ctor_Complete);
  }
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  // Discardable definitions don't need a symbol of their own: just point
  // all uses of the complete symbol at the base definition.
  if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
    return StructorCodegen::RAUW;

  // FIXME: Should we allow available_externally aliases?
  if (!llvm::GlobalAlias::isValidLinkage(Linkage))
    return StructorCodegen::RAUW;

  if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
    // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
    if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
        CGM.getTarget().getTriple().isOSBinFormatWasm())
      return StructorCodegen::COMDAT;
    return StructorCodegen::Emit;
  }

  return StructorCodegen::Alias;
}
3920 
/// Emit the structor \p AliasDecl as an alias of the equivalent structor
/// \p TargetDecl, replacing any pre-existing declaration with that name.
static void emitConstructorDestructorAlias(CodeGenModule &CGM,
                                           GlobalDecl AliasDecl,
                                           GlobalDecl TargetDecl) {
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  // If the alias name already has a *definition*, there is nothing to do.
  StringRef MangledName = CGM.getMangledName(AliasDecl);
  llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
  if (Entry && !Entry->isDeclaration())
    return;

  auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));

  // Create the alias with no name.
  auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);

  // Constructors and destructors are always unnamed_addr.
  Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Switch any previous uses to the alias.
  if (Entry) {
    assert(Entry->getType() == Aliasee->getType() &&
           "declaration exists with different type");
    // Steal the name from the old declaration before erasing it so the
    // alias ends up under the mangled name.
    Alias->takeName(Entry);
    Entry->replaceAllUsesWith(Alias);
    Entry->eraseFromParent();
  } else {
    Alias->setName(MangledName);
  }

  // Finally, set up the alias with its proper name and attributes.
  CGM.SetCommonAttributes(AliasDecl, Alias);
}
3953 
/// Emit the constructor or destructor variant identified by \p GD, either as
/// a real definition or folded into its base variant per getCodegenToUse.
void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());
  auto *CD = dyn_cast<CXXConstructorDecl>(MD);
  // Exactly one of CD/DD is non-null: a structor is a ctor or a dtor.
  const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);

  StructorCodegen CGType = getCodegenToUse(CGM, MD);

  // Complete-object variants may be foldable into the base variant.
  if (CD ? GD.getCtorType() == Ctor_Complete
         : GD.getDtorType() == Dtor_Complete) {
    GlobalDecl BaseDecl;
    if (CD)
      BaseDecl = GD.getWithCtorType(Ctor_Base);
    else
      BaseDecl = GD.getWithDtorType(Dtor_Base);

    if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
      emitConstructorDestructorAlias(CGM, GD, BaseDecl);
      return;
    }

    if (CGType == StructorCodegen::RAUW) {
      // Record a deferred replacement of the complete symbol's uses with
      // the base definition; no complete symbol is emitted at all.
      StringRef MangledName = CGM.getMangledName(GD);
      auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
      CGM.addReplacement(MangledName, Aliasee);
      return;
    }
  }

  // The base destructor is equivalent to the base destructor of its
  // base class if there is exactly one non-virtual base class with a
  // non-trivial destructor, there are no fields with a non-trivial
  // destructor, and the body of the destructor is trivial.
  if (DD && GD.getDtorType() == Dtor_Base &&
      CGType != StructorCodegen::COMDAT &&
      !CGM.TryEmitBaseDestructorAsAlias(DD))
    return;

  // FIXME: The deleting destructor is equivalent to the selected operator
  // delete if:
  //  * either the delete is a destroying operator delete or the destructor
  //    would be trivial if it weren't virtual,
  //  * the conversion from the 'this' parameter to the first parameter of the
  //    destructor is equivalent to a bitcast,
  //  * the destructor does not have an implicit "this" return, and
  //  * the operator delete has the same calling convention and IR function type
  //    as the destructor.
  // In such cases we should try to emit the deleting dtor as an alias to the
  // selected 'operator delete'.

  // Emit a real definition for this variant.
  llvm::Function *Fn = CGM.codegenCXXStructor(GD);

  if (CGType == StructorCodegen::COMDAT) {
    // Group the variants under the C5/D5 (grouped structor) mangled name.
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    if (DD)
      getMangleContext().mangleCXXDtorComdat(DD, Out);
    else
      getMangleContext().mangleCXXCtorComdat(CD, Out);
    llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
    Fn->setComdat(C);
  } else {
    CGM.maybeSetTrivialComdat(*MD, *Fn);
  }
}
4018 
4019 static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4020   // void *__cxa_begin_catch(void*);
4021   llvm::FunctionType *FTy = llvm::FunctionType::get(
4022       CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4023 
4024   return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4025 }
4026 
4027 static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4028   // void __cxa_end_catch();
4029   llvm::FunctionType *FTy =
4030       llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4031 
4032   return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4033 }
4034 
4035 static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4036   // void *__cxa_get_exception_ptr(void*);
4037   llvm::FunctionType *FTy = llvm::FunctionType::get(
4038       CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4039 
4040   return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4041 }
4042 
namespace {
  /// A cleanup to call __cxa_end_catch.  In many cases, the caught
  /// exception type lets us state definitively that the thrown exception
  /// type does not have a destructor.  In particular:
  ///   - Catch-alls tell us nothing, so we have to conservatively
  ///     assume that the thrown exception might have a destructor.
  ///   - Catches by reference behave according to their base types.
  ///   - Catches of non-record types will only trigger for exceptions
  ///     of non-record types, which never have destructors.
  ///   - Catches of record types can trigger for arbitrary subclasses
  ///     of the caught type, so we have to assume the actual thrown
  ///     exception type might have a throwing destructor, even if the
  ///     caught type's destructor is trivial or nothrow.
  struct CallEndCatch final : EHScopeStack::Cleanup {
    CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
    // True when the thrown exception's destructor might throw (see the
    // cases above), requiring __cxa_end_catch to be emitted as an invoke.
    bool MightThrow;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      if (!MightThrow) {
        // A plain nounwind call suffices when end-catch can't throw.
        CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
        return;
      }

      CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
    }
  };
}
4070 
4071 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
4072 /// __cxa_end_catch.
4073 ///
4074 /// \param EndMightThrow - true if __cxa_end_catch might throw
4075 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4076                                    llvm::Value *Exn,
4077                                    bool EndMightThrow) {
4078   llvm::CallInst *call =
4079     CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
4080 
4081   CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
4082 
4083   return call;
4084 }
4085 
/// A "special initializer" callback for initializing a catch
/// parameter during catch initialization.  Emits the __cxa_begin_catch
/// (and, for aggregates with a copy expression, __cxa_get_exception_ptr)
/// calls and stores the caught value into \p ParamAddr.
static void InitCatchParam(CodeGenFunction &CGF,
                           const VarDecl &CatchParam,
                           Address ParamAddr,
                           SourceLocation Loc) {
  // Load the exception from where the landing pad saved it.
  llvm::Value *Exn = CGF.getExceptionFromSlot();

  CanQualType CatchType =
    CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
  llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);

  // If we're catching by reference, we can just cast the object
  // pointer to the appropriate pointer.
  if (isa<ReferenceType>(CatchType)) {
    QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
    // Only a record type can have a destructor that might throw from
    // __cxa_end_catch (see CallEndCatch above).
    bool EndCatchMightThrow = CaughtType->isRecordType();

    // __cxa_begin_catch returns the adjusted object pointer.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);

    // We have no way to tell the personality function that we're
    // catching by reference, so if we're catching a pointer,
    // __cxa_begin_catch will actually return that pointer by value.
    if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
      QualType PointeeType = PT->getPointeeType();

      // When catching by reference, generally we should just ignore
      // this by-value pointer and use the exception object instead.
      if (!PointeeType->isRecordType()) {

        // Exn points to the struct _Unwind_Exception header, which
        // we have to skip past in order to reach the exception data.
        unsigned HeaderSize =
          CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
        AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);

      // However, if we're catching a pointer-to-record type that won't
      // work, because the personality function might have adjusted
      // the pointer.  There's actually no way for us to fully satisfy
      // the language/ABI contract here:  we can't use Exn because it
      // might have the wrong adjustment, but we can't use the by-value
      // pointer because it's off by a level of abstraction.
      //
      // The current solution is to dump the adjusted pointer into an
      // alloca, which breaks language semantics (because changing the
      // pointer doesn't change the exception) but at least works.
      // The better solution would be to filter out non-exact matches
      // and rethrow them, but this is tricky because the rethrow
      // really needs to be catchable by other sites at this landing
      // pad.  The best solution is to fix the personality function.
      } else {
        // Pull the pointer for the reference type off.
        llvm::Type *PtrTy =
          cast<llvm::PointerType>(LLVMCatchTy)->getElementType();

        // Create the temporary and write the adjusted pointer into it.
        Address ExnPtrTmp =
          CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
        llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
        CGF.Builder.CreateStore(Casted, ExnPtrTmp);

        // Bind the reference to the temporary.
        AdjustedExn = ExnPtrTmp.getPointer();
      }
    }

    llvm::Value *ExnCast =
      CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
    CGF.Builder.CreateStore(ExnCast, ParamAddr);
    return;
  }

  // Scalars and complexes.
  TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
  if (TEK != TEK_Aggregate) {
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);

    // If the catch type is a pointer type, __cxa_begin_catch returns
    // the pointer by value.
    if (CatchType->hasPointerRepresentation()) {
      llvm::Value *CastExn =
        CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");

      // Under ARC, retain a caught __strong pointer before storing it.
      switch (CatchType.getQualifiers().getObjCLifetime()) {
      case Qualifiers::OCL_Strong:
        CastExn = CGF.EmitARCRetainNonBlock(CastExn);
        LLVM_FALLTHROUGH;

      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        CGF.Builder.CreateStore(CastExn, ParamAddr);
        return;

      case Qualifiers::OCL_Weak:
        CGF.EmitARCInitWeak(ParamAddr, CastExn);
        return;
      }
      llvm_unreachable("bad ownership qualifier!");
    }

    // Otherwise, it returns a pointer into the exception object.

    llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
    llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);

    LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
    LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
    switch (TEK) {
    case TEK_Complex:
      CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
                             /*init*/ true);
      return;
    case TEK_Scalar: {
      llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
      CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
      return;
    }
    case TEK_Aggregate:
      llvm_unreachable("evaluation kind filtered out!");
    }
    llvm_unreachable("bad evaluation kind");
  }

  assert(isa<RecordType>(CatchType) && "unexpected catch type!");
  auto catchRD = CatchType->getAsCXXRecordDecl();
  CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);

  llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok

  // Check for a copy expression.  If we don't have a copy expression,
  // that means a trivial copy is okay.
  const Expr *copyExpr = CatchParam.getInit();
  if (!copyExpr) {
    llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
    Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                        caughtExnAlignment);
    LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
    LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
    CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
    return;
  }

  // We have to call __cxa_get_exception_ptr to get the adjusted
  // pointer before copying.
  llvm::CallInst *rawAdjustedExn =
    CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);

  // Cast that to the appropriate type.
  Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                      caughtExnAlignment);

  // The copy expression is defined in terms of an OpaqueValueExpr.
  // Find it and map it to the adjusted expression.
  CodeGenFunction::OpaqueValueMapping
    opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
           CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));

  // Call the copy ctor in a terminate scope.
  CGF.EHStack.pushTerminate();

  // Perform the copy construction.
  CGF.EmitAggExpr(copyExpr,
                  AggValueSlot::forAddr(ParamAddr, Qualifiers(),
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        AggValueSlot::DoesNotOverlap));

  // Leave the terminate scope.
  CGF.EHStack.popTerminate();

  // Undo the opaque value mapping.
  opaque.pop();

  // Finally we can call __cxa_begin_catch.
  CallBeginCatch(CGF, Exn, true);
}
4266 
/// Begins a catch statement by initializing the catch variable and
/// calling __cxa_begin_catch.
void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
                                   const CXXCatchStmt *S) {
  // We have to be very careful with the ordering of cleanups here:
  //   C++ [except.throw]p4:
  //     The destruction [of the exception temporary] occurs
  //     immediately after the destruction of the object declared in
  //     the exception-declaration in the handler.
  //
  // So the precise ordering is:
  //   1.  Construct catch variable.
  //   2.  __cxa_begin_catch
  //   3.  Enter __cxa_end_catch cleanup
  //   4.  Enter dtor cleanup
  //
  // We do this by using a slightly abnormal initialization process.
  // Delegation sequence:
  //   - ExitCXXTryStmt opens a RunCleanupsScope
  //     - EmitAutoVarAlloca creates the variable and debug info
  //       - InitCatchParam initializes the variable from the exception
  //       - CallBeginCatch calls __cxa_begin_catch
  //       - CallBeginCatch enters the __cxa_end_catch cleanup
  //     - EmitAutoVarCleanups enters the variable destructor cleanup
  //   - EmitCXXTryStmt emits the code for the catch body
  //   - EmitCXXTryStmt close the RunCleanupsScope

  VarDecl *CatchParam = S->getExceptionDecl();
  if (!CatchParam) {
    // No declaration (e.g. catch (...)): just enter the catch.  We pass
    // EndMightThrow=true because the thrown type is unknown and its
    // destructor might throw (see CallEndCatch).
    llvm::Value *Exn = CGF.getExceptionFromSlot();
    CallBeginCatch(CGF, Exn, true);
    return;
  }

  // Emit the local.
  CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
  InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
  CGF.EmitAutoVarCleanups(var);
}
4306 
/// Get or define the following function:
///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
/// This code is used only in C++.  The helper calls __cxa_begin_catch on
/// the exception and then std::terminate, so the terminate handler runs
/// with the exception "caught".
static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
  llvm::FunctionType *fnTy =
    llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
  llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
      fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
  llvm::Function *fn =
      cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
  // An empty function is a fresh declaration; give it a body.  Otherwise a
  // previous call already defined it and it can be reused as-is.
  if (fn->empty()) {
    fn->setDoesNotThrow();
    fn->setDoesNotReturn();

    // What we really want is to massively penalize inlining without
    // forbidding it completely.  The difference between that and
    // 'noinline' is negligible.
    fn->addFnAttr(llvm::Attribute::NoInline);

    // Allow this function to be shared across translation units, but
    // we don't want it to turn into an exported symbol.
    fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
    fn->setVisibility(llvm::Function::HiddenVisibility);
    if (CGM.supportsCOMDAT())
      fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));

    // Set up the function.
    llvm::BasicBlock *entry =
        llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
    CGBuilderTy builder(CGM, entry);

    // Pull the exception pointer out of the parameter list.
    llvm::Value *exn = &*fn->arg_begin();

    // Call __cxa_begin_catch(exn).
    llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
    catchCall->setDoesNotThrow();
    catchCall->setCallingConv(CGM.getRuntimeCC());

    // Call std::terminate().
    llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
    termCall->setDoesNotThrow();
    termCall->setDoesNotReturn();
    termCall->setCallingConv(CGM.getRuntimeCC());

    // std::terminate cannot return.
    builder.CreateUnreachable();
  }
  return fnRef;
}
4357 
4358 llvm::CallInst *
4359 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4360                                                    llvm::Value *Exn) {
4361   // In C++, we want to call __cxa_begin_catch() before terminating.
4362   if (Exn) {
4363     assert(CGF.CGM.getLangOpts().CPlusPlus);
4364     return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4365   }
4366   return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
4367 }
4368 
/// Load the vtable pointer of the object at \p This, returning it paired
/// with the class the load was performed for (here, \p RD unchanged).
std::pair<llvm::Value *, const CXXRecordDecl *>
ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
                             const CXXRecordDecl *RD) {
  return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
}
4374 
/// Wasm variant of emitBeginCatch: when the target has the
/// exception-handling feature, first push a CatchRetScope cleanup for the
/// current catchpad funclet (presumably to emit the funclet's catchret on
/// exit -- confirm in CGCleanup), then defer to the Itanium implementation.
void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
                                       const CXXCatchStmt *C) {
  if (CGF.getTarget().hasFeature("exception-handling"))
    CGF.EHStack.pushCleanup<CatchRetScope>(
        NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
  ItaniumCXXABI::emitBeginCatch(CGF, C);
}
4382