1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This provides C++ code generation targeting the Itanium C++ ABI. The class 10 // in this file generates structures that follow the Itanium C++ ABI, which is 11 // documented at: 12 // http://www.codesourcery.com/public/cxx-abi/abi.html 13 // http://www.codesourcery.com/public/cxx-abi/abi-eh.html 14 // 15 // It also supports the closely-related ARM ABI, documented at: 16 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf 17 // 18 //===----------------------------------------------------------------------===// 19 20 #include "CGCXXABI.h" 21 #include "CGCleanup.h" 22 #include "CGRecordLayout.h" 23 #include "CGVTables.h" 24 #include "CodeGenFunction.h" 25 #include "CodeGenModule.h" 26 #include "TargetInfo.h" 27 #include "clang/AST/Attr.h" 28 #include "clang/AST/Mangle.h" 29 #include "clang/AST/StmtCXX.h" 30 #include "clang/AST/Type.h" 31 #include "clang/CodeGen/ConstantInitBuilder.h" 32 #include "llvm/IR/DataLayout.h" 33 #include "llvm/IR/GlobalValue.h" 34 #include "llvm/IR/Instructions.h" 35 #include "llvm/IR/Intrinsics.h" 36 #include "llvm/IR/Value.h" 37 #include "llvm/Support/ScopedPrinter.h" 38 39 using namespace clang; 40 using namespace CodeGen; 41 42 namespace { 43 class ItaniumCXXABI : public CodeGen::CGCXXABI { 44 /// VTables - All the vtables which have been defined. 45 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables; 46 47 /// All the thread wrapper functions that have been used. 
48 llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8> 49 ThreadWrappers; 50 51 protected: 52 bool UseARMMethodPtrABI; 53 bool UseARMGuardVarABI; 54 bool Use32BitVTableOffsetABI; 55 56 ItaniumMangleContext &getMangleContext() { 57 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext()); 58 } 59 60 public: 61 ItaniumCXXABI(CodeGen::CodeGenModule &CGM, 62 bool UseARMMethodPtrABI = false, 63 bool UseARMGuardVarABI = false) : 64 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI), 65 UseARMGuardVarABI(UseARMGuardVarABI), 66 Use32BitVTableOffsetABI(false) { } 67 68 bool classifyReturnType(CGFunctionInfo &FI) const override; 69 70 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override { 71 // If C++ prohibits us from making a copy, pass by address. 72 if (!RD->canPassInRegisters()) 73 return RAA_Indirect; 74 return RAA_Default; 75 } 76 77 bool isThisCompleteObject(GlobalDecl GD) const override { 78 // The Itanium ABI has separate complete-object vs. base-object 79 // variants of both constructors and destructors. 80 if (isa<CXXDestructorDecl>(GD.getDecl())) { 81 switch (GD.getDtorType()) { 82 case Dtor_Complete: 83 case Dtor_Deleting: 84 return true; 85 86 case Dtor_Base: 87 return false; 88 89 case Dtor_Comdat: 90 llvm_unreachable("emitting dtor comdat as function?"); 91 } 92 llvm_unreachable("bad dtor kind"); 93 } 94 if (isa<CXXConstructorDecl>(GD.getDecl())) { 95 switch (GD.getCtorType()) { 96 case Ctor_Complete: 97 return true; 98 99 case Ctor_Base: 100 return false; 101 102 case Ctor_CopyingClosure: 103 case Ctor_DefaultClosure: 104 llvm_unreachable("closure ctors in Itanium ABI?"); 105 106 case Ctor_Comdat: 107 llvm_unreachable("emitting ctor comdat as function?"); 108 } 109 llvm_unreachable("bad dtor kind"); 110 } 111 112 // No other kinds. 
113 return false; 114 } 115 116 bool isZeroInitializable(const MemberPointerType *MPT) override; 117 118 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override; 119 120 CGCallee 121 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, 122 const Expr *E, 123 Address This, 124 llvm::Value *&ThisPtrForCall, 125 llvm::Value *MemFnPtr, 126 const MemberPointerType *MPT) override; 127 128 llvm::Value * 129 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E, 130 Address Base, 131 llvm::Value *MemPtr, 132 const MemberPointerType *MPT) override; 133 134 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF, 135 const CastExpr *E, 136 llvm::Value *Src) override; 137 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E, 138 llvm::Constant *Src) override; 139 140 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override; 141 142 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override; 143 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT, 144 CharUnits offset) override; 145 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override; 146 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD, 147 CharUnits ThisAdjustment); 148 149 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF, 150 llvm::Value *L, llvm::Value *R, 151 const MemberPointerType *MPT, 152 bool Inequality) override; 153 154 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF, 155 llvm::Value *Addr, 156 const MemberPointerType *MPT) override; 157 158 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE, 159 Address Ptr, QualType ElementType, 160 const CXXDestructorDecl *Dtor) override; 161 162 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override; 163 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override; 164 165 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override; 166 167 llvm::CallInst * 168 
emitTerminateForUnexpectedException(CodeGenFunction &CGF, 169 llvm::Value *Exn) override; 170 171 void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD); 172 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override; 173 CatchTypeInfo 174 getAddrOfCXXCatchHandlerType(QualType Ty, 175 QualType CatchHandlerType) override { 176 return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0}; 177 } 178 179 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override; 180 void EmitBadTypeidCall(CodeGenFunction &CGF) override; 181 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy, 182 Address ThisPtr, 183 llvm::Type *StdTypeInfoPtrTy) override; 184 185 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr, 186 QualType SrcRecordTy) override; 187 188 llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value, 189 QualType SrcRecordTy, QualType DestTy, 190 QualType DestRecordTy, 191 llvm::BasicBlock *CastEnd) override; 192 193 llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value, 194 QualType SrcRecordTy, 195 QualType DestTy) override; 196 197 bool EmitBadCastCall(CodeGenFunction &CGF) override; 198 199 llvm::Value * 200 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This, 201 const CXXRecordDecl *ClassDecl, 202 const CXXRecordDecl *BaseClassDecl) override; 203 204 void EmitCXXConstructors(const CXXConstructorDecl *D) override; 205 206 AddedStructorArgCounts 207 buildStructorSignature(GlobalDecl GD, 208 SmallVectorImpl<CanQualType> &ArgTys) override; 209 210 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor, 211 CXXDtorType DT) const override { 212 // Itanium does not emit any destructor variant as an inline thunk. 213 // Delegating may occur as an optimization, but all variants are either 214 // emitted with external linkage or as linkonce if they are inline and used. 
215 return false; 216 } 217 218 void EmitCXXDestructors(const CXXDestructorDecl *D) override; 219 220 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy, 221 FunctionArgList &Params) override; 222 223 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override; 224 225 AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF, 226 const CXXConstructorDecl *D, 227 CXXCtorType Type, 228 bool ForVirtualBase, 229 bool Delegating) override; 230 231 llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF, 232 const CXXDestructorDecl *DD, 233 CXXDtorType Type, 234 bool ForVirtualBase, 235 bool Delegating) override; 236 237 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD, 238 CXXDtorType Type, bool ForVirtualBase, 239 bool Delegating, Address This, 240 QualType ThisTy) override; 241 242 void emitVTableDefinitions(CodeGenVTables &CGVT, 243 const CXXRecordDecl *RD) override; 244 245 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF, 246 CodeGenFunction::VPtr Vptr) override; 247 248 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override { 249 return true; 250 } 251 252 llvm::Constant * 253 getVTableAddressPoint(BaseSubobject Base, 254 const CXXRecordDecl *VTableClass) override; 255 256 llvm::Value *getVTableAddressPointInStructor( 257 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, 258 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override; 259 260 llvm::Value *getVTableAddressPointInStructorWithVTT( 261 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, 262 BaseSubobject Base, const CXXRecordDecl *NearestVBase); 263 264 llvm::Constant * 265 getVTableAddressPointForConstExpr(BaseSubobject Base, 266 const CXXRecordDecl *VTableClass) override; 267 268 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD, 269 CharUnits VPtrOffset) override; 270 271 CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, 272 Address This, llvm::Type *Ty, 
273 SourceLocation Loc) override; 274 275 llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF, 276 const CXXDestructorDecl *Dtor, 277 CXXDtorType DtorType, Address This, 278 DeleteOrMemberCallExpr E) override; 279 280 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override; 281 282 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override; 283 bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const; 284 285 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD, 286 bool ReturnAdjustment) override { 287 // Allow inlining of thunks by emitting them with available_externally 288 // linkage together with vtables when needed. 289 if (ForVTable && !Thunk->hasLocalLinkage()) 290 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage); 291 CGM.setGVProperties(Thunk, GD); 292 } 293 294 bool exportThunk() override { return true; } 295 296 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This, 297 const ThisAdjustment &TA) override; 298 299 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret, 300 const ReturnAdjustment &RA) override; 301 302 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *, 303 FunctionArgList &Args) const override { 304 assert(!Args.empty() && "expected the arglist to not be empty!"); 305 return Args.size() - 1; 306 } 307 308 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; } 309 StringRef GetDeletedVirtualCallName() override 310 { return "__cxa_deleted_virtual"; } 311 312 CharUnits getArrayCookieSizeImpl(QualType elementType) override; 313 Address InitializeArrayCookie(CodeGenFunction &CGF, 314 Address NewPtr, 315 llvm::Value *NumElements, 316 const CXXNewExpr *expr, 317 QualType ElementType) override; 318 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, 319 Address allocPtr, 320 CharUnits cookieSize) override; 321 322 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D, 323 llvm::GlobalVariable 
*DeclPtr, 324 bool PerformInit) override; 325 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, 326 llvm::FunctionCallee dtor, 327 llvm::Constant *addr) override; 328 329 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD, 330 llvm::Value *Val); 331 void EmitThreadLocalInitFuncs( 332 CodeGenModule &CGM, 333 ArrayRef<const VarDecl *> CXXThreadLocals, 334 ArrayRef<llvm::Function *> CXXThreadLocalInits, 335 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override; 336 337 /// Determine whether we will definitely emit this variable with a constant 338 /// initializer, either because the language semantics demand it or because 339 /// we know that the initializer is a constant. 340 bool isEmittedWithConstantInitializer(const VarDecl *VD) const { 341 VD = VD->getMostRecentDecl(); 342 if (VD->hasAttr<ConstInitAttr>()) 343 return true; 344 345 // All later checks examine the initializer specified on the variable. If 346 // the variable is weak, such examination would not be correct. 347 if (VD->isWeak() || VD->hasAttr<SelectAnyAttr>()) 348 return false; 349 350 const VarDecl *InitDecl = VD->getInitializingDeclaration(); 351 if (!InitDecl) 352 return false; 353 354 // If there's no initializer to run, this is constant initialization. 355 if (!InitDecl->hasInit()) 356 return true; 357 358 // If we have the only definition, we don't need a thread wrapper if we 359 // will emit the value as a constant. 360 if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD))) 361 return !VD->needsDestruction(getContext()) && InitDecl->evaluateValue(); 362 363 // Otherwise, we need a thread wrapper unless we know that every 364 // translation unit will emit the value as a constant. We rely on 365 // ICE-ness not varying between translation units, which isn't actually 366 // guaranteed by the standard but is necessary for sanity. 
367 return InitDecl->isInitKnownICE() && InitDecl->isInitICE(); 368 } 369 370 bool usesThreadWrapperFunction(const VarDecl *VD) const override { 371 return !isEmittedWithConstantInitializer(VD) || 372 VD->needsDestruction(getContext()); 373 } 374 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, 375 QualType LValType) override; 376 377 bool NeedsVTTParameter(GlobalDecl GD) override; 378 379 /**************************** RTTI Uniqueness ******************************/ 380 381 protected: 382 /// Returns true if the ABI requires RTTI type_info objects to be unique 383 /// across a program. 384 virtual bool shouldRTTIBeUnique() const { return true; } 385 386 public: 387 /// What sort of unique-RTTI behavior should we use? 388 enum RTTIUniquenessKind { 389 /// We are guaranteeing, or need to guarantee, that the RTTI string 390 /// is unique. 391 RUK_Unique, 392 393 /// We are not guaranteeing uniqueness for the RTTI string, so we 394 /// can demote to hidden visibility but must use string comparisons. 395 RUK_NonUniqueHidden, 396 397 /// We are not guaranteeing uniqueness for the RTTI string, so we 398 /// have to use string comparisons, but we also have to emit it with 399 /// non-hidden visibility. 400 RUK_NonUniqueVisible 401 }; 402 403 /// Return the required visibility status for the given type and linkage in 404 /// the current ABI. 
405 RTTIUniquenessKind 406 classifyRTTIUniqueness(QualType CanTy, 407 llvm::GlobalValue::LinkageTypes Linkage) const; 408 friend class ItaniumRTTIBuilder; 409 410 void emitCXXStructor(GlobalDecl GD) override; 411 412 std::pair<llvm::Value *, const CXXRecordDecl *> 413 LoadVTablePtr(CodeGenFunction &CGF, Address This, 414 const CXXRecordDecl *RD) override; 415 416 private: 417 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const { 418 const auto &VtableLayout = 419 CGM.getItaniumVTableContext().getVTableLayout(RD); 420 421 for (const auto &VtableComponent : VtableLayout.vtable_components()) { 422 // Skip empty slot. 423 if (!VtableComponent.isUsedFunctionPointerKind()) 424 continue; 425 426 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl(); 427 if (!Method->getCanonicalDecl()->isInlined()) 428 continue; 429 430 StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl()); 431 auto *Entry = CGM.GetGlobalValue(Name); 432 // This checks if virtual inline function has already been emitted. 433 // Note that it is possible that this inline function would be emitted 434 // after trying to emit vtable speculatively. Because of this we do 435 // an extra pass after emitting all deferred vtables to find and emit 436 // these vtables opportunistically. 
437 if (!Entry || Entry->isDeclaration()) 438 return true; 439 } 440 return false; 441 } 442 443 bool isVTableHidden(const CXXRecordDecl *RD) const { 444 const auto &VtableLayout = 445 CGM.getItaniumVTableContext().getVTableLayout(RD); 446 447 for (const auto &VtableComponent : VtableLayout.vtable_components()) { 448 if (VtableComponent.isRTTIKind()) { 449 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl(); 450 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility) 451 return true; 452 } else if (VtableComponent.isUsedFunctionPointerKind()) { 453 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl(); 454 if (Method->getVisibility() == Visibility::HiddenVisibility && 455 !Method->isDefined()) 456 return true; 457 } 458 } 459 return false; 460 } 461 }; 462 463 class ARMCXXABI : public ItaniumCXXABI { 464 public: 465 ARMCXXABI(CodeGen::CodeGenModule &CGM) : 466 ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true, 467 /*UseARMGuardVarABI=*/true) {} 468 469 bool HasThisReturn(GlobalDecl GD) const override { 470 return (isa<CXXConstructorDecl>(GD.getDecl()) || ( 471 isa<CXXDestructorDecl>(GD.getDecl()) && 472 GD.getDtorType() != Dtor_Deleting)); 473 } 474 475 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV, 476 QualType ResTy) override; 477 478 CharUnits getArrayCookieSizeImpl(QualType elementType) override; 479 Address InitializeArrayCookie(CodeGenFunction &CGF, 480 Address NewPtr, 481 llvm::Value *NumElements, 482 const CXXNewExpr *expr, 483 QualType ElementType) override; 484 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr, 485 CharUnits cookieSize) override; 486 }; 487 488 class iOS64CXXABI : public ARMCXXABI { 489 public: 490 iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) { 491 Use32BitVTableOffsetABI = true; 492 } 493 494 // ARM64 libraries are prepared for non-unique RTTI. 
495 bool shouldRTTIBeUnique() const override { return false; } 496 }; 497 498 class FuchsiaCXXABI final : public ItaniumCXXABI { 499 public: 500 explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM) 501 : ItaniumCXXABI(CGM) {} 502 503 private: 504 bool HasThisReturn(GlobalDecl GD) const override { 505 return isa<CXXConstructorDecl>(GD.getDecl()) || 506 (isa<CXXDestructorDecl>(GD.getDecl()) && 507 GD.getDtorType() != Dtor_Deleting); 508 } 509 }; 510 511 class WebAssemblyCXXABI final : public ItaniumCXXABI { 512 public: 513 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM) 514 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true, 515 /*UseARMGuardVarABI=*/true) {} 516 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override; 517 518 private: 519 bool HasThisReturn(GlobalDecl GD) const override { 520 return isa<CXXConstructorDecl>(GD.getDecl()) || 521 (isa<CXXDestructorDecl>(GD.getDecl()) && 522 GD.getDtorType() != Dtor_Deleting); 523 } 524 bool canCallMismatchedFunctionType() const override { return false; } 525 }; 526 527 class XLCXXABI final : public ItaniumCXXABI { 528 public: 529 explicit XLCXXABI(CodeGen::CodeGenModule &CGM) 530 : ItaniumCXXABI(CGM) {} 531 532 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, 533 llvm::FunctionCallee dtor, 534 llvm::Constant *addr) override; 535 536 bool useSinitAndSterm() const override { return true; } 537 538 private: 539 void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub, 540 llvm::Constant *addr); 541 }; 542 } 543 544 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) { 545 switch (CGM.getTarget().getCXXABI().getKind()) { 546 // For IR-generation purposes, there's no significant difference 547 // between the ARM and iOS ABIs. 
548 case TargetCXXABI::GenericARM: 549 case TargetCXXABI::iOS: 550 case TargetCXXABI::WatchOS: 551 return new ARMCXXABI(CGM); 552 553 case TargetCXXABI::iOS64: 554 return new iOS64CXXABI(CGM); 555 556 case TargetCXXABI::Fuchsia: 557 return new FuchsiaCXXABI(CGM); 558 559 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't 560 // include the other 32-bit ARM oddities: constructor/destructor return values 561 // and array cookies. 562 case TargetCXXABI::GenericAArch64: 563 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true, 564 /*UseARMGuardVarABI=*/true); 565 566 case TargetCXXABI::GenericMIPS: 567 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true); 568 569 case TargetCXXABI::WebAssembly: 570 return new WebAssemblyCXXABI(CGM); 571 572 case TargetCXXABI::XL: 573 return new XLCXXABI(CGM); 574 575 case TargetCXXABI::GenericItanium: 576 if (CGM.getContext().getTargetInfo().getTriple().getArch() 577 == llvm::Triple::le32) { 578 // For PNaCl, use ARM-style method pointers so that PNaCl code 579 // does not assume anything about the alignment of function 580 // pointers. 
581 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true); 582 } 583 return new ItaniumCXXABI(CGM); 584 585 case TargetCXXABI::Microsoft: 586 llvm_unreachable("Microsoft ABI is not Itanium-based"); 587 } 588 llvm_unreachable("bad ABI kind"); 589 } 590 591 llvm::Type * 592 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) { 593 if (MPT->isMemberDataPointer()) 594 return CGM.PtrDiffTy; 595 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy); 596 } 597 598 /// In the Itanium and ARM ABIs, method pointers have the form: 599 /// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr; 600 /// 601 /// In the Itanium ABI: 602 /// - method pointers are virtual if (memptr.ptr & 1) is nonzero 603 /// - the this-adjustment is (memptr.adj) 604 /// - the virtual offset is (memptr.ptr - 1) 605 /// 606 /// In the ARM ABI: 607 /// - method pointers are virtual if (memptr.adj & 1) is nonzero 608 /// - the this-adjustment is (memptr.adj >> 1) 609 /// - the virtual offset is (memptr.ptr) 610 /// ARM uses 'adj' for the virtual flag because Thumb functions 611 /// may be only single-byte aligned. 612 /// 613 /// If the member is virtual, the adjusted 'this' pointer points 614 /// to a vtable pointer from which the virtual offset is applied. 615 /// 616 /// If the member is non-virtual, memptr.ptr is the address of 617 /// the function to call. 
618 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer( 619 CodeGenFunction &CGF, const Expr *E, Address ThisAddr, 620 llvm::Value *&ThisPtrForCall, 621 llvm::Value *MemFnPtr, const MemberPointerType *MPT) { 622 CGBuilderTy &Builder = CGF.Builder; 623 624 const FunctionProtoType *FPT = 625 MPT->getPointeeType()->getAs<FunctionProtoType>(); 626 auto *RD = 627 cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl()); 628 629 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType( 630 CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr)); 631 632 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1); 633 634 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual"); 635 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual"); 636 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end"); 637 638 // Extract memptr.adj, which is in the second field. 639 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj"); 640 641 // Compute the true adjustment. 642 llvm::Value *Adj = RawAdj; 643 if (UseARMMethodPtrABI) 644 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted"); 645 646 // Apply the adjustment and cast back to the original struct type 647 // for consistency. 648 llvm::Value *This = ThisAddr.getPointer(); 649 llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy()); 650 Ptr = Builder.CreateInBoundsGEP(Ptr, Adj); 651 This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted"); 652 ThisPtrForCall = This; 653 654 // Load the function pointer. 655 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr"); 656 657 // If the LSB in the function pointer is 1, the function pointer points to 658 // a virtual function. 
659 llvm::Value *IsVirtual; 660 if (UseARMMethodPtrABI) 661 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1); 662 else 663 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1); 664 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual"); 665 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual); 666 667 // In the virtual path, the adjustment left 'This' pointing to the 668 // vtable of the correct base subobject. The "function pointer" is an 669 // offset within the vtable (+1 for the virtual flag on non-ARM). 670 CGF.EmitBlock(FnVirtual); 671 672 // Cast the adjusted this to a pointer to vtable pointer and load. 673 llvm::Type *VTableTy = Builder.getInt8PtrTy(); 674 CharUnits VTablePtrAlign = 675 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD, 676 CGF.getPointerAlign()); 677 llvm::Value *VTable = 678 CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD); 679 680 // Apply the offset. 681 // On ARM64, to reserve extra space in virtual member function pointers, 682 // we only pay attention to the low 32 bits of the offset. 683 llvm::Value *VTableOffset = FnAsInt; 684 if (!UseARMMethodPtrABI) 685 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1); 686 if (Use32BitVTableOffsetABI) { 687 VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty); 688 VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy); 689 } 690 691 // Check the address of the function pointer if CFI on member function 692 // pointers is enabled. 693 llvm::Constant *CheckSourceLocation; 694 llvm::Constant *CheckTypeDesc; 695 bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) && 696 CGM.HasHiddenLTOVisibility(RD); 697 bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination && 698 CGM.HasHiddenLTOVisibility(RD); 699 bool ShouldEmitWPDInfo = 700 CGM.getCodeGenOpts().WholeProgramVTables && 701 // Don't insert type tests if we are forcing public std visibility. 
702 !CGM.HasLTOVisibilityPublicStd(RD); 703 llvm::Value *VirtualFn = nullptr; 704 705 { 706 CodeGenFunction::SanitizerScope SanScope(&CGF); 707 llvm::Value *TypeId = nullptr; 708 llvm::Value *CheckResult = nullptr; 709 710 if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) { 711 // If doing CFI, VFE or WPD, we will need the metadata node to check 712 // against. 713 llvm::Metadata *MD = 714 CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0)); 715 TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD); 716 } 717 718 if (ShouldEmitVFEInfo) { 719 llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset); 720 721 // If doing VFE, load from the vtable with a type.checked.load intrinsic 722 // call. Note that we use the GEP to calculate the address to load from 723 // and pass 0 as the offset to the intrinsic. This is because every 724 // vtable slot of the correct type is marked with matching metadata, and 725 // we know that the load must be from one of these slots. 726 llvm::Value *CheckedLoad = Builder.CreateCall( 727 CGM.getIntrinsic(llvm::Intrinsic::type_checked_load), 728 {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId}); 729 CheckResult = Builder.CreateExtractValue(CheckedLoad, 1); 730 VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0); 731 VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(), 732 "memptr.virtualfn"); 733 } else { 734 // When not doing VFE, emit a normal load, as it allows more 735 // optimisations than type.checked.load. 
736 if (ShouldEmitCFICheck || ShouldEmitWPDInfo) { 737 llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset); 738 CheckResult = Builder.CreateCall( 739 CGM.getIntrinsic(llvm::Intrinsic::type_test), 740 {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId}); 741 } 742 743 if (CGM.getItaniumVTableContext().isRelativeLayout()) { 744 VirtualFn = CGF.Builder.CreateCall( 745 CGM.getIntrinsic(llvm::Intrinsic::load_relative, 746 {VTableOffset->getType()}), 747 {VTable, VTableOffset}); 748 VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo()); 749 } else { 750 llvm::Value *VFPAddr = CGF.Builder.CreateGEP(VTable, VTableOffset); 751 VFPAddr = CGF.Builder.CreateBitCast( 752 VFPAddr, FTy->getPointerTo()->getPointerTo()); 753 VirtualFn = CGF.Builder.CreateAlignedLoad( 754 VFPAddr, CGF.getPointerAlign(), "memptr.virtualfn"); 755 } 756 } 757 assert(VirtualFn && "Virtual fuction pointer not created!"); 758 assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo || 759 CheckResult) && 760 "Check result required but not created!"); 761 762 if (ShouldEmitCFICheck) { 763 // If doing CFI, emit the check. 
764 CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc()); 765 CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0)); 766 llvm::Constant *StaticData[] = { 767 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall), 768 CheckSourceLocation, 769 CheckTypeDesc, 770 }; 771 772 if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) { 773 CGF.EmitTrapCheck(CheckResult); 774 } else { 775 llvm::Value *AllVtables = llvm::MetadataAsValue::get( 776 CGM.getLLVMContext(), 777 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables")); 778 llvm::Value *ValidVtable = Builder.CreateCall( 779 CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables}); 780 CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall), 781 SanitizerHandler::CFICheckFail, StaticData, 782 {VTable, ValidVtable}); 783 } 784 785 FnVirtual = Builder.GetInsertBlock(); 786 } 787 } // End of sanitizer scope 788 789 CGF.EmitBranch(FnEnd); 790 791 // In the non-virtual path, the function pointer is actually a 792 // function pointer. 793 CGF.EmitBlock(FnNonVirtual); 794 llvm::Value *NonVirtualFn = 795 Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn"); 796 797 // Check the function pointer if CFI on member function pointers is enabled. 
798 if (ShouldEmitCFICheck) { 799 CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl(); 800 if (RD->hasDefinition()) { 801 CodeGenFunction::SanitizerScope SanScope(&CGF); 802 803 llvm::Constant *StaticData[] = { 804 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall), 805 CheckSourceLocation, 806 CheckTypeDesc, 807 }; 808 809 llvm::Value *Bit = Builder.getFalse(); 810 llvm::Value *CastedNonVirtualFn = 811 Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy); 812 for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) { 813 llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType( 814 getContext().getMemberPointerType( 815 MPT->getPointeeType(), 816 getContext().getRecordType(Base).getTypePtr())); 817 llvm::Value *TypeId = 818 llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD); 819 820 llvm::Value *TypeTest = 821 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test), 822 {CastedNonVirtualFn, TypeId}); 823 Bit = Builder.CreateOr(Bit, TypeTest); 824 } 825 826 CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall), 827 SanitizerHandler::CFICheckFail, StaticData, 828 {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)}); 829 830 FnNonVirtual = Builder.GetInsertBlock(); 831 } 832 } 833 834 // We're done. 835 CGF.EmitBlock(FnEnd); 836 llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2); 837 CalleePtr->addIncoming(VirtualFn, FnVirtual); 838 CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual); 839 840 CGCallee Callee(FPT, CalleePtr); 841 return Callee; 842 } 843 844 /// Compute an l-value by applying the given pointer-to-member to a 845 /// base object. 846 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress( 847 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr, 848 const MemberPointerType *MPT) { 849 assert(MemPtr->getType() == CGM.PtrDiffTy); 850 851 CGBuilderTy &Builder = CGF.Builder; 852 853 // Cast to char*. 
854 Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty); 855 856 // Apply the offset, which we assume is non-null. 857 llvm::Value *Addr = 858 Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset"); 859 860 // Cast the address to the appropriate pointer type, adopting the 861 // address space of the base pointer. 862 llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType()) 863 ->getPointerTo(Base.getAddressSpace()); 864 return Builder.CreateBitCast(Addr, PType); 865 } 866 867 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer 868 /// conversion. 869 /// 870 /// Bitcast conversions are always a no-op under Itanium. 871 /// 872 /// Obligatory offset/adjustment diagram: 873 /// <-- offset --> <-- adjustment --> 874 /// |--------------------------|----------------------|--------------------| 875 /// ^Derived address point ^Base address point ^Member address point 876 /// 877 /// So when converting a base member pointer to a derived member pointer, 878 /// we add the offset to the adjustment because the address point has 879 /// decreased; and conversely, when converting a derived MP to a base MP 880 /// we subtract the offset from the adjustment because the address point 881 /// has increased. 882 /// 883 /// The standard forbids (at compile time) conversion to and from 884 /// virtual bases, which is why we don't have to consider them here. 885 /// 886 /// The standard forbids (at run time) casting a derived MP to a base 887 /// MP when the derived MP does not point to a member of the base. 888 /// This is why -1 is a reasonable choice for null data member 889 /// pointers. 
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // Use constant emission if we can.
  if (isa<llvm::Constant>(src))
    return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));

  // A null adjustment means the conversion is a no-op.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  CGBuilderTy &Builder = CGF.Builder;
  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    llvm::Value *dst;
    if (isDerivedToBase)
      dst = Builder.CreateNSWSub(src, adj, "adj");
    else
      dst = Builder.CreateNSWAdd(src, adj, "adj");

    // Null check: a null data member pointer is -1, and must stay -1
    // through the conversion, so select the original value when null.
    llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
    llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
    return Builder.CreateSelect(isNull, src, dst);
  }

  // The this-adjustment is left-shifted by 1 on ARM, because the low
  // bit of 'adj' carries the virtual-function discriminator there.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  // Member function pointers: apply the adjustment to field 1 ('adj')
  // of the {ptr, adj} pair.
  llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
  llvm::Value *dstAdj;
  if (isDerivedToBase)
    dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
  else
    dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");

  return Builder.CreateInsertValue(src, dstAdj, 1);
}

/// Constant-folding counterpart of the conversion above; same algorithm
/// expressed with constant expressions.
llvm::Constant *
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
                                           llvm::Constant *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // If the adjustment is trivial, we don't need to do anything.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    // null maps to null.
    if (src->isAllOnesValue()) return src;

    if (isDerivedToBase)
      return llvm::ConstantExpr::getNSWSub(src, adj);
    else
      return llvm::ConstantExpr::getNSWAdd(src, adj);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
978 if (UseARMMethodPtrABI) { 979 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue(); 980 offset <<= 1; 981 adj = llvm::ConstantInt::get(adj->getType(), offset); 982 } 983 984 llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1); 985 llvm::Constant *dstAdj; 986 if (isDerivedToBase) 987 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj); 988 else 989 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj); 990 991 return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1); 992 } 993 994 llvm::Constant * 995 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) { 996 // Itanium C++ ABI 2.3: 997 // A NULL pointer is represented as -1. 998 if (MPT->isMemberDataPointer()) 999 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true); 1000 1001 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0); 1002 llvm::Constant *Values[2] = { Zero, Zero }; 1003 return llvm::ConstantStruct::getAnon(Values); 1004 } 1005 1006 llvm::Constant * 1007 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT, 1008 CharUnits offset) { 1009 // Itanium C++ ABI 2.3: 1010 // A pointer to data member is an offset from the base address of 1011 // the class object containing it, represented as a ptrdiff_t 1012 return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity()); 1013 } 1014 1015 llvm::Constant * 1016 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) { 1017 return BuildMemberPointer(MD, CharUnits::Zero()); 1018 } 1019 1020 llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD, 1021 CharUnits ThisAdjustment) { 1022 assert(MD->isInstance() && "Member function must not be static!"); 1023 1024 CodeGenTypes &Types = CGM.getTypes(); 1025 1026 // Get the function pointer (or index if this is a virtual function). 
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);

    // Translate the vtable slot index into a byte offset, whose scale
    // depends on whether the vtable uses the relative layout.
    uint64_t VTableOffset;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Multiply by 4-byte relative offsets.
      VTableOffset = Index * 4;
    } else {
      const ASTContext &Context = getContext();
      CharUnits PointerWidth = Context.toCharUnitsFromBits(
          Context.getTargetInfo().getPointerWidth(0));
      VTableOffset = Index * PointerWidth.getQuantity();
    }

    if (UseARMMethodPtrABI) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         2 * ThisAdjustment.getQuantity() + 1);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);

    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                       (UseARMMethodPtrABI ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}

/// Emit a constant member pointer from an evaluated APValue, folding in
/// the derived-to-base path adjustment recorded in the value.
llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
                                                 QualType MPType) {
  const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  if (!MPD)
    return EmitNullMemberPointer(MPT);

  CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);

  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
    return BuildMemberPointer(MD, ThisAdjustment);

  // Data member: field offset plus any path adjustment.
  CharUnits FieldOffset =
    getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
  return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
}

/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  // For inequality the whole expression is negated via De Morgan:
  // swap the roles of And/Or and compare with NE instead of EQ.
  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr. This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null. ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj. If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}

llvm::Value *
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *MemPtr,
                                          const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  /// For member data pointers, this is just a check against -1.
  if (MPT->isMemberDataPointer()) {
    assert(MemPtr->getType() == CGM.PtrDiffTy);
    llvm::Value *NegativeOne =
      llvm::Constant::getAllOnesValue(MemPtr->getType());
    return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
  }

  // In Itanium, a member function pointer is not null if 'ptr' is not null.
  llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");

  llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
  llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");

  // On ARM, a member function pointer is also non-null if the low bit of 'adj'
  // (the virtual bit) is set.
  if (UseARMMethodPtrABI) {
    llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
    llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
    llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
    llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
                                                  "memptr.isvirtual");
    Result = Builder.CreateOr(Result, IsVirtual);
  }

  return Result;
}

bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
  const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
  if (!RD)
    return false;

  // If C++ prohibits us from making a copy, return by address.
  if (!RD->canPassInRegisters()) {
    auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
    FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
    return true;
  }
  return false;
}

/// The Itanium ABI requires non-zero initialization only for data
/// member pointers, for which '0' is a valid offset.
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
  return MPT->isMemberFunctionPointer();
}

/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            Address Ptr,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
    auto *ClassDecl =
        cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
    llvm::Value *VTable =
        CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);

    // Track back to entry -2 and pull out the offset there.
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        VTable, -2, "complete-offset.ptr");
    llvm::Value *Offset =
        CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());

    // Apply the offset.
    llvm::Value *CompletePtr =
        CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
    CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws.
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
                                    ElementType);
  }

  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);

  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
}

void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
  // void __cxa_rethrow();

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);

  llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");

  if (isNoReturn)
    CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
  else
    CGF.EmitRuntimeCallOrInvoke(Fn);
}

static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
  // void *__cxa_allocate_exception(size_t thrown_size);

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
}

static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
  // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
  //                  void (*dest) (void *));

  llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
}

void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
  QualType ThrowType = E->getSubExpr()->getType();
  // Now allocate the exception object.
  llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
  uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();

  llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
      AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");

  // Construct the thrown value into the freshly-allocated exception slot.
  CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
  CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));

  // Now throw the exception.
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
                                                         /*ForEH=*/true);

  // The address of the destructor. If the exception type has a
  // trivial destructor (or isn't a record), we just pass null.
  llvm::Constant *Dtor = nullptr;
  if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
    CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!Record->hasTrivialDestructor()) {
      CXXDestructorDecl *DtorD = Record->getDestructor();
      Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
      Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
    }
  }
  if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);

  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
  CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
}

static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);

  // Mark the function as nounwind readonly.
  llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
                                            llvm::Attribute::ReadOnly };
  llvm::AttributeList Attrs = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
}

static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

/// Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7]
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation below and
  // return that Src is not a public base of Dst. Record all inheritance paths.
  if (!Dst->isDerivedFrom(Src, Paths))
    return CharUnits::fromQuantity(-2ULL);

  unsigned NumPublicPaths = 0;
  CharUnits Offset;

  // Now walk all possible inheritance paths.
  for (const CXXBasePath &Path : Paths) {
    if (Path.Access != AS_public)  // Ignore non-public inheritance.
      continue;

    ++NumPublicPaths;

    for (const CXXBasePathElement &PathElement : Path) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: no hint.
      if (PathElement.Base->isVirtual())
        return CharUnits::fromQuantity(-1ULL);

      if (NumPublicPaths > 1)  // Won't use offsets, skip computation.
        continue;

      // Accumulate the base class offsets.
      const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
      Offset += L.getBaseClassOffset(
          PathElement.Base->getType()->getAsCXXRecordDecl());
    }
  }

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(-2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(-3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
  return Offset;
}

static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
                                              QualType SrcRecordTy) {
  // typeid(*p) on a null pointer must throw std::bad_typeid, so only
  // dereferenced operands need the null check.
  return IsDeref;
}

void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
  llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
  Call->setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       Address ThisPtr,
                                       llvm::Type *StdTypeInfoPtrTy) {
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *Value =
      CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);

  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Load the type info.
    Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
    Value = CGF.Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
        {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});

    // Setup to dereference again since this is a proxy we accessed.
    Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
  } else {
    // Load the type info: in the classic layout it sits one slot before
    // the address point.
    Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  }
  return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
}

bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                                       QualType SrcRecordTy) {
  return SrcIsPtr;
}

llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint = llvm::ConstantInt::get(
      PtrDiffLTy,
      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);

  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
        CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}

llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  Address ThisAddr,
                                                  QualType SrcRecordTy,
                                                  QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *OffsetToTop;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);

    // Get the offset-to-top from the vtable.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_32(/*Type=*/nullptr, VTable, -2U);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
  } else {
    llvm::Type *PtrDiffLTy =
        CGF.ConvertType(CGF.getContext().getPointerDiffType());

    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);

    // Get the offset-to-top from the vtable.
    OffsetToTop = CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
  }
  // Finally, add the offset to the pointer.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
  return CGF.Builder.CreateBitCast(Value, DestLTy);
}

bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::FunctionCallee Fn = getBadCastFn(CGF);
  llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
  Call->setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
  return true;
}

llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         Address This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
  CharUnits VBaseOffsetOffset =
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
                                                               BaseClassDecl);
  llvm::Value *VBaseOffsetPtr =
    CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
                                   "vbase.offset.ptr");

  // Relative vtables store 32-bit offsets; classic vtables store
  // ptrdiff_t-sized ones.
  llvm::Value *VBaseOffset;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    VBaseOffsetPtr =
        CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        VBaseOffsetPtr, CharUnits::fromQuantity(4), "vbase.offset");
  } else {
    VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
                                               CGM.PtrDiffTy->getPointerTo());
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
  }
  return VBaseOffset;
}

void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
  // Just make sure we're in sync with TargetCXXABI.
  assert(CGM.getTarget().getCXXABI().hasConstructorVariants());

  // The constructor used for constructing this as a base class;
  // ignores virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));

  // The constructor used for constructing this as a complete class;
  // constructs the virtual bases, then calls the base constructor.
  if (!D->getParent()->isAbstract()) {
    // We don't need to emit the complete ctor if the class is abstract.
    CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
  }
}

CGCXXABI::AddedStructorArgCounts
ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
                                      SmallVectorImpl<CanQualType> &ArgTys) {
  ASTContext &Context = getContext();

  // All parameters are already in place except VTT, which goes after 'this'.
  // These are Clang types, so we don't need to worry about sret yet.

  // Check if we need to add a VTT parameter (which has type void **).
  // Only base-object variants of structors for classes with virtual
  // bases take one.
  if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
                                             : GD.getDtorType() == Dtor_Base) &&
      cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
    ArgTys.insert(ArgTys.begin() + 1,
                  Context.getPointerType(Context.VoidPtrTy));
    return AddedStructorArgCounts::prefix(1);
  }
  return AddedStructorArgCounts{};
}

void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
  // The destructor used for destructing this as a base class; ignores
  // virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));

  // The destructor used for destructing this as a most-derived class;
  // call the base destructor and then destructs any virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));

  // The destructor in a virtual table is always a 'deleting'
  // destructor, which calls the complete destructor and then uses the
  // appropriate operator delete.
  if (D->isVirtual())
    CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
}

void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
                                              QualType &ResTy,
                                              FunctionArgList &Params) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));

  // Check if we need a VTT parameter as well.
  if (NeedsVTTParameter(CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    QualType T = Context.getPointerType(Context.VoidPtrTy);
    auto *VTTDecl = ImplicitParamDecl::Create(
        Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
        T, ImplicitParamDecl::CXXVTT);
    Params.insert(Params.begin() + 1, VTTDecl);
    getStructorImplicitParamDecl(CGF) = VTTDecl;
  }
}

void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  // Naked functions have no prolog.
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
    return;

  /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
  /// adjustments are required, because they are all handled by thunks.
  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));

  /// Initialize the 'vtt' slot if needed.
  if (getStructorImplicitParamDecl(CGF)) {
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
  }

  /// If this is a function that the ABI specifies returns 'this', initialize
  /// the return slot to 'this' at the start of the function.
1679 /// 1680 /// Unlike the setting of return types, this is done within the ABI 1681 /// implementation instead of by clients of CGCXXABI because: 1682 /// 1) getThisValue is currently protected 1683 /// 2) in theory, an ABI could implement 'this' returns some other way; 1684 /// HasThisReturn only specifies a contract, not the implementation 1685 if (HasThisReturn(CGF.CurGD)) 1686 CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue); 1687 } 1688 1689 CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs( 1690 CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type, 1691 bool ForVirtualBase, bool Delegating) { 1692 if (!NeedsVTTParameter(GlobalDecl(D, Type))) 1693 return AddedStructorArgs{}; 1694 1695 // Insert the implicit 'vtt' argument as the second argument. 1696 llvm::Value *VTT = 1697 CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating); 1698 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy); 1699 return AddedStructorArgs::prefix({{VTT, VTTTy}}); 1700 } 1701 1702 llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam( 1703 CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type, 1704 bool ForVirtualBase, bool Delegating) { 1705 GlobalDecl GD(DD, Type); 1706 return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating); 1707 } 1708 1709 void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF, 1710 const CXXDestructorDecl *DD, 1711 CXXDtorType Type, bool ForVirtualBase, 1712 bool Delegating, Address This, 1713 QualType ThisTy) { 1714 GlobalDecl GD(DD, Type); 1715 llvm::Value *VTT = 1716 getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating); 1717 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy); 1718 1719 CGCallee Callee; 1720 if (getContext().getLangOpts().AppleKext && 1721 Type != Dtor_Base && DD->isVirtual()) 1722 Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent()); 1723 else 1724 Callee = 
CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD); 1725 1726 CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy, 1727 nullptr); 1728 } 1729 1730 void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT, 1731 const CXXRecordDecl *RD) { 1732 llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits()); 1733 if (VTable->hasInitializer()) 1734 return; 1735 1736 ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext(); 1737 const VTableLayout &VTLayout = VTContext.getVTableLayout(RD); 1738 llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD); 1739 llvm::Constant *RTTI = 1740 CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD)); 1741 1742 // Create and set the initializer. 1743 ConstantInitBuilder builder(CGM); 1744 auto components = builder.beginStruct(); 1745 CGVT.createVTableInitializer(components, VTLayout, RTTI, 1746 llvm::GlobalValue::isLocalLinkage(Linkage)); 1747 components.finishAndSetAsInitializer(VTable); 1748 1749 // Set the correct linkage. 1750 VTable->setLinkage(Linkage); 1751 1752 if (CGM.supportsCOMDAT() && VTable->isWeakForLinker()) 1753 VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName())); 1754 1755 // Set the right visibility. 1756 CGM.setGVProperties(VTable, RD); 1757 1758 // If this is the magic class __cxxabiv1::__fundamental_type_info, 1759 // we will emit the typeinfo for the fundamental types. This is the 1760 // same behaviour as GCC. 
  const DeclContext *DC = RD->getDeclContext();
  // Detect exactly ::__cxxabiv1::__fundamental_type_info (namespace directly
  // inside the translation unit) before emitting the fundamental descriptors.
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors(RD);

  if (!VTable->isDeclarationForLinker())
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);

  if (VTContext.isRelativeLayout() && !VTable->isDSOLocal())
    CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
}

// A vptr field needs the virtual-base offset only when it belongs to a
// virtual base and the current structor takes a VTT.
bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
    CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
  if (Vptr.NearestVBase == nullptr)
    return false;
  return NeedsVTTParameter(CGF.CurGD);
}

// Inside a structor, the address point comes from the VTT when virtual bases
// are involved and the structor has a VTT parameter; otherwise the constant
// address point into the vtable group is used directly.
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {

  if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
      NeedsVTTParameter(CGF.CurGD)) {
    return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
                                                  NearestVBase);
  }
  return getVTableAddressPoint(Base, VTableClass);
}

llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());

  // Find the appropriate vtable within the vtable group, and the address point
  // within that vtable.
  VTableLayout::AddressPointLocation AddressPoint =
      CGM.getItaniumVTableContext()
          .getVTableLayout(VTableClass)
          .getAddressPoint(Base);
  // GEP indices: dereference the global, select the vtable within the group,
  // then the address point within that vtable.
  llvm::Value *Indices[] = {
      llvm::ConstantInt::get(CGM.Int32Ty, 0),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
  };

  return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
                                              Indices, /*InBounds=*/true,
                                              /*InRangeIndex=*/1);
}

llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {
  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");

  // Get the secondary vpointer index.
  uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

  /// Load the VTT.
  llvm::Value *VTT = CGF.LoadCXXVTT();
  if (VirtualPointerIndex)
    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);

  // And load the address point from the VTT.
  return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
}

// Constant-expression contexts can always use the constant address point.
llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
    BaseSubobject Base, const CXXRecordDecl *VTableClass) {
  return getVTableAddressPoint(Base, VTableClass);
}

llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  // Cache hit: the global was already created for this record.
  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  // Mangle the vtable symbol name.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);

  // Use pointer alignment for the vtable. Otherwise we would align them based
  // on the size of the initializer which doesn't make sense as only single
  // values are read.
  unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
                        ? 32
                        : CGM.getTarget().getPointerAlign(0);

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, VTableType, llvm::GlobalValue::ExternalLinkage,
      getContext().toCharUnitsFromBits(PAlign).getQuantity());
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  CGM.setGVProperties(VTable, RD);

  return VTable;
}

// Load the function pointer for a virtual call: read the vptr from 'This',
// then load the slot at the method's vtable index (plain, CFI-checked, or
// relative-layout form depending on the active options).
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD,
                                                  Address This,
                                                  llvm::Type *Ty,
                                                  SourceLocation Loc) {
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(
      This, Ty->getPointerTo()->getPointerTo(), MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc;
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    // CFI: emit a type-checked load at the byte offset of the slot.
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable,
        VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncLoad;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Relative layout stores 32-bit offsets; use llvm.load.relative with a
      // byte offset of 4 * slot index.
      VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
      llvm::Value *Load = CGF.Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
          {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
      VFuncLoad = CGF.Builder.CreateBitCast(Load, Ty->getPointerTo());
    } else {
      VTable =
          CGF.Builder.CreateBitCast(VTable, Ty->getPointerTo()->getPointerTo());
      llvm::Value *VTableSlotPtr =
          CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
      VFuncLoad =
          CGF.Builder.CreateAlignedLoad(VTableSlotPtr, CGF.getPointerAlign());
    }

    // Add !invariant.load md to virtual function load to indicate that
    // function didn't change inside vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help in devirtualization because it will only matter if we will have 2
    // the same virtual function loads from the same vtable load, which won't
    // happen without enabled devirtualization with -fstrict-vtable-pointers.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers) {
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
        VFuncLoadInstr->setMetadata(
            llvm::LLVMContext::MD_invariant_load,
            llvm::MDNode::get(CGM.getLLVMContext(),
                              llvm::ArrayRef<llvm::Metadata *>()));
      }
    }
    VFunc = VFuncLoad;
  }

  CGCallee Callee(GD, VFunc);
  return Callee;
}

llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
    CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
    Address This, DeleteOrMemberCallExpr E) {
  // Exactly one of the two expression forms is present: a member call
  // (obj.~T()) or a delete-expression.
  auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
  auto *D = E.dyn_cast<const CXXDeleteExpr *>();
  assert((CE != nullptr) ^ (D != nullptr));
  assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);

  GlobalDecl GD(Dtor, DtorType);
  const CGFunctionInfo *FInfo =
      &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
  llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
  CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);

  // The static type of 'this' comes from whichever expression form we have.
  QualType ThisTy;
  if (CE) {
    ThisTy = CE->getObjectType();
  } else {
    ThisTy = D->getDestroyedType();
  }

  // No VTT is passed for a virtual destructor call.
  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
                            QualType(), nullptr);
  return nullptr;
}

// Emit the VTT (virtual table table) definition for a class with virtual
// bases, with the same linkage as its vtable.
void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
  CodeGenVTables &VTables = CGM.getVTables();
  llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
  VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
}

bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
    const CXXRecordDecl *RD) const {
  // We don't emit available_externally vtables if we are in -fapple-kext mode
  // because kext mode does not permit devirtualization.
  if (CGM.getLangOpts().AppleKext)
    return false;

  // If the vtable is hidden then it is not safe to emit an available_externally
  // copy of vtable.
  if (isVTableHidden(RD))
    return false;

  if (CGM.getCodeGenOpts().ForceEmitVTables)
    return true;

  // If we don't have any not emitted inline virtual function then we are safe
  // to emit an available_externally copy of vtable.
  // FIXME we can still emit a copy of the vtable if we
  // can emit definition of the inline functions.
  if (hasAnyUnusedVirtualInlineFunction(RD))
    return false;

  // For a class with virtual bases, we must also be able to speculatively
  // emit the VTT, because CodeGen doesn't have separate notions of "can emit
  // the vtable" and "can emit the VTT". For a base subobject, this means we
  // need to be able to emit non-virtual base vtables.
  if (RD->getNumVBases()) {
    for (const auto &B : RD->bases()) {
      auto *BRD = B.getType()->getAsCXXRecordDecl();
      assert(BRD && "no class for base specifier");
      // Only non-virtual dynamic bases are checked here; virtual bases are
      // the caller's (complete-object) concern.
      if (B.isVirtual() || !BRD->isDynamicClass())
        continue;
      if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
        return false;
    }
  }

  return true;
}

bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
  if (!canSpeculativelyEmitVTableAsBaseClass(RD))
    return false;

  // For a complete-object vtable (or more specifically, for the VTT), we need
  // to be able to speculatively emit the vtables of all dynamic virtual bases.
  for (const auto &B : RD->vbases()) {
    auto *BRD = B.getType()->getAsCXXRecordDecl();
    assert(BRD && "no class for base specifier");
    if (!BRD->isDynamicClass())
      continue;
    if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
      return false;
  }

  return true;
}

// Shared implementation of 'this' and return-value adjustments for thunks:
// apply a constant non-virtual byte offset and/or a virtual offset loaded
// from the vtable. IsReturnAdjustment flips the order in which the two
// adjustments are applied.
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
                                          Address InitialPtr,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  // Nothing to do: hand back the incoming pointer untouched.
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.getPointer();

  Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
    V = CGF.Builder.CreateConstInBoundsByteGEP(V,
                              CharUnits::fromQuantity(NonVirtualAdjustment));
  }

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    // Read the vptr, then load the offset stored at VirtualAdjustment bytes
    // from it and add that offset to the pointer.
    Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);

    llvm::Value *Offset;
    llvm::Value *OffsetPtr =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
    if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Load the adjustment offset from the vtable as a 32-bit int.
      OffsetPtr =
          CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
      Offset =
          CGF.Builder.CreateAlignedLoad(OffsetPtr, CharUnits::fromQuantity(4));
    } else {
      llvm::Type *PtrDiffTy =
          CGF.ConvertType(CGF.getContext().getPointerDiffType());

      OffsetPtr =
          CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());

      // Load the adjustment offset from the vtable.
      Offset = CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
    }
    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
  } else {
    ResultPtr = V.getPointer();
  }

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied second.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
                                                       NonVirtualAdjustment);
  }

  // Cast back to the original type.
  return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
}

// Thunk 'this' adjustment: virtual part indexed by the vcall offset offset.
llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
                                                  Address This,
                                                  const ThisAdjustment &TA) {
  return performTypeAdjustment(CGF, This, TA.NonVirtual,
                               TA.Virtual.Itanium.VCallOffsetOffset,
                               /*IsReturnAdjustment=*/false);
}

// Thunk return adjustment: virtual part indexed by the vbase offset offset.
llvm::Value *
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const ReturnAdjustment &RA) {
  return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
                               RA.Virtual.Itanium.VBaseOffsetOffset,
                               /*IsReturnAdjustment=*/true);
}

void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
                                    RValue RV, QualType ResultType) {
  // Only destructor thunks differ from the Itanium behavior.
  if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
    return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);

  // Destructor thunks in the ARM ABI have indeterminate results.
  llvm::Type *T = CGF.ReturnValue.getElementType();
  RValue Undef = RValue::get(llvm::UndefValue::get(T));
  return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
}

/************************** Array allocation cookies **************************/

CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // The array cookie is a size_t; pad that up to the element alignment.
  // The cookie is actually right-justified in that space.
  return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
                  CGM.getContext().getTypeAlignInChars(elementType));
}

Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                             Address NewPtr,
                                             llvm::Value *NumElements,
                                             const CXXNewExpr *expr,
                                             QualType ElementType) {
  assert(requiresArrayCookie(expr));

  unsigned AS = NewPtr.getAddressSpace();

  ASTContext &Ctx = getContext();
  CharUnits SizeSize = CGF.getSizeSize();

  // The size of the cookie.
  CharUnits CookieSize =
      std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));

  // Compute an offset to the cookie: the element count is right-justified,
  // so skip any alignment padding at the front.
  Address CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);

  // Write the number of elements into the appropriate slot.
  Address NumElementsPtr =
      CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
  llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);

  // Handle the array cookie specially in ASan.
  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
      (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
       CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
    // The store to the CookiePtr does not need to be instrumented.
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
    llvm::FunctionCallee F =
        CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
    CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
  }

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
}

llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                Address allocPtr,
                                                CharUnits cookieSize) {
  // The element size is right-justified in the cookie.
  Address numElementsPtr = allocPtr;
  CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
  if (!numElementsOffset.isZero())
    numElementsPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);

  unsigned AS = allocPtr.getAddressSpace();
  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
  if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
    return CGF.Builder.CreateLoad(numElementsPtr);
  // In asan mode emit a function call instead of a regular load and let the
  // run-time deal with it: if the shadow is properly poisoned return the
  // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
  // We can't simply ignore this load using nosanitize metadata because
  // the metadata may be lost.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
  llvm::FunctionCallee F =
      CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
  return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
}

CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // ARM says that the cookie is always:
  //   struct array_cookie {
  //     std::size_t element_size; // element_size != 0
  //     std::size_t element_count;
  //   };
  // But the base ABI doesn't give anything an alignment greater than
  // 8, so we can dismiss this as typical ABI-author blindness to
  // actual language complexity and round up to the element alignment.
  return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
                  CGM.getContext().getTypeAlignInChars(elementType));
}

Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                         Address newPtr,
                                         llvm::Value *numElements,
                                         const CXXNewExpr *expr,
                                         QualType elementType) {
  assert(requiresArrayCookie(expr));

  // The cookie is always at the start of the buffer.
  Address cookie = newPtr;

  // The first element is the element size.
  cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
  llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
                 getContext().getTypeSizeInChars(elementType).getQuantity());
  CGF.Builder.CreateStore(elementSize, cookie);

  // The second element is the element count.
  cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
  CGF.Builder.CreateStore(numElements, cookie);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
  return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
}

llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                            Address allocPtr,
                                            CharUnits cookieSize) {
  // The number of elements is at offset sizeof(size_t) relative to
  // the allocated pointer.
  Address numElementsPtr
    = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());

  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
  return CGF.Builder.CreateLoad(numElementsPtr);
}

/*********************** Static local initialization **************************/

// Declare the Itanium runtime entry point for acquiring an initialization
// guard. Marked nounwind: the runtime function does not throw.
static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
                                              llvm::PointerType *GuardPtrTy) {
  // int __cxa_guard_acquire(__guard *guard_object);
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
                            GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(
      FTy, "__cxa_guard_acquire",
      llvm::AttributeList::get(CGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}

// Declare the runtime entry point that marks initialization as complete.
static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
                                              llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_release(__guard *guard_object);
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(
      FTy, "__cxa_guard_release",
      llvm::AttributeList::get(CGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}

// Declare the runtime entry point used when initialization exits via an
// exception.
static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
                                            llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_abort(__guard *guard_object);
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(
      FTy, "__cxa_guard_abort",
      llvm::AttributeList::get(CGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}

namespace {
  // EH cleanup that calls __cxa_guard_abort(guard) if the guarded
  // initializer unwinds.
  struct CallGuardAbort final : EHScopeStack::Cleanup {
    llvm::GlobalVariable *Guard;
    CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
                                  Guard);
    }
  };
}

/// The ARM code here follows the Itanium code closely enough that we
/// just special-case it at particular places.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // Inline variables that weren't instantiated from variable templates have
  // partially-ordered initialization within their translation unit.
  bool NonTemplateInline =
      D.isInline() &&
      !isTemplateInstantiation(D.getTemplateSpecializationKind());

  // We only need to use thread-safe statics for local non-TLS variables and
  // inline variables; other global initialization is always single-threaded
  // or (through lazy dynamic loading in multiple threads) unsequenced.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    (D.isLocalVarDecl() || NonTemplateInline) &&
                    !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  CharUnits guardAlignment;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
    guardAlignment = CharUnits::One();
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    if (UseARMGuardVarABI) {
      guardTy = CGF.SizeTy;
      guardAlignment = CGF.getSizeAlign();
    } else {
      guardTy = CGF.Int64Ty;
      guardAlignment = CharUnits::fromQuantity(
          CGM.getDataLayout().getABITypeAlignment(guardTy));
    }
  }
  llvm::PointerType *guardPtrTy = guardTy->getPointerTo();

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleStaticGuardVariable(&D, out);
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage and visibility from the guarded variable.
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(guardTy, 0),
                                     guardName.str());
    guard->setDSOLocal(var->isDSOLocal());
    guard->setVisibility(var->getVisibility());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());
    guard->setAlignment(guardAlignment.getAsAlign());

    // The ABI says: "It is suggested that it be emitted in the same COMDAT
    // group as the associated data object." In practice, this doesn't work for
    // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
    llvm::Comdat *C = var->getComdat();
    if (!D.isLocalVarDecl() && C &&
        (CGM.getTarget().getTriple().isOSBinFormatELF() ||
         CGM.getTarget().getTriple().isOSBinFormatWasm())) {
      guard->setComdat(C);
      // An inline variable's guard function is run from the per-TU
      // initialization function, not via a dedicated global ctor function, so
      // we can't put it in a comdat.
      if (!NonTemplateInline)
        CGF.CurFn->setComdat(C);
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
      guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
    }

    CGM.setStaticLocalDeclGuardAddress(&D, guard);
  }

  Address guardAddr = Address(guard, guardAlignment);

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //            __cxa_guard_abort (&obj_guard);
  //            throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }

  // Load the first byte of the guard variable.
  llvm::LoadInst *LI =
      Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));

  // Itanium ABI:
  //   An implementation supporting thread-safety on multiprocessor
  //   systems must also guarantee that references to the initialized
  //   object do not occur before the load of the initialization flag.
  //
  // In LLVM, we do this by marking the load Acquire.
  if (threadsafe)
    LI->setAtomic(llvm::AtomicOrdering::Acquire);

  // For ARM, we should only check the first bit, rather than the entire byte:
  //
  // ARM C++ ABI 3.2.3.1:
  //   To support the potential use of initialization guard variables
  //   as semaphores that are the target of ARM SWP and LDREX/STREX
  //   synchronizing instructions we define a static initialization
  //   guard variable to be a 4-byte aligned, 4-byte word with the
  //   following inline access protocol.
  //     #define INITIALIZED 1
  //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
  //       if (__cxa_guard_acquire(&obj_guard))
  //         ...
  //     }
  //
  // and similarly for ARM64:
  //
  // ARM64 C++ ABI 3.2.2:
  //   This ABI instead only specifies the value bit 0 of the static guard
  //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
  //   variable is not initialized and 1 when it is.
  llvm::Value *V =
      (UseARMGuardVarABI && !useInt8GuardVariable)
          ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
          : LI;
  llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");

  llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");

  // Check if the first byte of the guard variable is zero.
  CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
                               CodeGenFunction::GuardKind::VariableGuard, &D);

  CGF.EmitBlock(InitCheckBlock);

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.
    llvm::Value *V
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

    // Only run the initializer if __cxa_guard_acquire returned non-zero.
    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);

    CGF.EmitBlock(InitBlock);
  }

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release. This cannot throw.
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
                                guardAddr.getPointer());
  } else {
    // Non-thread-safe path: just mark the guard initialized directly.
    Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);
  }

  CGF.EmitBlock(EndBlock);
}

/// Register a global destructor using __cxa_atexit (or, for TLS variables,
/// __cxa_thread_atexit / _tlv_atexit on Darwin).
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::FunctionCallee dtor,
                                        llvm::Constant *addr, bool TLS) {
  assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
         "__cxa_atexit is disabled");
  const char *Name = "__cxa_atexit";
  if (TLS) {
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
  }

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC. Go ahead and cast it to the
  // right prototype.
  llvm::Type *dtorTy =
      llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();

  // Preserve address space of addr.
  auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
  auto AddrInt8PtrTy =
      AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
      CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
  auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
  llvm::FunctionType *atexitTy =
      llvm::FunctionType::get(CGF.IntTy, paramTys, false);

  // Fetch the actual function.
2514 llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name); 2515 if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee())) 2516 fn->setDoesNotThrow(); 2517 2518 if (!addr) 2519 // addr is null when we are trying to register a dtor annotated with 2520 // __attribute__((destructor)) in a constructor function. Using null here is 2521 // okay because this argument is just passed back to the destructor 2522 // function. 2523 addr = llvm::Constant::getNullValue(CGF.Int8PtrTy); 2524 2525 llvm::Value *args[] = {llvm::ConstantExpr::getBitCast( 2526 cast<llvm::Constant>(dtor.getCallee()), dtorTy), 2527 llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy), 2528 handle}; 2529 CGF.EmitNounwindRuntimeCall(atexit, args); 2530 } 2531 2532 void CodeGenModule::registerGlobalDtorsWithAtExit() { 2533 for (const auto &I : DtorsUsingAtExit) { 2534 int Priority = I.first; 2535 const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second; 2536 2537 // Create a function that registers destructors that have the same priority. 2538 // 2539 // Since constructor functions are run in non-descending order of their 2540 // priorities, destructors are registered in non-descending order of their 2541 // priorities, and since destructor functions are run in the reverse order 2542 // of their registration, destructor functions are run in non-ascending 2543 // order of their priorities. 
2544 CodeGenFunction CGF(*this); 2545 std::string GlobalInitFnName = 2546 std::string("__GLOBAL_init_") + llvm::to_string(Priority); 2547 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false); 2548 llvm::Function *GlobalInitFn = CreateGlobalInitOrCleanUpFunction( 2549 FTy, GlobalInitFnName, getTypes().arrangeNullaryFunction(), 2550 SourceLocation()); 2551 ASTContext &Ctx = getContext(); 2552 QualType ReturnTy = Ctx.VoidTy; 2553 QualType FunctionTy = Ctx.getFunctionType(ReturnTy, llvm::None, {}); 2554 FunctionDecl *FD = FunctionDecl::Create( 2555 Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), 2556 &Ctx.Idents.get(GlobalInitFnName), FunctionTy, nullptr, SC_Static, 2557 false, false); 2558 CGF.StartFunction(GlobalDecl(FD), ReturnTy, GlobalInitFn, 2559 getTypes().arrangeNullaryFunction(), FunctionArgList(), 2560 SourceLocation(), SourceLocation()); 2561 2562 for (auto *Dtor : Dtors) { 2563 // Register the destructor function calling __cxa_atexit if it is 2564 // available. Otherwise fall back on calling atexit. 2565 if (getCodeGenOpts().CXAAtExit) 2566 emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false); 2567 else 2568 CGF.registerGlobalDtorWithAtExit(Dtor); 2569 } 2570 2571 CGF.FinishFunction(); 2572 AddGlobalCtor(GlobalInitFn, Priority, nullptr); 2573 } 2574 } 2575 2576 /// Register a global destructor as best as we know how. 2577 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, 2578 llvm::FunctionCallee dtor, 2579 llvm::Constant *addr) { 2580 if (D.isNoDestroy(CGM.getContext())) 2581 return; 2582 2583 // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit 2584 // or __cxa_atexit depending on whether this VarDecl is a thread-local storage 2585 // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled. 2586 // We can always use __cxa_thread_atexit. 
  if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
    return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());

  // In Apple kexts, we want to add a global destructor entry.
  // FIXME: shouldn't this be guarded by some variable?
  if (CGM.getLangOpts().AppleKext) {
    // Generate a global destructor entry.
    return CGM.AddCXXDtorEntry(dtor, addr);
  }

  // Fallback: plain atexit registration.
  CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
}

/// Returns true if references to this thread_local variable should always go
/// through the (replaceable) thread wrapper rather than directly to the
/// backing variable. Only static-lifetime (non-static-local) variables are
/// meaningful here.
static bool isThreadWrapperReplaceable(const VarDecl *VD,
                                       CodeGen::CodeGenModule &CGM) {
  assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
  // Darwin prefers to have references to thread local variables to go through
  // the thread wrapper instead of directly referencing the backing variable.
  return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
         CGM.getTarget().getTriple().isOSDarwin();
}

/// Get the appropriate linkage for the wrapper function. This is essentially
/// the weak form of the variable's linkage; every translation unit which needs
/// the wrapper emits a copy, and we want the linker to merge them.
static llvm::GlobalValue::LinkageTypes
getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
  llvm::GlobalValue::LinkageTypes VarLinkage =
      CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);

  // For internal linkage variables, we don't need an external or weak wrapper.
  if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
    return VarLinkage;

  // If the thread wrapper is replaceable, give it appropriate linkage.
  if (isThreadWrapperReplaceable(VD, CGM))
    if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
        !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
      return VarLinkage;
  return llvm::GlobalValue::WeakODRLinkage;
}

/// Return the thread wrapper function for \p VD, creating (and recording in
/// ThreadWrappers) a declaration with the right linkage, comdat, visibility
/// and calling convention if it does not already exist in the module.
llvm::Function *
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                             llvm::Value *Val) {
  // Mangle the name for the thread_local wrapper function.
  SmallString<256> WrapperName;
  {
    llvm::raw_svector_ostream Out(WrapperName);
    getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
  }

  // FIXME: If VD is a definition, we should regenerate the function attributes
  // before returning.
  if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
    return cast<llvm::Function>(V);

  // The wrapper returns a pointer to the variable (for references, to the
  // referenced object).
  QualType RetQT = VD->getType();
  if (RetQT->isReferenceType())
    RetQT = RetQT.getNonReferenceType();

  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      getContext().getPointerType(RetQT), FunctionArgList());

  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Wrapper =
      llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
                             WrapperName.str(), &CGM.getModule());

  if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
    Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper);

  // Always resolve references to the wrapper at link time.
  if (!Wrapper->hasLocalLinkage())
    if (!isThreadWrapperReplaceable(VD, CGM) ||
        llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
        llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
        VD->getVisibility() == HiddenVisibility)
      Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);

  if (isThreadWrapperReplaceable(VD, CGM)) {
    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
    Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
  }

  // Remember the wrapper so EmitThreadLocalInitFuncs can emit its body later.
  ThreadWrappers.push_back({VD, Wrapper});
  return Wrapper;
}

void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;

  // Separate initializers into those with ordered (or partially-ordered)
  // initialization and those with unordered initialization.
  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
    if (isTemplateInstantiation(
            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
          CXXThreadLocalInits[I];
    else
      OrderedInits.push_back(CXXThreadLocalInits[I]);
  }

  if (!OrderedInits.empty()) {
    // Generate a guarded initialization function.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
    InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
                                                     SourceLocation(),
                                                     /*TLS=*/true);
    // __tls_guard is a thread-local byte recording whether __tls_init has
    // already run on this thread.
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
    Guard->setThreadLocal(true);
    Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getAsAlign());

    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
        InitFunc, OrderedInits, ConstantAddress(Guard, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
    }
  }

  // Create declarations for thread wrappers for all thread-local variables
  // with non-discardable definitions in this translation unit.
  for (const VarDecl *VD : CXXThreadLocals) {
    if (VD->hasDefinition() &&
        !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
      llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
      getOrCreateThreadLocalWrapper(VD, GV);
    }
  }

  // Emit all referenced thread wrappers.
  for (auto VDAndWrapper : ThreadWrappers) {
    const VarDecl *VD = VDAndWrapper.first;
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
    llvm::Function *Wrapper = VDAndWrapper.second;

    // Some targets require that all access to thread local variables go through
    // the thread wrapper. This means that we cannot attempt to create a thread
    // wrapper or a thread helper.
    if (!VD->hasDefinition()) {
      if (isThreadWrapperReplaceable(VD, CGM)) {
        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
        continue;
      }

      // If this isn't a TU in which this variable is defined, the thread
      // wrapper is discardable.
      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
    }

    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
    }

    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    bool HasConstantInitialization = false;
    if (!usesThreadWrapperFunction(VD)) {
      HasConstantInitialization = true;
    } else if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      llvm::Function *InitFuncToUse = InitFunc;
      // Unordered (template-instantiation) initializers get their own per-
      // variable init function instead of the shared __tls_init.
      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
      if (InitFuncToUse)
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
                                         InitFuncToUse);
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      Init = llvm::Function::Create(InitFnTy,
                                    llvm::GlobalVariable::ExternalWeakLinkage,
                                    InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
                                    cast<llvm::Function>(Init));
    }

    if (Init) {
      Init->setVisibility(Var->getVisibility());
      // Don't mark an extern_weak function DSO local on windows.
      if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
        Init->setDSOLocal(Var->isDSOLocal());
    }

    // Now emit the wrapper's body: run the initializer (if needed), then
    // return the address of the variable (or of its referent).
    llvm::LLVMContext &Context = CGM.getModule().getContext();
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
    CGBuilderTy Builder(CGM, Entry);
    if (HasConstantInitialization) {
      // No dynamic initialization to invoke.
    } else if (InitIsInitFunc) {
      if (Init) {
        llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
        if (isThreadWrapperReplaceable(VD, CGM)) {
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
          llvm::Function *Fn =
              cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
          Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
        }
      }
    } else {
      // Don't know whether we have an init function. Call it if it exists.
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      Builder.CreateCondBr(Have, InitBB, ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(InitFnTy, Init);
      Builder.CreateBr(ExitBB);

      Builder.SetInsertPoint(ExitBB);
    }

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Var;
    if (VD->getType()->isReferenceType()) {
      CharUnits Align = CGM.getContext().getDeclAlign(VD);
      Val = Builder.CreateAlignedLoad(Val, Align);
    }
    if (Val->getType() != Wrapper->getReturnType())
      Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
          Val, Wrapper->getReturnType(), "");
    Builder.CreateRet(Val);
  }
}

/// Emit an lvalue for a thread_local variable as a call to its thread
/// wrapper, so every access funnels through the wrapper (and thus through
/// any required dynamic initialization).
LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
                                                   const VarDecl *VD,
                                                   QualType LValType) {
  llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
  llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);

  llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
  CallVal->setCallingConv(Wrapper->getCallingConv());

  LValue LV;
  if (VD->getType()->isReferenceType())
    LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
  else
    LV = CGF.MakeAddrLValue(CallVal, LValType,
                            CGF.getContext().getDeclAlign(VD));
  // FIXME: need setObjCGCLValueClass?
  return LV;
}

/// Return whether the given global decl needs a VTT parameter, which it does
/// if it's a base constructor or destructor with virtual bases.
bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());

  // We don't have any virtual bases, just return early.
  if (!MD->getParent()->getNumVBases())
    return false;

  // Check if we have a base constructor.
  if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
    return true;

  // Check if we have a base destructor.
  if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
    return true;

  return false;
}

namespace {
/// Builds Itanium-ABI RTTI descriptors (type_info objects) for types.
class ItaniumRTTIBuilder {
  CodeGenModule &CGM;  // Per-module state.
  llvm::LLVMContext &VMContext;
  const ItaniumCXXABI &CXXABI;  // Per-module state.

  /// Fields - The fields of the RTTI descriptor currently being built.
  SmallVector<llvm::Constant *, 16> Fields;

  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
  llvm::GlobalVariable *
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);

  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
  /// descriptor of the given type.
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);

  /// BuildVTablePointer - Build the vtable pointer for the given type.
  void BuildVTablePointer(const Type *Ty);

  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
  /// classes with bases that do not satisfy the abi::__si_class_type_info
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
  /// for pointer types.
  void BuildPointerTypeInfo(QualType PointeeTy);

  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
  /// type_info for an object type.
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);

  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
  /// struct, used for member pointer types.
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);

public:
  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}

  // Pointer type info flags.
  enum {
    /// PTI_Const - Type has const qualifier.
    PTI_Const = 0x1,

    /// PTI_Volatile - Type has volatile qualifier.
    PTI_Volatile = 0x2,

    /// PTI_Restrict - Type has restrict qualifier.
    PTI_Restrict = 0x4,

    /// PTI_Incomplete - Type is incomplete.
    PTI_Incomplete = 0x8,

    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
    /// (in pointer to member).
    PTI_ContainingClassIncomplete = 0x10,

    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
    //PTI_TransactionSafe = 0x20,

    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
    PTI_Noexcept = 0x40,
  };

  // VMI type info flags.
  enum {
    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
    VMI_NonDiamondRepeat = 0x1,

    /// VMI_DiamondShaped - Class is diamond shaped.
    VMI_DiamondShaped = 0x2
  };

  // Base class type info flags.
  enum {
    /// BCTI_Virtual - Base class is virtual.
    BCTI_Virtual = 0x1,

    /// BCTI_Public - Base class is public.
    BCTI_Public = 0x2
  };

  /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
  /// link to an existing RTTI descriptor if one already exists.
  llvm::Constant *BuildTypeInfo(QualType Ty);

  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
  llvm::Constant *BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
};
}

llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
    QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
  // Mangle the _ZTS name for the type.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);

  // We know that the mangled name of the type starts at index 4 of the
  // mangled name of the typename, so we can just index into it in order to
  // get the mangled name of the type.
  llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
                                                            Name.substr(4));
  auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);

  llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, Init->getType(), Linkage, Align.getQuantity());

  GV->setInitializer(Init);

  return GV;
}

llvm::Constant *
ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
  // Mangle the RTTI name.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);

  // Look for an existing global.
  llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);

  if (!GV) {
    // Create a new global variable.
    // Note for the future: If we would ever like to do deferred emission of
    // RTTI, check if emitting vtables opportunistically need any adjustment.
3021 3022 GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy, 3023 /*isConstant=*/true, 3024 llvm::GlobalValue::ExternalLinkage, nullptr, 3025 Name); 3026 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 3027 CGM.setGVProperties(GV, RD); 3028 } 3029 3030 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy); 3031 } 3032 3033 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type 3034 /// info for that type is defined in the standard library. 3035 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) { 3036 // Itanium C++ ABI 2.9.2: 3037 // Basic type information (e.g. for "int", "bool", etc.) will be kept in 3038 // the run-time support library. Specifically, the run-time support 3039 // library should contain type_info objects for the types X, X* and 3040 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char, 3041 // unsigned char, signed char, short, unsigned short, int, unsigned int, 3042 // long, unsigned long, long long, unsigned long long, float, double, 3043 // long double, char16_t, char32_t, and the IEEE 754r decimal and 3044 // half-precision floating point types. 3045 // 3046 // GCC also emits RTTI for __int128. 3047 // FIXME: We do not emit RTTI information for decimal types here. 3048 3049 // Types added here must also be added to EmitFundamentalRTTIDescriptors. 
3050 switch (Ty->getKind()) { 3051 case BuiltinType::Void: 3052 case BuiltinType::NullPtr: 3053 case BuiltinType::Bool: 3054 case BuiltinType::WChar_S: 3055 case BuiltinType::WChar_U: 3056 case BuiltinType::Char_U: 3057 case BuiltinType::Char_S: 3058 case BuiltinType::UChar: 3059 case BuiltinType::SChar: 3060 case BuiltinType::Short: 3061 case BuiltinType::UShort: 3062 case BuiltinType::Int: 3063 case BuiltinType::UInt: 3064 case BuiltinType::Long: 3065 case BuiltinType::ULong: 3066 case BuiltinType::LongLong: 3067 case BuiltinType::ULongLong: 3068 case BuiltinType::Half: 3069 case BuiltinType::Float: 3070 case BuiltinType::Double: 3071 case BuiltinType::LongDouble: 3072 case BuiltinType::Float16: 3073 case BuiltinType::Float128: 3074 case BuiltinType::Char8: 3075 case BuiltinType::Char16: 3076 case BuiltinType::Char32: 3077 case BuiltinType::Int128: 3078 case BuiltinType::UInt128: 3079 return true; 3080 3081 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 3082 case BuiltinType::Id: 3083 #include "clang/Basic/OpenCLImageTypes.def" 3084 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 3085 case BuiltinType::Id: 3086 #include "clang/Basic/OpenCLExtensionTypes.def" 3087 case BuiltinType::OCLSampler: 3088 case BuiltinType::OCLEvent: 3089 case BuiltinType::OCLClkEvent: 3090 case BuiltinType::OCLQueue: 3091 case BuiltinType::OCLReserveID: 3092 #define SVE_TYPE(Name, Id, SingletonId) \ 3093 case BuiltinType::Id: 3094 #include "clang/Basic/AArch64SVEACLETypes.def" 3095 case BuiltinType::ShortAccum: 3096 case BuiltinType::Accum: 3097 case BuiltinType::LongAccum: 3098 case BuiltinType::UShortAccum: 3099 case BuiltinType::UAccum: 3100 case BuiltinType::ULongAccum: 3101 case BuiltinType::ShortFract: 3102 case BuiltinType::Fract: 3103 case BuiltinType::LongFract: 3104 case BuiltinType::UShortFract: 3105 case BuiltinType::UFract: 3106 case BuiltinType::ULongFract: 3107 case BuiltinType::SatShortAccum: 3108 case BuiltinType::SatAccum: 3109 case 
BuiltinType::SatLongAccum: 3110 case BuiltinType::SatUShortAccum: 3111 case BuiltinType::SatUAccum: 3112 case BuiltinType::SatULongAccum: 3113 case BuiltinType::SatShortFract: 3114 case BuiltinType::SatFract: 3115 case BuiltinType::SatLongFract: 3116 case BuiltinType::SatUShortFract: 3117 case BuiltinType::SatUFract: 3118 case BuiltinType::SatULongFract: 3119 case BuiltinType::BFloat16: 3120 return false; 3121 3122 case BuiltinType::Dependent: 3123 #define BUILTIN_TYPE(Id, SingletonId) 3124 #define PLACEHOLDER_TYPE(Id, SingletonId) \ 3125 case BuiltinType::Id: 3126 #include "clang/AST/BuiltinTypes.def" 3127 llvm_unreachable("asking for RRTI for a placeholder type!"); 3128 3129 case BuiltinType::ObjCId: 3130 case BuiltinType::ObjCClass: 3131 case BuiltinType::ObjCSel: 3132 llvm_unreachable("FIXME: Objective-C types are unsupported!"); 3133 } 3134 3135 llvm_unreachable("Invalid BuiltinType Kind!"); 3136 } 3137 3138 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) { 3139 QualType PointeeTy = PointerTy->getPointeeType(); 3140 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy); 3141 if (!BuiltinTy) 3142 return false; 3143 3144 // Check the qualifiers. 3145 Qualifiers Quals = PointeeTy.getQualifiers(); 3146 Quals.removeConst(); 3147 3148 if (!Quals.empty()) 3149 return false; 3150 3151 return TypeInfoIsInStandardLibrary(BuiltinTy); 3152 } 3153 3154 /// IsStandardLibraryRTTIDescriptor - Returns whether the type 3155 /// information for the given type exists in the standard library. 3156 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) { 3157 // Type info for builtin types is defined in the standard library. 3158 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty)) 3159 return TypeInfoIsInStandardLibrary(BuiltinTy); 3160 3161 // Type info for some pointer types to builtin types is defined in the 3162 // standard library. 
3163 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty)) 3164 return TypeInfoIsInStandardLibrary(PointerTy); 3165 3166 return false; 3167 } 3168 3169 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for 3170 /// the given type exists somewhere else, and that we should not emit the type 3171 /// information in this translation unit. Assumes that it is not a 3172 /// standard-library type. 3173 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM, 3174 QualType Ty) { 3175 ASTContext &Context = CGM.getContext(); 3176 3177 // If RTTI is disabled, assume it might be disabled in the 3178 // translation unit that defines any potential key function, too. 3179 if (!Context.getLangOpts().RTTI) return false; 3180 3181 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) { 3182 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl()); 3183 if (!RD->hasDefinition()) 3184 return false; 3185 3186 if (!RD->isDynamicClass()) 3187 return false; 3188 3189 // FIXME: this may need to be reconsidered if the key function 3190 // changes. 3191 // N.B. We must always emit the RTTI data ourselves if there exists a key 3192 // function. 3193 bool IsDLLImport = RD->hasAttr<DLLImportAttr>(); 3194 3195 // Don't import the RTTI but emit it locally. 3196 if (CGM.getTriple().isWindowsGNUEnvironment()) 3197 return false; 3198 3199 if (CGM.getVTables().isVTableExternal(RD)) 3200 return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment() 3201 ? false 3202 : true; 3203 3204 if (IsDLLImport) 3205 return true; 3206 } 3207 3208 return false; 3209 } 3210 3211 /// IsIncompleteClassType - Returns whether the given record type is incomplete. 3212 static bool IsIncompleteClassType(const RecordType *RecordTy) { 3213 return !RecordTy->getDecl()->isCompleteDefinition(); 3214 } 3215 3216 /// ContainsIncompleteClassType - Returns whether the given type contains an 3217 /// incomplete class type. 
This is true if 3218 /// 3219 /// * The given type is an incomplete class type. 3220 /// * The given type is a pointer type whose pointee type contains an 3221 /// incomplete class type. 3222 /// * The given type is a member pointer type whose class is an incomplete 3223 /// class type. 3224 /// * The given type is a member pointer type whoise pointee type contains an 3225 /// incomplete class type. 3226 /// is an indirect or direct pointer to an incomplete class type. 3227 static bool ContainsIncompleteClassType(QualType Ty) { 3228 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) { 3229 if (IsIncompleteClassType(RecordTy)) 3230 return true; 3231 } 3232 3233 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty)) 3234 return ContainsIncompleteClassType(PointerTy->getPointeeType()); 3235 3236 if (const MemberPointerType *MemberPointerTy = 3237 dyn_cast<MemberPointerType>(Ty)) { 3238 // Check if the class type is incomplete. 3239 const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass()); 3240 if (IsIncompleteClassType(ClassType)) 3241 return true; 3242 3243 return ContainsIncompleteClassType(MemberPointerTy->getPointeeType()); 3244 } 3245 3246 return false; 3247 } 3248 3249 // CanUseSingleInheritance - Return whether the given record decl has a "single, 3250 // public, non-virtual base at offset zero (i.e. the derived class is dynamic 3251 // iff the base is)", according to Itanium C++ ABI, 2.95p6b. 3252 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) { 3253 // Check the number of bases. 3254 if (RD->getNumBases() != 1) 3255 return false; 3256 3257 // Get the base. 3258 CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin(); 3259 3260 // Check that the base is not virtual. 3261 if (Base->isVirtual()) 3262 return false; 3263 3264 // Check that the base is public. 3265 if (Base->getAccessSpecifier() != AS_public) 3266 return false; 3267 3268 // Check that the class is dynamic iff the base is. 
  auto *BaseDecl =
      cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
  if (!BaseDecl->isEmpty() &&
      BaseDecl->isDynamicClass() != RD->isDynamicClass())
    return false;

  return true;
}

/// Push the address point of the libc++abi vtable matching \p Ty's type_info
/// class (e.g. __class_type_info, __pointer_type_info) onto Fields.
void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
  // abi::__class_type_info.
  static const char * const ClassTypeInfo =
    "_ZTVN10__cxxabiv117__class_type_infoE";
  // abi::__si_class_type_info.
  static const char * const SIClassTypeInfo =
    "_ZTVN10__cxxabiv120__si_class_type_infoE";
  // abi::__vmi_class_type_info.
  static const char * const VMIClassTypeInfo =
    "_ZTVN10__cxxabiv121__vmi_class_type_infoE";

  const char *VTableName = nullptr;

  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    llvm_unreachable("Pipe types shouldn't get here");

  case Type::Builtin:
  case Type::ExtInt:
  // GCC treats vector and complex types as fundamental types.
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::Atomic:
  // FIXME: GCC treats block pointers as fundamental types?!
  case Type::BlockPointer:
    // abi::__fundamental_type_info.
    VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // abi::__array_type_info.
    VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // abi::__function_type_info.
    VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
    break;

  case Type::Enum:
    // abi::__enum_type_info.
    VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());

    // Pick __class_type_info / __si_class_type_info / __vmi_class_type_info
    // based on the inheritance shape (ABI 2.9.5).
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      VTableName = ClassTypeInfo;
    } else if (CanUseSingleInheritance(RD)) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = VMIClassTypeInfo;
    }

    break;
  }

  case Type::ObjCObject:
    // Ignore protocol qualifiers.
    Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();

    // Handle id and Class.
    if (isa<BuiltinType>(Ty)) {
      VTableName = ClassTypeInfo;
      break;
    }

    assert(isa<ObjCInterfaceType>(Ty));
    LLVM_FALLTHROUGH;

  case Type::ObjCInterface:
    if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = ClassTypeInfo;
    }
    break;

  case Type::ObjCObjectPointer:
  case Type::Pointer:
    // abi::__pointer_type_info.
    VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
    break;

  case Type::MemberPointer:
    // abi::__pointer_to_member_type_info.
    VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
    break;
  }

  llvm::Constant *VTable = nullptr;

  // Check if the alias exists. If it doesn't, then get or create the global.
  if (CGM.getItaniumVTableContext().isRelativeLayout())
    VTable = CGM.getModule().getNamedAlias(VTableName);
  if (!VTable)
    VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);

  CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));

  llvm::Type *PtrDiffTy =
      CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());

  // The vtable address point is 2.
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // The vtable address point is 8 bytes after its start:
    // 4 for the offset to top + 4 for the relative offset to rtti.
    llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
    VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
    VTable =
        llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
  } else {
    llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
    VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable,
                                                          Two);
  }
  VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);

  Fields.push_back(VTable);
}

/// Return the linkage that the type info and type info name constants
/// should have for the given type.
static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
                                                             QualType Ty) {
  // Itanium C++ ABI 2.9.5p7:
  //   In addition, it and all of the intermediate abi::__pointer_type_info
  //   structs in the chain down to the abi::__class_type_info for the
  //   incomplete class type must be prevented from resolving to the
  //   corresponding type_info structs for the complete class type, possibly
  //   by making them local static objects. Finally, a dummy class RTTI is
  //   generated for the incomplete type that will not resolve to the final
  //   complete class RTTI (because the latter need not exist), possibly by
  //   making it a local static object.
  // Incomplete class types (and pointers to them) must not resolve to the
  // complete type's RTTI; keep them internal per the ABI text above.
  if (ContainsIncompleteClassType(Ty))
    return llvm::GlobalValue::InternalLinkage;

  switch (Ty->getLinkage()) {
  case NoLinkage:
  case InternalLinkage:
  case UniqueExternalLinkage:
    return llvm::GlobalValue::InternalLinkage;

  case VisibleNoLinkage:
  case ModuleInternalLinkage:
  case ModuleLinkage:
  case ExternalLinkage:
    // RTTI is not enabled, which means that this type info struct is going
    // to be used for exception handling. Give it linkonce_odr linkage.
    if (!CGM.getLangOpts().RTTI)
      return llvm::GlobalValue::LinkOnceODRLinkage;

    if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
      if (RD->hasAttr<WeakAttr>())
        return llvm::GlobalValue::WeakODRLinkage;
      if (CGM.getTriple().isWindowsItaniumEnvironment())
        if (RD->hasAttr<DLLImportAttr>() &&
            ShouldUseExternalRTTIDescriptor(CGM, Ty))
          return llvm::GlobalValue::ExternalLinkage;
      // MinGW always uses LinkOnceODRLinkage for type info.
      if (RD->isDynamicClass() &&
          !CGM.getContext()
               .getTargetInfo()
               .getTriple()
               .isWindowsGNUEnvironment())
        // Dynamic classes: make the type_info's linkage follow the vtable's.
        return CGM.getVTableLinkage(RD);
    }

    return llvm::GlobalValue::LinkOnceODRLinkage;
  }

  llvm_unreachable("Invalid linkage!");
}

/// Build (or reuse) the RTTI descriptor for \p Ty and return it as an i8*
/// constant. Computes linkage, visibility, and DLL storage class for the
/// descriptor and delegates emission to the four-argument overload.
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
  // We want to operate on the canonical type.
  Ty = Ty.getCanonicalType();

  // Check if we've already emitted an RTTI descriptor for this type.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);

  llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
  if (OldGV && !OldGV->isDeclaration()) {
    assert(!OldGV->hasAvailableExternallyLinkage() &&
           "available_externally typeinfos not yet implemented");

    return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
  }

  // Check if there is already an external RTTI descriptor for this type.
  if (IsStandardLibraryRTTIDescriptor(Ty) ||
      ShouldUseExternalRTTIDescriptor(CGM, Ty))
    return GetAddrOfExternalRTTIDescriptor(Ty);

  // Determine the linkage for the new type_info object and its name.
  llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);

  // Give the type_info object and name the formal visibility of the
  // type itself.
  llvm::GlobalValue::VisibilityTypes llvmVisibility;
  if (llvm::GlobalValue::isLocalLinkage(Linkage))
    // If the linkage is local, only default visibility makes sense.
    llvmVisibility = llvm::GlobalValue::DefaultVisibility;
  else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
           ItaniumCXXABI::RUK_NonUniqueHidden)
    llvmVisibility = llvm::GlobalValue::HiddenVisibility;
  else
    llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());

  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
      llvm::GlobalValue::DefaultStorageClass;
  if (CGM.getTriple().isWindowsItaniumEnvironment()) {
    auto RD = Ty->getAsCXXRecordDecl();
    if (RD && RD->hasAttr<DLLExportAttr>())
      DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
  }

  return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
}

llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
    QualType Ty,
    llvm::GlobalVariable::LinkageTypes Linkage,
    llvm::GlobalValue::VisibilityTypes Visibility,
    llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
  // Add the vtable pointer.
  BuildVTablePointer(cast<Type>(Ty));

  // And the name.
  llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
  llvm::Constant *TypeNameField;

  // If we're supposed to demote the visibility, be sure to set a flag
  // to use a string comparison for type_info comparisons.
    // Non-unique RTTI: tag the name pointer by setting its high bit so the
    // runtime falls back to string comparison (sign bit is defined to be
    // clear for global pointers on ARM64 — this encoding is ARM64-specific,
    // per the comment above).
    TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
    llvm::Constant *flag =
        llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
    TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
    TypeNameField =
        llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
  } else {
    TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
  }
  Fields.push_back(TypeNameField);

  // Append the type-class-specific trailing fields, if any.
  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  // GCC treats vector types as fundamental types.
  case Type::Builtin:
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::BlockPointer:
    // Itanium C++ ABI 2.9.5p4:
    //   abi::__fundamental_type_info adds no data members to std::type_info.
    break;

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    break;

  case Type::ExtInt:
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__array_type_info adds no data members to std::type_info.
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__function_type_info adds no data members to std::type_info.
    break;

  case Type::Enum:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__enum_type_info adds no data members to std::type_info.
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
        cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      // We don't need to emit any fields.
      break;
    }

    if (CanUseSingleInheritance(RD))
      BuildSIClassTypeInfo(RD);
    else
      BuildVMIClassTypeInfo(RD);

    break;
  }

  case Type::ObjCObject:
  case Type::ObjCInterface:
    BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
    break;

  case Type::ObjCObjectPointer:
    BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
    break;

  case Type::Pointer:
    BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
    break;

  case Type::MemberPointer:
    BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
    break;

  case Type::Atomic:
    // No fields, at least for the moment.
    break;
  }

  // Materialize the accumulated fields as an anonymous constant struct.
  llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
  llvm::Module &M = CGM.getModule();
  llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
  llvm::GlobalVariable *GV =
      new llvm::GlobalVariable(M, Init->getType(),
                               /*isConstant=*/true, Linkage, Init, Name);

  // If there's already an old global variable, replace it with the new one.
  if (OldGV) {
    GV->takeName(OldGV);
    llvm::Constant *NewPtr =
        llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtr);
    OldGV->eraseFromParent();
  }

  if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(M.getOrInsertComdat(GV->getName()));

  CharUnits Align =
      CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0));
  GV->setAlignment(Align.getAsAlign());

  // The Itanium ABI specifies that type_info objects must be globally
  // unique, with one exception: if the type is an incomplete class
  // type or a (possibly indirect) pointer to one.  That exception
  // affects the general case of comparing type_info objects produced
  // by the typeid operator, which is why the comparison operators on
  // std::type_info generally use the type_info name pointers instead
  // of the object addresses.  However, the language's built-in uses
  // of RTTI generally require class types to be complete, even when
  // manipulating pointers to those class types.  This allows the
  // implementation of dynamic_cast to rely on address equality tests,
  // which is much faster.

  // All of this is to say that it's important that both the type_info
  // object and the type_info name be uniqued when weakly emitted.

  TypeName->setVisibility(Visibility);
  CGM.setDSOLocal(TypeName);

  GV->setVisibility(Visibility);
  CGM.setDSOLocal(GV);

  TypeName->setDLLStorageClass(DLLStorageClass);
  GV->setDLLStorageClass(DLLStorageClass);

  TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
  GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);

  return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
}

/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
/// for the given Objective-C object type.
3703 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) { 3704 // Drop qualifiers. 3705 const Type *T = OT->getBaseType().getTypePtr(); 3706 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T)); 3707 3708 // The builtin types are abi::__class_type_infos and don't require 3709 // extra fields. 3710 if (isa<BuiltinType>(T)) return; 3711 3712 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl(); 3713 ObjCInterfaceDecl *Super = Class->getSuperClass(); 3714 3715 // Root classes are also __class_type_info. 3716 if (!Super) return; 3717 3718 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super); 3719 3720 // Everything else is single inheritance. 3721 llvm::Constant *BaseTypeInfo = 3722 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy); 3723 Fields.push_back(BaseTypeInfo); 3724 } 3725 3726 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single 3727 /// inheritance, according to the Itanium C++ ABI, 2.95p6b. 3728 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) { 3729 // Itanium C++ ABI 2.9.5p6b: 3730 // It adds to abi::__class_type_info a single member pointing to the 3731 // type_info structure for the base type, 3732 llvm::Constant *BaseTypeInfo = 3733 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType()); 3734 Fields.push_back(BaseTypeInfo); 3735 } 3736 3737 namespace { 3738 /// SeenBases - Contains virtual and non-virtual bases seen when traversing 3739 /// a class hierarchy. 3740 struct SeenBases { 3741 llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases; 3742 llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases; 3743 }; 3744 } 3745 3746 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in 3747 /// abi::__vmi_class_type_info. 
3748 /// 3749 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base, 3750 SeenBases &Bases) { 3751 3752 unsigned Flags = 0; 3753 3754 auto *BaseDecl = 3755 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl()); 3756 3757 if (Base->isVirtual()) { 3758 // Mark the virtual base as seen. 3759 if (!Bases.VirtualBases.insert(BaseDecl).second) { 3760 // If this virtual base has been seen before, then the class is diamond 3761 // shaped. 3762 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped; 3763 } else { 3764 if (Bases.NonVirtualBases.count(BaseDecl)) 3765 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat; 3766 } 3767 } else { 3768 // Mark the non-virtual base as seen. 3769 if (!Bases.NonVirtualBases.insert(BaseDecl).second) { 3770 // If this non-virtual base has been seen before, then the class has non- 3771 // diamond shaped repeated inheritance. 3772 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat; 3773 } else { 3774 if (Bases.VirtualBases.count(BaseDecl)) 3775 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat; 3776 } 3777 } 3778 3779 // Walk all bases. 3780 for (const auto &I : BaseDecl->bases()) 3781 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases); 3782 3783 return Flags; 3784 } 3785 3786 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) { 3787 unsigned Flags = 0; 3788 SeenBases Bases; 3789 3790 // Walk all bases. 3791 for (const auto &I : RD->bases()) 3792 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases); 3793 3794 return Flags; 3795 } 3796 3797 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for 3798 /// classes with bases that do not satisfy the abi::__si_class_type_info 3799 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c. 
3800 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) { 3801 llvm::Type *UnsignedIntLTy = 3802 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy); 3803 3804 // Itanium C++ ABI 2.9.5p6c: 3805 // __flags is a word with flags describing details about the class 3806 // structure, which may be referenced by using the __flags_masks 3807 // enumeration. These flags refer to both direct and indirect bases. 3808 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD); 3809 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags)); 3810 3811 // Itanium C++ ABI 2.9.5p6c: 3812 // __base_count is a word with the number of direct proper base class 3813 // descriptions that follow. 3814 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases())); 3815 3816 if (!RD->getNumBases()) 3817 return; 3818 3819 // Now add the base class descriptions. 3820 3821 // Itanium C++ ABI 2.9.5p6c: 3822 // __base_info[] is an array of base class descriptions -- one for every 3823 // direct proper base. Each description is of the type: 3824 // 3825 // struct abi::__base_class_type_info { 3826 // public: 3827 // const __class_type_info *__base_type; 3828 // long __offset_flags; 3829 // 3830 // enum __offset_flags_masks { 3831 // __virtual_mask = 0x1, 3832 // __public_mask = 0x2, 3833 // __offset_shift = 8 3834 // }; 3835 // }; 3836 3837 // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long 3838 // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on 3839 // LLP64 platforms. 3840 // FIXME: Consider updating libc++abi to match, and extend this logic to all 3841 // LLP64 platforms. 
3842 QualType OffsetFlagsTy = CGM.getContext().LongTy; 3843 const TargetInfo &TI = CGM.getContext().getTargetInfo(); 3844 if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth()) 3845 OffsetFlagsTy = CGM.getContext().LongLongTy; 3846 llvm::Type *OffsetFlagsLTy = 3847 CGM.getTypes().ConvertType(OffsetFlagsTy); 3848 3849 for (const auto &Base : RD->bases()) { 3850 // The __base_type member points to the RTTI for the base type. 3851 Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType())); 3852 3853 auto *BaseDecl = 3854 cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl()); 3855 3856 int64_t OffsetFlags = 0; 3857 3858 // All but the lower 8 bits of __offset_flags are a signed offset. 3859 // For a non-virtual base, this is the offset in the object of the base 3860 // subobject. For a virtual base, this is the offset in the virtual table of 3861 // the virtual base offset for the virtual base referenced (negative). 3862 CharUnits Offset; 3863 if (Base.isVirtual()) 3864 Offset = 3865 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl); 3866 else { 3867 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); 3868 Offset = Layout.getBaseClassOffset(BaseDecl); 3869 }; 3870 3871 OffsetFlags = uint64_t(Offset.getQuantity()) << 8; 3872 3873 // The low-order byte of __offset_flags contains flags, as given by the 3874 // masks from the enumeration __offset_flags_masks. 3875 if (Base.isVirtual()) 3876 OffsetFlags |= BCTI_Virtual; 3877 if (Base.getAccessSpecifier() == AS_public) 3878 OffsetFlags |= BCTI_Public; 3879 3880 Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags)); 3881 } 3882 } 3883 3884 /// Compute the flags for a __pbase_type_info, and remove the corresponding 3885 /// pieces from \p Type. 
3886 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) { 3887 unsigned Flags = 0; 3888 3889 if (Type.isConstQualified()) 3890 Flags |= ItaniumRTTIBuilder::PTI_Const; 3891 if (Type.isVolatileQualified()) 3892 Flags |= ItaniumRTTIBuilder::PTI_Volatile; 3893 if (Type.isRestrictQualified()) 3894 Flags |= ItaniumRTTIBuilder::PTI_Restrict; 3895 Type = Type.getUnqualifiedType(); 3896 3897 // Itanium C++ ABI 2.9.5p7: 3898 // When the abi::__pbase_type_info is for a direct or indirect pointer to an 3899 // incomplete class type, the incomplete target type flag is set. 3900 if (ContainsIncompleteClassType(Type)) 3901 Flags |= ItaniumRTTIBuilder::PTI_Incomplete; 3902 3903 if (auto *Proto = Type->getAs<FunctionProtoType>()) { 3904 if (Proto->isNothrow()) { 3905 Flags |= ItaniumRTTIBuilder::PTI_Noexcept; 3906 Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None); 3907 } 3908 } 3909 3910 return Flags; 3911 } 3912 3913 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, 3914 /// used for pointer types. 3915 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) { 3916 // Itanium C++ ABI 2.9.5p7: 3917 // __flags is a flag word describing the cv-qualification and other 3918 // attributes of the type pointed to 3919 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy); 3920 3921 llvm::Type *UnsignedIntLTy = 3922 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy); 3923 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags)); 3924 3925 // Itanium C++ ABI 2.9.5p7: 3926 // __pointee is a pointer to the std::type_info derivation for the 3927 // unqualified type being pointed to. 3928 llvm::Constant *PointeeTypeInfo = 3929 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy); 3930 Fields.push_back(PointeeTypeInfo); 3931 } 3932 3933 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info 3934 /// struct, used for member pointer types. 
void
ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
  QualType PointeeTy = Ty->getPointeeType();

  // Itanium C++ ABI 2.9.5p7:
  //   __flags is a flag word describing the cv-qualification and other
  //   attributes of the type pointed to.
  unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);

  // An incomplete containing class sets an additional flag beyond the
  // common __pbase flags.
  const RecordType *ClassType = cast<RecordType>(Ty->getClass());
  if (IsIncompleteClassType(ClassType))
    Flags |= PTI_ContainingClassIncomplete;

  llvm::Type *UnsignedIntLTy =
      CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));

  // Itanium C++ ABI 2.9.5p7:
  //   __pointee is a pointer to the std::type_info derivation for the
  //   unqualified type being pointed to.
  llvm::Constant *PointeeTypeInfo =
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
  Fields.push_back(PointeeTypeInfo);

  // Itanium C++ ABI 2.9.5p9:
  //   __context is a pointer to an abi::__class_type_info corresponding to the
  //   class type containing the member pointed to
  //   (e.g., the "A" in "int A::*").
  Fields.push_back(
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
}

llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
  return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
}

/// Pre-emit RTTI descriptors for all fundamental types, plus their pointer
/// and pointer-to-const variants, with external linkage. \p RD supplies the
/// DLL storage class and visibility to apply.
void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
  // Types added here must also be added to TypeInfoIsInStandardLibrary.
  QualType FundamentalTypes[] = {
      getContext().VoidTy,             getContext().NullPtrTy,
      getContext().BoolTy,             getContext().WCharTy,
      getContext().CharTy,             getContext().UnsignedCharTy,
      getContext().SignedCharTy,       getContext().ShortTy,
      getContext().UnsignedShortTy,    getContext().IntTy,
      getContext().UnsignedIntTy,      getContext().LongTy,
      getContext().UnsignedLongTy,     getContext().LongLongTy,
      getContext().UnsignedLongLongTy, getContext().Int128Ty,
      getContext().UnsignedInt128Ty,   getContext().HalfTy,
      getContext().FloatTy,            getContext().DoubleTy,
      getContext().LongDoubleTy,       getContext().Float128Ty,
      getContext().Char8Ty,            getContext().Char16Ty,
      getContext().Char32Ty
  };
  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
      RD->hasAttr<DLLExportAttr>()
          ? llvm::GlobalValue::DLLExportStorageClass
          : llvm::GlobalValue::DefaultStorageClass;
  llvm::GlobalValue::VisibilityTypes Visibility =
      CodeGenModule::GetLLVMVisibility(RD->getVisibility());
  for (const QualType &FundamentalType : FundamentalTypes) {
    // Emit T, T*, and const T* descriptors for each fundamental type.
    QualType PointerType = getContext().getPointerType(FundamentalType);
    QualType PointerTypeConst = getContext().getPointerType(
        FundamentalType.withConst());
    for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
      ItaniumRTTIBuilder(*this).BuildTypeInfo(
          Type, llvm::GlobalValue::ExternalLinkage,
          Visibility, DLLStorageClass);
  }
}

/// What sort of uniqueness rules should we use for the RTTI for the
/// given type?
ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
    QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
  if (shouldRTTIBeUnique())
    return RUK_Unique;

  // It's only necessary for linkonce_odr or weak_odr linkage.
  if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
      Linkage != llvm::GlobalValue::WeakODRLinkage)
    return RUK_Unique;

  // It's only necessary with default visibility.
  if (CanTy->getVisibility() != DefaultVisibility)
    return RUK_Unique;

  // If we're not required to publish this symbol, hide it.
  if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
    return RUK_NonUniqueHidden;

  // If we're required to publish this symbol, as we might be under an
  // explicit instantiation, leave it with default visibility but
  // enable string-comparisons.
  assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
  return RUK_NonUniqueVisible;
}

// Find out how to codegen the complete destructor and constructor
namespace {
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
}

/// Decide how the complete variant of the structor \p MD should be emitted
/// relative to its base variant: as a separate definition (Emit), as a
/// replace-all-uses redirection (RAUW), as an IR alias (Alias), or as a
/// separate function sharing a COMDAT group (COMDAT).
static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
                                       const CXXMethodDecl *MD) {
  if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
    return StructorCodegen::Emit;

  // The complete and base structors are not equivalent if there are any virtual
  // bases, so emit separate functions.
  if (MD->getParent()->getNumVBases())
    return StructorCodegen::Emit;

  GlobalDecl AliasDecl;
  if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
    AliasDecl = GlobalDecl(DD, Dtor_Complete);
  } else {
    const auto *CD = cast<CXXConstructorDecl>(MD);
    AliasDecl = GlobalDecl(CD, Ctor_Complete);
  }
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
    return StructorCodegen::RAUW;

  // FIXME: Should we allow available_externally aliases?
  if (!llvm::GlobalAlias::isValidLinkage(Linkage))
    return StructorCodegen::RAUW;

  if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
    // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
    if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
        CGM.getTarget().getTriple().isOSBinFormatWasm())
      return StructorCodegen::COMDAT;
    return StructorCodegen::Emit;
  }

  return StructorCodegen::Alias;
}

/// Emit \p AliasDecl as an IR alias for \p TargetDecl, replacing any
/// existing declaration with the same mangled name.
static void emitConstructorDestructorAlias(CodeGenModule &CGM,
                                           GlobalDecl AliasDecl,
                                           GlobalDecl TargetDecl) {
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  // If there is already a definition under this name, there is nothing to do.
  StringRef MangledName = CGM.getMangledName(AliasDecl);
  llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
  if (Entry && !Entry->isDeclaration())
    return;

  auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));

  // Create the alias with no name.
  auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);

  // Constructors and destructors are always unnamed_addr.
  Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Switch any previous uses to the alias.
  if (Entry) {
    assert(Entry->getType() == Aliasee->getType() &&
           "declaration exists with different type");
    Alias->takeName(Entry);
    Entry->replaceAllUsesWith(Alias);
    Entry->eraseFromParent();
  } else {
    Alias->setName(MangledName);
  }

  // Finally, set up the alias with its proper name and attributes.
  CGM.SetCommonAttributes(AliasDecl, Alias);
}

void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());
  auto *CD = dyn_cast<CXXConstructorDecl>(MD);
  // GD is either a constructor or a destructor; DD is set iff it is the latter.
  const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);

  StructorCodegen CGType = getCodegenToUse(CGM, MD);

  if (CD ?
          GD.getCtorType() == Ctor_Complete
         : GD.getDtorType() == Dtor_Complete) {
    GlobalDecl BaseDecl;
    if (CD)
      BaseDecl = GD.getWithCtorType(Ctor_Base);
    else
      BaseDecl = GD.getWithDtorType(Dtor_Base);

    // Complete variant: redirect it to the base variant where possible.
    if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
      emitConstructorDestructorAlias(CGM, GD, BaseDecl);
      return;
    }

    if (CGType == StructorCodegen::RAUW) {
      StringRef MangledName = CGM.getMangledName(GD);
      auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
      CGM.addReplacement(MangledName, Aliasee);
      return;
    }
  }

  // The base destructor is equivalent to the base destructor of its
  // base class if there is exactly one non-virtual base class with a
  // non-trivial destructor, there are no fields with a non-trivial
  // destructor, and the body of the destructor is trivial.
  if (DD && GD.getDtorType() == Dtor_Base &&
      CGType != StructorCodegen::COMDAT &&
      !CGM.TryEmitBaseDestructorAsAlias(DD))
    return;

  // FIXME: The deleting destructor is equivalent to the selected operator
  // delete if:
  //  * either the delete is a destroying operator delete or the destructor
  //    would be trivial if it weren't virtual,
  //  * the conversion from the 'this' parameter to the first parameter of the
  //    destructor is equivalent to a bitcast,
  //  * the destructor does not have an implicit "this" return, and
  //  * the operator delete has the same calling convention and IR function
  //    type as the destructor.
  // In such cases we should try to emit the deleting dtor as an alias to the
  // selected 'operator delete'.

  llvm::Function *Fn = CGM.codegenCXXStructor(GD);

  if (CGType == StructorCodegen::COMDAT) {
    // Place the function in the C5/D5 comdat so the complete and base
    // variants are deduplicated together.
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    if (DD)
      getMangleContext().mangleCXXDtorComdat(DD, Out);
    else
      getMangleContext().mangleCXXCtorComdat(CD, Out);
    llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
    Fn->setComdat(C);
  } else {
    CGM.maybeSetTrivialComdat(*MD, *Fn);
  }
}

static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
  // void *__cxa_begin_catch(void*);
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
}

static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
  // void __cxa_end_catch();
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
}

static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
  // void *__cxa_get_exception_ptr(void*);
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
}

namespace {
  /// A cleanup to call __cxa_end_catch.  In many cases, the caught
  /// exception type lets us state definitively that the thrown exception
  /// type does not have a destructor.  In particular:
  ///   - Catch-alls tell us nothing, so we have to conservatively
  ///     assume that the thrown exception might have a destructor.
  ///   - Catches by reference behave according to their base types.
  ///   - Catches of non-record types will only trigger for exceptions
  ///     of non-record types, which never have destructors.
  ///   - Catches of record types can trigger for arbitrary subclasses
  ///     of the caught type, so we have to assume the actual thrown
  ///     exception type might have a throwing destructor, even if the
  ///     caught type's destructor is trivial or nothrow.
  struct CallEndCatch final : EHScopeStack::Cleanup {
    CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
    // Whether __cxa_end_catch may throw for this catch (see class comment).
    bool MightThrow;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // A nounwind call avoids an invoke/landing pad when the cleanup
      // provably cannot throw.
      if (!MightThrow) {
        CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
        return;
      }

      CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
    }
  };
}

/// Emits a call to __cxa_begin_catch and enters a cleanup to call
/// __cxa_end_catch.
///
/// \param EndMightThrow - true if __cxa_end_catch might throw
static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
                                   llvm::Value *Exn,
                                   bool EndMightThrow) {
  llvm::CallInst *call =
    CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);

  CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);

  // __cxa_begin_catch returns the adjusted exception object pointer.
  return call;
}

/// A "special initializer" callback for initializing a catch
/// parameter during catch initialization.
static void InitCatchParam(CodeGenFunction &CGF,
                           const VarDecl &CatchParam,
                           Address ParamAddr,
                           SourceLocation Loc) {
  // Load the exception from where the landing pad saved it.
  llvm::Value *Exn = CGF.getExceptionFromSlot();

  CanQualType CatchType =
    CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
  llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);

  // If we're catching by reference, we can just cast the object
  // pointer to the appropriate pointer.
4253 if (isa<ReferenceType>(CatchType)) { 4254 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType(); 4255 bool EndCatchMightThrow = CaughtType->isRecordType(); 4256 4257 // __cxa_begin_catch returns the adjusted object pointer. 4258 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow); 4259 4260 // We have no way to tell the personality function that we're 4261 // catching by reference, so if we're catching a pointer, 4262 // __cxa_begin_catch will actually return that pointer by value. 4263 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) { 4264 QualType PointeeType = PT->getPointeeType(); 4265 4266 // When catching by reference, generally we should just ignore 4267 // this by-value pointer and use the exception object instead. 4268 if (!PointeeType->isRecordType()) { 4269 4270 // Exn points to the struct _Unwind_Exception header, which 4271 // we have to skip past in order to reach the exception data. 4272 unsigned HeaderSize = 4273 CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException(); 4274 AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize); 4275 4276 // However, if we're catching a pointer-to-record type that won't 4277 // work, because the personality function might have adjusted 4278 // the pointer. There's actually no way for us to fully satisfy 4279 // the language/ABI contract here: we can't use Exn because it 4280 // might have the wrong adjustment, but we can't use the by-value 4281 // pointer because it's off by a level of abstraction. 4282 // 4283 // The current solution is to dump the adjusted pointer into an 4284 // alloca, which breaks language semantics (because changing the 4285 // pointer doesn't change the exception) but at least works. 4286 // The better solution would be to filter out non-exact matches 4287 // and rethrow them, but this is tricky because the rethrow 4288 // really needs to be catchable by other sites at this landing 4289 // pad. 
The best solution is to fix the personality function. 4290 } else { 4291 // Pull the pointer for the reference type off. 4292 llvm::Type *PtrTy = 4293 cast<llvm::PointerType>(LLVMCatchTy)->getElementType(); 4294 4295 // Create the temporary and write the adjusted pointer into it. 4296 Address ExnPtrTmp = 4297 CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp"); 4298 llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy); 4299 CGF.Builder.CreateStore(Casted, ExnPtrTmp); 4300 4301 // Bind the reference to the temporary. 4302 AdjustedExn = ExnPtrTmp.getPointer(); 4303 } 4304 } 4305 4306 llvm::Value *ExnCast = 4307 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref"); 4308 CGF.Builder.CreateStore(ExnCast, ParamAddr); 4309 return; 4310 } 4311 4312 // Scalars and complexes. 4313 TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType); 4314 if (TEK != TEK_Aggregate) { 4315 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false); 4316 4317 // If the catch type is a pointer type, __cxa_begin_catch returns 4318 // the pointer by value. 4319 if (CatchType->hasPointerRepresentation()) { 4320 llvm::Value *CastExn = 4321 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted"); 4322 4323 switch (CatchType.getQualifiers().getObjCLifetime()) { 4324 case Qualifiers::OCL_Strong: 4325 CastExn = CGF.EmitARCRetainNonBlock(CastExn); 4326 LLVM_FALLTHROUGH; 4327 4328 case Qualifiers::OCL_None: 4329 case Qualifiers::OCL_ExplicitNone: 4330 case Qualifiers::OCL_Autoreleasing: 4331 CGF.Builder.CreateStore(CastExn, ParamAddr); 4332 return; 4333 4334 case Qualifiers::OCL_Weak: 4335 CGF.EmitARCInitWeak(ParamAddr, CastExn); 4336 return; 4337 } 4338 llvm_unreachable("bad ownership qualifier!"); 4339 } 4340 4341 // Otherwise, it returns a pointer into the exception object. 
4342 4343 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok 4344 llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy); 4345 4346 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType); 4347 LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType); 4348 switch (TEK) { 4349 case TEK_Complex: 4350 CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV, 4351 /*init*/ true); 4352 return; 4353 case TEK_Scalar: { 4354 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc); 4355 CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true); 4356 return; 4357 } 4358 case TEK_Aggregate: 4359 llvm_unreachable("evaluation kind filtered out!"); 4360 } 4361 llvm_unreachable("bad evaluation kind"); 4362 } 4363 4364 assert(isa<RecordType>(CatchType) && "unexpected catch type!"); 4365 auto catchRD = CatchType->getAsCXXRecordDecl(); 4366 CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD); 4367 4368 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok 4369 4370 // Check for a copy expression. If we don't have a copy expression, 4371 // that means a trivial copy is okay. 4372 const Expr *copyExpr = CatchParam.getInit(); 4373 if (!copyExpr) { 4374 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true); 4375 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy), 4376 caughtExnAlignment); 4377 LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType); 4378 LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType); 4379 CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap); 4380 return; 4381 } 4382 4383 // We have to call __cxa_get_exception_ptr to get the adjusted 4384 // pointer before copying. 4385 llvm::CallInst *rawAdjustedExn = 4386 CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn); 4387 4388 // Cast that to the appropriate type. 
4389 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy), 4390 caughtExnAlignment); 4391 4392 // The copy expression is defined in terms of an OpaqueValueExpr. 4393 // Find it and map it to the adjusted expression. 4394 CodeGenFunction::OpaqueValueMapping 4395 opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr), 4396 CGF.MakeAddrLValue(adjustedExn, CatchParam.getType())); 4397 4398 // Call the copy ctor in a terminate scope. 4399 CGF.EHStack.pushTerminate(); 4400 4401 // Perform the copy construction. 4402 CGF.EmitAggExpr(copyExpr, 4403 AggValueSlot::forAddr(ParamAddr, Qualifiers(), 4404 AggValueSlot::IsNotDestructed, 4405 AggValueSlot::DoesNotNeedGCBarriers, 4406 AggValueSlot::IsNotAliased, 4407 AggValueSlot::DoesNotOverlap)); 4408 4409 // Leave the terminate scope. 4410 CGF.EHStack.popTerminate(); 4411 4412 // Undo the opaque value mapping. 4413 opaque.pop(); 4414 4415 // Finally we can call __cxa_begin_catch. 4416 CallBeginCatch(CGF, Exn, true); 4417 } 4418 4419 /// Begins a catch statement by initializing the catch variable and 4420 /// calling __cxa_begin_catch. 4421 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF, 4422 const CXXCatchStmt *S) { 4423 // We have to be very careful with the ordering of cleanups here: 4424 // C++ [except.throw]p4: 4425 // The destruction [of the exception temporary] occurs 4426 // immediately after the destruction of the object declared in 4427 // the exception-declaration in the handler. 4428 // 4429 // So the precise ordering is: 4430 // 1. Construct catch variable. 4431 // 2. __cxa_begin_catch 4432 // 3. Enter __cxa_end_catch cleanup 4433 // 4. Enter dtor cleanup 4434 // 4435 // We do this by using a slightly abnormal initialization process. 
  // Delegation sequence:
  //   - ExitCXXTryStmt opens a RunCleanupsScope
  //   - EmitAutoVarAlloca creates the variable and debug info
  //   - InitCatchParam initializes the variable from the exception
  //   - CallBeginCatch calls __cxa_begin_catch
  //   - CallBeginCatch enters the __cxa_end_catch cleanup
  //   - EmitAutoVarCleanups enters the variable destructor cleanup
  //   - EmitCXXTryStmt emits the code for the catch body
  //   - EmitCXXTryStmt close the RunCleanupsScope

  VarDecl *CatchParam = S->getExceptionDecl();
  if (!CatchParam) {
    // catch (...): no variable to initialize; just begin the catch.
    // The thrown type is unknown, so __cxa_end_catch might throw.
    llvm::Value *Exn = CGF.getExceptionFromSlot();
    CallBeginCatch(CGF, Exn, true);
    return;
  }

  // Emit the local.
  CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
  InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
  CGF.EmitAutoVarCleanups(var);
}

/// Get or define the following function:
///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
/// This code is used only in C++.
static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
  llvm::FunctionType *fnTy =
      llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
  llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
      fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
  llvm::Function *fn =
      cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
  // Only emit the body once; later lookups just return the declaration.
  if (fn->empty()) {
    fn->setDoesNotThrow();
    fn->setDoesNotReturn();

    // What we really want is to massively penalize inlining without
    // forbidding it completely. The difference between that and
    // 'noinline' is negligible.
    fn->addFnAttr(llvm::Attribute::NoInline);

    // Allow this function to be shared across translation units, but
    // we don't want it to turn into an exported symbol.
    fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
    fn->setVisibility(llvm::Function::HiddenVisibility);
    if (CGM.supportsCOMDAT())
      fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));

    // Set up the function.
    llvm::BasicBlock *entry =
        llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
    CGBuilderTy builder(CGM, entry);

    // Pull the exception pointer out of the parameter list.
    llvm::Value *exn = &*fn->arg_begin();

    // Call __cxa_begin_catch(exn).
    llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
    catchCall->setDoesNotThrow();
    catchCall->setCallingConv(CGM.getRuntimeCC());

    // Call std::terminate().
    llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
    termCall->setDoesNotThrow();
    termCall->setDoesNotReturn();
    termCall->setCallingConv(CGM.getRuntimeCC());

    // std::terminate cannot return.
    builder.CreateUnreachable();
  }
  return fnRef;
}

llvm::CallInst *
ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                                   llvm::Value *Exn) {
  // In C++, we want to call __cxa_begin_catch() before terminating.
  if (Exn) {
    // __clang_call_terminate begins the catch for us before terminating,
    // so the exception's type info is available to the terminate handler.
    assert(CGF.CGM.getLangOpts().CPlusPlus);
    return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
  }
  // No live exception pointer: call std::terminate directly.
  return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
}

/// Load the vtable pointer (as an i8*) from an object of class RD.
std::pair<llvm::Value *, const CXXRecordDecl *>
ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
                             const CXXRecordDecl *RD) {
  return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
}

/// WebAssembly override: when the target supports native exception
/// handling, enter a catchret cleanup for the current catchpad before
/// delegating to the common Itanium begin-catch emission.
void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
                                       const CXXCatchStmt *C) {
  if (CGF.getTarget().hasFeature("exception-handling"))
    CGF.EHStack.pushCleanup<CatchRetScope>(
        NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
  ItaniumCXXABI::emitBeginCatch(CGF, C);
}

/// Register a global destructor as best as we know how.
void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                                  llvm::FunctionCallee dtor,
                                  llvm::Constant *addr) {
  // TLS destructors are not supported here yet.
  if (D.getTLSKind() != VarDecl::TLS_None)
    llvm::report_fatal_error("thread local storage not yet implemented on AIX");

  // Create __dtor function for the var decl.
  llvm::Function *dtorStub = CGF.createAtExitStub(D, dtor, addr);

  // Register above __dtor with atexit().
  CGF.registerGlobalDtorWithAtExit(dtorStub);

  // Emit __finalize function to unregister __dtor and (as appropriate) call
  // __dtor.
  emitCXXStermFinalizer(D, dtorStub, addr);
}

/// Emit the sterm-finalizer function for variable D: it calls unatexit()
/// on the registered __dtor stub and, if the stub was still registered
/// (unatexit returned 0), invokes it directly.
void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                                     llvm::Constant *addr) {
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getMangleContext().mangleDynamicStermFinalizer(&D, Out);
  }

  // Create the finalization action associated with a variable.
  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, FnName.str(), FI, D.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
                    FunctionArgList());

  // The unatexit subroutine unregisters __dtor functions that were previously
  // registered by the atexit subroutine. If the referenced function is found,
  // the unatexit returns a value of 0, meaning that the cleanup is still
  // pending (and we should call the __dtor function).
  llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);

  llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");

  llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");

  // Check if unatexit returns a value of 0. If it does, jump to
  // DestructCallBlock, otherwise jump to EndBlock directly.
  CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

  CGF.EmitBlock(DestructCallBlock);

  // Emit the call to dtorStub.
  llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);

  // Make sure the call and the callee agree on calling convention.
  CI->setCallingConv(dtorStub->getCallingConv());

  CGF.EmitBlock(EndBlock);

  CGF.FinishFunction();

  // Record the finalizer so the module emits it in the right place.
  CGM.AddCXXStermFinalizerEntry(StermFinalizer);
}