1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This provides C++ code generation targeting the Itanium C++ ABI. The class 10 // in this file generates structures that follow the Itanium C++ ABI, which is 11 // documented at: 12 // https://itanium-cxx-abi.github.io/cxx-abi/abi.html 13 // https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html 14 // 15 // It also supports the closely-related ARM ABI, documented at: 16 // https://developer.arm.com/documentation/ihi0041/g/ 17 // 18 //===----------------------------------------------------------------------===// 19 20 #include "CGCXXABI.h" 21 #include "CGCleanup.h" 22 #include "CGRecordLayout.h" 23 #include "CGVTables.h" 24 #include "CodeGenFunction.h" 25 #include "CodeGenModule.h" 26 #include "TargetInfo.h" 27 #include "clang/AST/Attr.h" 28 #include "clang/AST/Mangle.h" 29 #include "clang/AST/StmtCXX.h" 30 #include "clang/AST/Type.h" 31 #include "clang/CodeGen/ConstantInitBuilder.h" 32 #include "llvm/IR/DataLayout.h" 33 #include "llvm/IR/GlobalValue.h" 34 #include "llvm/IR/Instructions.h" 35 #include "llvm/IR/Intrinsics.h" 36 #include "llvm/IR/Value.h" 37 #include "llvm/Support/ScopedPrinter.h" 38 39 using namespace clang; 40 using namespace CodeGen; 41 42 namespace { 43 class ItaniumCXXABI : public CodeGen::CGCXXABI { 44 /// VTables - All the vtables which have been defined. 45 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables; 46 47 /// All the thread wrapper functions that have been used. 
48 llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8> 49 ThreadWrappers; 50 51 protected: 52 bool UseARMMethodPtrABI; 53 bool UseARMGuardVarABI; 54 bool Use32BitVTableOffsetABI; 55 56 ItaniumMangleContext &getMangleContext() { 57 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext()); 58 } 59 60 public: 61 ItaniumCXXABI(CodeGen::CodeGenModule &CGM, 62 bool UseARMMethodPtrABI = false, 63 bool UseARMGuardVarABI = false) : 64 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI), 65 UseARMGuardVarABI(UseARMGuardVarABI), 66 Use32BitVTableOffsetABI(false) { } 67 68 bool classifyReturnType(CGFunctionInfo &FI) const override; 69 70 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override { 71 // If C++ prohibits us from making a copy, pass by address. 72 if (!RD->canPassInRegisters()) 73 return RAA_Indirect; 74 return RAA_Default; 75 } 76 77 bool isThisCompleteObject(GlobalDecl GD) const override { 78 // The Itanium ABI has separate complete-object vs. base-object 79 // variants of both constructors and destructors. 80 if (isa<CXXDestructorDecl>(GD.getDecl())) { 81 switch (GD.getDtorType()) { 82 case Dtor_Complete: 83 case Dtor_Deleting: 84 return true; 85 86 case Dtor_Base: 87 return false; 88 89 case Dtor_Comdat: 90 llvm_unreachable("emitting dtor comdat as function?"); 91 } 92 llvm_unreachable("bad dtor kind"); 93 } 94 if (isa<CXXConstructorDecl>(GD.getDecl())) { 95 switch (GD.getCtorType()) { 96 case Ctor_Complete: 97 return true; 98 99 case Ctor_Base: 100 return false; 101 102 case Ctor_CopyingClosure: 103 case Ctor_DefaultClosure: 104 llvm_unreachable("closure ctors in Itanium ABI?"); 105 106 case Ctor_Comdat: 107 llvm_unreachable("emitting ctor comdat as function?"); 108 } 109 llvm_unreachable("bad dtor kind"); 110 } 111 112 // No other kinds. 
113 return false; 114 } 115 116 bool isZeroInitializable(const MemberPointerType *MPT) override; 117 118 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override; 119 120 CGCallee 121 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, 122 const Expr *E, 123 Address This, 124 llvm::Value *&ThisPtrForCall, 125 llvm::Value *MemFnPtr, 126 const MemberPointerType *MPT) override; 127 128 llvm::Value * 129 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E, 130 Address Base, 131 llvm::Value *MemPtr, 132 const MemberPointerType *MPT) override; 133 134 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF, 135 const CastExpr *E, 136 llvm::Value *Src) override; 137 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E, 138 llvm::Constant *Src) override; 139 140 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override; 141 142 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override; 143 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT, 144 CharUnits offset) override; 145 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override; 146 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD, 147 CharUnits ThisAdjustment); 148 149 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF, 150 llvm::Value *L, llvm::Value *R, 151 const MemberPointerType *MPT, 152 bool Inequality) override; 153 154 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF, 155 llvm::Value *Addr, 156 const MemberPointerType *MPT) override; 157 158 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE, 159 Address Ptr, QualType ElementType, 160 const CXXDestructorDecl *Dtor) override; 161 162 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override; 163 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override; 164 165 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override; 166 167 llvm::CallInst * 168 
emitTerminateForUnexpectedException(CodeGenFunction &CGF, 169 llvm::Value *Exn) override; 170 171 void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD); 172 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override; 173 CatchTypeInfo 174 getAddrOfCXXCatchHandlerType(QualType Ty, 175 QualType CatchHandlerType) override { 176 return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0}; 177 } 178 179 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override; 180 void EmitBadTypeidCall(CodeGenFunction &CGF) override; 181 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy, 182 Address ThisPtr, 183 llvm::Type *StdTypeInfoPtrTy) override; 184 185 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr, 186 QualType SrcRecordTy) override; 187 188 llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value, 189 QualType SrcRecordTy, QualType DestTy, 190 QualType DestRecordTy, 191 llvm::BasicBlock *CastEnd) override; 192 193 llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value, 194 QualType SrcRecordTy, 195 QualType DestTy) override; 196 197 bool EmitBadCastCall(CodeGenFunction &CGF) override; 198 199 llvm::Value * 200 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This, 201 const CXXRecordDecl *ClassDecl, 202 const CXXRecordDecl *BaseClassDecl) override; 203 204 void EmitCXXConstructors(const CXXConstructorDecl *D) override; 205 206 AddedStructorArgCounts 207 buildStructorSignature(GlobalDecl GD, 208 SmallVectorImpl<CanQualType> &ArgTys) override; 209 210 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor, 211 CXXDtorType DT) const override { 212 // Itanium does not emit any destructor variant as an inline thunk. 213 // Delegating may occur as an optimization, but all variants are either 214 // emitted with external linkage or as linkonce if they are inline and used. 
215 return false; 216 } 217 218 void EmitCXXDestructors(const CXXDestructorDecl *D) override; 219 220 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy, 221 FunctionArgList &Params) override; 222 223 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override; 224 225 AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF, 226 const CXXConstructorDecl *D, 227 CXXCtorType Type, 228 bool ForVirtualBase, 229 bool Delegating) override; 230 231 llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF, 232 const CXXDestructorDecl *DD, 233 CXXDtorType Type, 234 bool ForVirtualBase, 235 bool Delegating) override; 236 237 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD, 238 CXXDtorType Type, bool ForVirtualBase, 239 bool Delegating, Address This, 240 QualType ThisTy) override; 241 242 void emitVTableDefinitions(CodeGenVTables &CGVT, 243 const CXXRecordDecl *RD) override; 244 245 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF, 246 CodeGenFunction::VPtr Vptr) override; 247 248 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override { 249 return true; 250 } 251 252 llvm::Constant * 253 getVTableAddressPoint(BaseSubobject Base, 254 const CXXRecordDecl *VTableClass) override; 255 256 llvm::Value *getVTableAddressPointInStructor( 257 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, 258 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override; 259 260 llvm::Value *getVTableAddressPointInStructorWithVTT( 261 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, 262 BaseSubobject Base, const CXXRecordDecl *NearestVBase); 263 264 llvm::Constant * 265 getVTableAddressPointForConstExpr(BaseSubobject Base, 266 const CXXRecordDecl *VTableClass) override; 267 268 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD, 269 CharUnits VPtrOffset) override; 270 271 CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, 272 Address This, llvm::Type *Ty, 
273 SourceLocation Loc) override; 274 275 llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF, 276 const CXXDestructorDecl *Dtor, 277 CXXDtorType DtorType, Address This, 278 DeleteOrMemberCallExpr E) override; 279 280 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override; 281 282 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override; 283 bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const; 284 285 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD, 286 bool ReturnAdjustment) override { 287 // Allow inlining of thunks by emitting them with available_externally 288 // linkage together with vtables when needed. 289 if (ForVTable && !Thunk->hasLocalLinkage()) 290 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage); 291 CGM.setGVProperties(Thunk, GD); 292 } 293 294 bool exportThunk() override { return true; } 295 296 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This, 297 const ThisAdjustment &TA) override; 298 299 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret, 300 const ReturnAdjustment &RA) override; 301 302 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *, 303 FunctionArgList &Args) const override { 304 assert(!Args.empty() && "expected the arglist to not be empty!"); 305 return Args.size() - 1; 306 } 307 308 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; } 309 StringRef GetDeletedVirtualCallName() override 310 { return "__cxa_deleted_virtual"; } 311 312 CharUnits getArrayCookieSizeImpl(QualType elementType) override; 313 Address InitializeArrayCookie(CodeGenFunction &CGF, 314 Address NewPtr, 315 llvm::Value *NumElements, 316 const CXXNewExpr *expr, 317 QualType ElementType) override; 318 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, 319 Address allocPtr, 320 CharUnits cookieSize) override; 321 322 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D, 323 llvm::GlobalVariable 
*DeclPtr, 324 bool PerformInit) override; 325 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, 326 llvm::FunctionCallee dtor, 327 llvm::Constant *addr) override; 328 329 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD, 330 llvm::Value *Val); 331 void EmitThreadLocalInitFuncs( 332 CodeGenModule &CGM, 333 ArrayRef<const VarDecl *> CXXThreadLocals, 334 ArrayRef<llvm::Function *> CXXThreadLocalInits, 335 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override; 336 337 /// Determine whether we will definitely emit this variable with a constant 338 /// initializer, either because the language semantics demand it or because 339 /// we know that the initializer is a constant. 340 bool isEmittedWithConstantInitializer(const VarDecl *VD) const { 341 VD = VD->getMostRecentDecl(); 342 if (VD->hasAttr<ConstInitAttr>()) 343 return true; 344 345 // All later checks examine the initializer specified on the variable. If 346 // the variable is weak, such examination would not be correct. 347 if (VD->isWeak() || VD->hasAttr<SelectAnyAttr>()) 348 return false; 349 350 const VarDecl *InitDecl = VD->getInitializingDeclaration(); 351 if (!InitDecl) 352 return false; 353 354 // If there's no initializer to run, this is constant initialization. 355 if (!InitDecl->hasInit()) 356 return true; 357 358 // If we have the only definition, we don't need a thread wrapper if we 359 // will emit the value as a constant. 360 if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD))) 361 return !VD->needsDestruction(getContext()) && InitDecl->evaluateValue(); 362 363 // Otherwise, we need a thread wrapper unless we know that every 364 // translation unit will emit the value as a constant. We rely on the 365 // variable being constant-initialized in every translation unit if it's 366 // constant-initialized in any translation unit, which isn't actually 367 // guaranteed by the standard but is necessary for sanity. 
368 return InitDecl->hasConstantInitialization(); 369 } 370 371 bool usesThreadWrapperFunction(const VarDecl *VD) const override { 372 return !isEmittedWithConstantInitializer(VD) || 373 VD->needsDestruction(getContext()); 374 } 375 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, 376 QualType LValType) override; 377 378 bool NeedsVTTParameter(GlobalDecl GD) override; 379 380 /**************************** RTTI Uniqueness ******************************/ 381 382 protected: 383 /// Returns true if the ABI requires RTTI type_info objects to be unique 384 /// across a program. 385 virtual bool shouldRTTIBeUnique() const { return true; } 386 387 public: 388 /// What sort of unique-RTTI behavior should we use? 389 enum RTTIUniquenessKind { 390 /// We are guaranteeing, or need to guarantee, that the RTTI string 391 /// is unique. 392 RUK_Unique, 393 394 /// We are not guaranteeing uniqueness for the RTTI string, so we 395 /// can demote to hidden visibility but must use string comparisons. 396 RUK_NonUniqueHidden, 397 398 /// We are not guaranteeing uniqueness for the RTTI string, so we 399 /// have to use string comparisons, but we also have to emit it with 400 /// non-hidden visibility. 401 RUK_NonUniqueVisible 402 }; 403 404 /// Return the required visibility status for the given type and linkage in 405 /// the current ABI. 
406 RTTIUniquenessKind 407 classifyRTTIUniqueness(QualType CanTy, 408 llvm::GlobalValue::LinkageTypes Linkage) const; 409 friend class ItaniumRTTIBuilder; 410 411 void emitCXXStructor(GlobalDecl GD) override; 412 413 std::pair<llvm::Value *, const CXXRecordDecl *> 414 LoadVTablePtr(CodeGenFunction &CGF, Address This, 415 const CXXRecordDecl *RD) override; 416 417 private: 418 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const { 419 const auto &VtableLayout = 420 CGM.getItaniumVTableContext().getVTableLayout(RD); 421 422 for (const auto &VtableComponent : VtableLayout.vtable_components()) { 423 // Skip empty slot. 424 if (!VtableComponent.isUsedFunctionPointerKind()) 425 continue; 426 427 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl(); 428 if (!Method->getCanonicalDecl()->isInlined()) 429 continue; 430 431 StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl()); 432 auto *Entry = CGM.GetGlobalValue(Name); 433 // This checks if virtual inline function has already been emitted. 434 // Note that it is possible that this inline function would be emitted 435 // after trying to emit vtable speculatively. Because of this we do 436 // an extra pass after emitting all deferred vtables to find and emit 437 // these vtables opportunistically. 
438 if (!Entry || Entry->isDeclaration()) 439 return true; 440 } 441 return false; 442 } 443 444 bool isVTableHidden(const CXXRecordDecl *RD) const { 445 const auto &VtableLayout = 446 CGM.getItaniumVTableContext().getVTableLayout(RD); 447 448 for (const auto &VtableComponent : VtableLayout.vtable_components()) { 449 if (VtableComponent.isRTTIKind()) { 450 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl(); 451 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility) 452 return true; 453 } else if (VtableComponent.isUsedFunctionPointerKind()) { 454 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl(); 455 if (Method->getVisibility() == Visibility::HiddenVisibility && 456 !Method->isDefined()) 457 return true; 458 } 459 } 460 return false; 461 } 462 }; 463 464 class ARMCXXABI : public ItaniumCXXABI { 465 public: 466 ARMCXXABI(CodeGen::CodeGenModule &CGM) : 467 ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true, 468 /*UseARMGuardVarABI=*/true) {} 469 470 bool HasThisReturn(GlobalDecl GD) const override { 471 return (isa<CXXConstructorDecl>(GD.getDecl()) || ( 472 isa<CXXDestructorDecl>(GD.getDecl()) && 473 GD.getDtorType() != Dtor_Deleting)); 474 } 475 476 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV, 477 QualType ResTy) override; 478 479 CharUnits getArrayCookieSizeImpl(QualType elementType) override; 480 Address InitializeArrayCookie(CodeGenFunction &CGF, 481 Address NewPtr, 482 llvm::Value *NumElements, 483 const CXXNewExpr *expr, 484 QualType ElementType) override; 485 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr, 486 CharUnits cookieSize) override; 487 }; 488 489 class AppleARM64CXXABI : public ARMCXXABI { 490 public: 491 AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) { 492 Use32BitVTableOffsetABI = true; 493 } 494 495 // ARM64 libraries are prepared for non-unique RTTI. 
496 bool shouldRTTIBeUnique() const override { return false; } 497 }; 498 499 class FuchsiaCXXABI final : public ItaniumCXXABI { 500 public: 501 explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM) 502 : ItaniumCXXABI(CGM) {} 503 504 private: 505 bool HasThisReturn(GlobalDecl GD) const override { 506 return isa<CXXConstructorDecl>(GD.getDecl()) || 507 (isa<CXXDestructorDecl>(GD.getDecl()) && 508 GD.getDtorType() != Dtor_Deleting); 509 } 510 }; 511 512 class WebAssemblyCXXABI final : public ItaniumCXXABI { 513 public: 514 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM) 515 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true, 516 /*UseARMGuardVarABI=*/true) {} 517 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override; 518 519 private: 520 bool HasThisReturn(GlobalDecl GD) const override { 521 return isa<CXXConstructorDecl>(GD.getDecl()) || 522 (isa<CXXDestructorDecl>(GD.getDecl()) && 523 GD.getDtorType() != Dtor_Deleting); 524 } 525 bool canCallMismatchedFunctionType() const override { return false; } 526 }; 527 528 class XLCXXABI final : public ItaniumCXXABI { 529 public: 530 explicit XLCXXABI(CodeGen::CodeGenModule &CGM) 531 : ItaniumCXXABI(CGM) {} 532 533 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, 534 llvm::FunctionCallee dtor, 535 llvm::Constant *addr) override; 536 537 bool useSinitAndSterm() const override { return true; } 538 539 private: 540 void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub, 541 llvm::Constant *addr); 542 }; 543 } 544 545 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) { 546 switch (CGM.getTarget().getCXXABI().getKind()) { 547 // For IR-generation purposes, there's no significant difference 548 // between the ARM and iOS ABIs. 
549 case TargetCXXABI::GenericARM: 550 case TargetCXXABI::iOS: 551 case TargetCXXABI::WatchOS: 552 return new ARMCXXABI(CGM); 553 554 case TargetCXXABI::AppleARM64: 555 return new AppleARM64CXXABI(CGM); 556 557 case TargetCXXABI::Fuchsia: 558 return new FuchsiaCXXABI(CGM); 559 560 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't 561 // include the other 32-bit ARM oddities: constructor/destructor return values 562 // and array cookies. 563 case TargetCXXABI::GenericAArch64: 564 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true, 565 /*UseARMGuardVarABI=*/true); 566 567 case TargetCXXABI::GenericMIPS: 568 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true); 569 570 case TargetCXXABI::WebAssembly: 571 return new WebAssemblyCXXABI(CGM); 572 573 case TargetCXXABI::XL: 574 return new XLCXXABI(CGM); 575 576 case TargetCXXABI::GenericItanium: 577 if (CGM.getContext().getTargetInfo().getTriple().getArch() 578 == llvm::Triple::le32) { 579 // For PNaCl, use ARM-style method pointers so that PNaCl code 580 // does not assume anything about the alignment of function 581 // pointers. 
582 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true); 583 } 584 return new ItaniumCXXABI(CGM); 585 586 case TargetCXXABI::Microsoft: 587 llvm_unreachable("Microsoft ABI is not Itanium-based"); 588 } 589 llvm_unreachable("bad ABI kind"); 590 } 591 592 llvm::Type * 593 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) { 594 if (MPT->isMemberDataPointer()) 595 return CGM.PtrDiffTy; 596 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy); 597 } 598 599 /// In the Itanium and ARM ABIs, method pointers have the form: 600 /// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr; 601 /// 602 /// In the Itanium ABI: 603 /// - method pointers are virtual if (memptr.ptr & 1) is nonzero 604 /// - the this-adjustment is (memptr.adj) 605 /// - the virtual offset is (memptr.ptr - 1) 606 /// 607 /// In the ARM ABI: 608 /// - method pointers are virtual if (memptr.adj & 1) is nonzero 609 /// - the this-adjustment is (memptr.adj >> 1) 610 /// - the virtual offset is (memptr.ptr) 611 /// ARM uses 'adj' for the virtual flag because Thumb functions 612 /// may be only single-byte aligned. 613 /// 614 /// If the member is virtual, the adjusted 'this' pointer points 615 /// to a vtable pointer from which the virtual offset is applied. 616 /// 617 /// If the member is non-virtual, memptr.ptr is the address of 618 /// the function to call. 
619 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer( 620 CodeGenFunction &CGF, const Expr *E, Address ThisAddr, 621 llvm::Value *&ThisPtrForCall, 622 llvm::Value *MemFnPtr, const MemberPointerType *MPT) { 623 CGBuilderTy &Builder = CGF.Builder; 624 625 const FunctionProtoType *FPT = 626 MPT->getPointeeType()->getAs<FunctionProtoType>(); 627 auto *RD = 628 cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl()); 629 630 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType( 631 CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr)); 632 633 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1); 634 635 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual"); 636 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual"); 637 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end"); 638 639 // Extract memptr.adj, which is in the second field. 640 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj"); 641 642 // Compute the true adjustment. 643 llvm::Value *Adj = RawAdj; 644 if (UseARMMethodPtrABI) 645 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted"); 646 647 // Apply the adjustment and cast back to the original struct type 648 // for consistency. 649 llvm::Value *This = ThisAddr.getPointer(); 650 llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy()); 651 Ptr = Builder.CreateInBoundsGEP(Ptr, Adj); 652 This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted"); 653 ThisPtrForCall = This; 654 655 // Load the function pointer. 656 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr"); 657 658 // If the LSB in the function pointer is 1, the function pointer points to 659 // a virtual function. 
660 llvm::Value *IsVirtual; 661 if (UseARMMethodPtrABI) 662 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1); 663 else 664 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1); 665 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual"); 666 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual); 667 668 // In the virtual path, the adjustment left 'This' pointing to the 669 // vtable of the correct base subobject. The "function pointer" is an 670 // offset within the vtable (+1 for the virtual flag on non-ARM). 671 CGF.EmitBlock(FnVirtual); 672 673 // Cast the adjusted this to a pointer to vtable pointer and load. 674 llvm::Type *VTableTy = Builder.getInt8PtrTy(); 675 CharUnits VTablePtrAlign = 676 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD, 677 CGF.getPointerAlign()); 678 llvm::Value *VTable = 679 CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD); 680 681 // Apply the offset. 682 // On ARM64, to reserve extra space in virtual member function pointers, 683 // we only pay attention to the low 32 bits of the offset. 684 llvm::Value *VTableOffset = FnAsInt; 685 if (!UseARMMethodPtrABI) 686 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1); 687 if (Use32BitVTableOffsetABI) { 688 VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty); 689 VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy); 690 } 691 692 // Check the address of the function pointer if CFI on member function 693 // pointers is enabled. 694 llvm::Constant *CheckSourceLocation; 695 llvm::Constant *CheckTypeDesc; 696 bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) && 697 CGM.HasHiddenLTOVisibility(RD); 698 bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination && 699 CGM.HasHiddenLTOVisibility(RD); 700 bool ShouldEmitWPDInfo = 701 CGM.getCodeGenOpts().WholeProgramVTables && 702 // Don't insert type tests if we are forcing public std visibility. 
703 !CGM.HasLTOVisibilityPublicStd(RD); 704 llvm::Value *VirtualFn = nullptr; 705 706 { 707 CodeGenFunction::SanitizerScope SanScope(&CGF); 708 llvm::Value *TypeId = nullptr; 709 llvm::Value *CheckResult = nullptr; 710 711 if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) { 712 // If doing CFI, VFE or WPD, we will need the metadata node to check 713 // against. 714 llvm::Metadata *MD = 715 CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0)); 716 TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD); 717 } 718 719 if (ShouldEmitVFEInfo) { 720 llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset); 721 722 // If doing VFE, load from the vtable with a type.checked.load intrinsic 723 // call. Note that we use the GEP to calculate the address to load from 724 // and pass 0 as the offset to the intrinsic. This is because every 725 // vtable slot of the correct type is marked with matching metadata, and 726 // we know that the load must be from one of these slots. 727 llvm::Value *CheckedLoad = Builder.CreateCall( 728 CGM.getIntrinsic(llvm::Intrinsic::type_checked_load), 729 {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId}); 730 CheckResult = Builder.CreateExtractValue(CheckedLoad, 1); 731 VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0); 732 VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(), 733 "memptr.virtualfn"); 734 } else { 735 // When not doing VFE, emit a normal load, as it allows more 736 // optimisations than type.checked.load. 
737 if (ShouldEmitCFICheck || ShouldEmitWPDInfo) { 738 llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset); 739 CheckResult = Builder.CreateCall( 740 CGM.getIntrinsic(llvm::Intrinsic::type_test), 741 {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId}); 742 } 743 744 if (CGM.getItaniumVTableContext().isRelativeLayout()) { 745 VirtualFn = CGF.Builder.CreateCall( 746 CGM.getIntrinsic(llvm::Intrinsic::load_relative, 747 {VTableOffset->getType()}), 748 {VTable, VTableOffset}); 749 VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo()); 750 } else { 751 llvm::Value *VFPAddr = CGF.Builder.CreateGEP(VTable, VTableOffset); 752 VFPAddr = CGF.Builder.CreateBitCast( 753 VFPAddr, FTy->getPointerTo()->getPointerTo()); 754 VirtualFn = CGF.Builder.CreateAlignedLoad( 755 VFPAddr, CGF.getPointerAlign(), "memptr.virtualfn"); 756 } 757 } 758 assert(VirtualFn && "Virtual fuction pointer not created!"); 759 assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo || 760 CheckResult) && 761 "Check result required but not created!"); 762 763 if (ShouldEmitCFICheck) { 764 // If doing CFI, emit the check. 
765 CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc()); 766 CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0)); 767 llvm::Constant *StaticData[] = { 768 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall), 769 CheckSourceLocation, 770 CheckTypeDesc, 771 }; 772 773 if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) { 774 CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail); 775 } else { 776 llvm::Value *AllVtables = llvm::MetadataAsValue::get( 777 CGM.getLLVMContext(), 778 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables")); 779 llvm::Value *ValidVtable = Builder.CreateCall( 780 CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables}); 781 CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall), 782 SanitizerHandler::CFICheckFail, StaticData, 783 {VTable, ValidVtable}); 784 } 785 786 FnVirtual = Builder.GetInsertBlock(); 787 } 788 } // End of sanitizer scope 789 790 CGF.EmitBranch(FnEnd); 791 792 // In the non-virtual path, the function pointer is actually a 793 // function pointer. 794 CGF.EmitBlock(FnNonVirtual); 795 llvm::Value *NonVirtualFn = 796 Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn"); 797 798 // Check the function pointer if CFI on member function pointers is enabled. 
799 if (ShouldEmitCFICheck) { 800 CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl(); 801 if (RD->hasDefinition()) { 802 CodeGenFunction::SanitizerScope SanScope(&CGF); 803 804 llvm::Constant *StaticData[] = { 805 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall), 806 CheckSourceLocation, 807 CheckTypeDesc, 808 }; 809 810 llvm::Value *Bit = Builder.getFalse(); 811 llvm::Value *CastedNonVirtualFn = 812 Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy); 813 for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) { 814 llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType( 815 getContext().getMemberPointerType( 816 MPT->getPointeeType(), 817 getContext().getRecordType(Base).getTypePtr())); 818 llvm::Value *TypeId = 819 llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD); 820 821 llvm::Value *TypeTest = 822 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test), 823 {CastedNonVirtualFn, TypeId}); 824 Bit = Builder.CreateOr(Bit, TypeTest); 825 } 826 827 CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall), 828 SanitizerHandler::CFICheckFail, StaticData, 829 {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)}); 830 831 FnNonVirtual = Builder.GetInsertBlock(); 832 } 833 } 834 835 // We're done. 836 CGF.EmitBlock(FnEnd); 837 llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2); 838 CalleePtr->addIncoming(VirtualFn, FnVirtual); 839 CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual); 840 841 CGCallee Callee(FPT, CalleePtr); 842 return Callee; 843 } 844 845 /// Compute an l-value by applying the given pointer-to-member to a 846 /// base object. 847 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress( 848 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr, 849 const MemberPointerType *MPT) { 850 assert(MemPtr->getType() == CGM.PtrDiffTy); 851 852 CGBuilderTy &Builder = CGF.Builder; 853 854 // Cast to char*. 
855 Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty); 856 857 // Apply the offset, which we assume is non-null. 858 llvm::Value *Addr = 859 Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset"); 860 861 // Cast the address to the appropriate pointer type, adopting the 862 // address space of the base pointer. 863 llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType()) 864 ->getPointerTo(Base.getAddressSpace()); 865 return Builder.CreateBitCast(Addr, PType); 866 } 867 868 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer 869 /// conversion. 870 /// 871 /// Bitcast conversions are always a no-op under Itanium. 872 /// 873 /// Obligatory offset/adjustment diagram: 874 /// <-- offset --> <-- adjustment --> 875 /// |--------------------------|----------------------|--------------------| 876 /// ^Derived address point ^Base address point ^Member address point 877 /// 878 /// So when converting a base member pointer to a derived member pointer, 879 /// we add the offset to the adjustment because the address point has 880 /// decreased; and conversely, when converting a derived MP to a base MP 881 /// we subtract the offset from the adjustment because the address point 882 /// has increased. 883 /// 884 /// The standard forbids (at compile time) conversion to and from 885 /// virtual bases, which is why we don't have to consider them here. 886 /// 887 /// The standard forbids (at run time) casting a derived MP to a base 888 /// MP when the derived MP does not point to a member of the base. 889 /// This is why -1 is a reasonable choice for null data member 890 /// pointers. 
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // Use constant emission if we can.
  if (isa<llvm::Constant>(src))
    return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));

  // A null adjustment means the cast is a no-op at the value level.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  CGBuilderTy &Builder = CGF.Builder;
  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    llvm::Value *dst;
    if (isDerivedToBase)
      dst = Builder.CreateNSWSub(src, adj, "adj");
    else
      dst = Builder.CreateNSWAdd(src, adj, "adj");

    // Null check: the null data member pointer is -1, which must be
    // preserved across the conversion (hence the select).
    llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
    llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
    return Builder.CreateSelect(isNull, src, dst);
  }

  // The this-adjustment is left-shifted by 1 on ARM, because the low bit
  // of adj carries the virtual discriminator there.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  // Member function pointer: adjust field 1 (adj) of the {ptr, adj} pair.
  llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
  llvm::Value *dstAdj;
  if (isDerivedToBase)
    dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
  else
    dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");

  return Builder.CreateInsertValue(src, dstAdj, 1);
}

/// Constant-folding twin of the overload above: perform the same member
/// pointer conversion entirely on llvm::Constants.
llvm::Constant *
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
                                           llvm::Constant *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // If the adjustment is trivial, we don't need to do anything.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    // null maps to null: the all-ones (-1) null representation must
    // survive the conversion unchanged.
    if (src->isAllOnesValue()) return src;

    if (isDerivedToBase)
      return llvm::ConstantExpr::getNSWSub(src, adj);
    else
      return llvm::ConstantExpr::getNSWAdd(src, adj);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  // Member function pointer: rebuild the {ptr, adj} struct with an
  // adjusted second field.
  llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
  llvm::Constant *dstAdj;
  if (isDerivedToBase)
    dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
  else
    dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);

  return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
}

/// Emit the null member pointer constant for the given type: -1 for data
/// member pointers, {0, 0} for member function pointers.
llvm::Constant *
ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
  // Itanium C++ ABI 2.3:
  //   A NULL pointer is represented as -1.
  if (MPT->isMemberDataPointer())
    return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);

  llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
  llvm::Constant *Values[2] = { Zero, Zero };
  return llvm::ConstantStruct::getAnon(Values);
}

llvm::Constant *
ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
                                     CharUnits offset) {
  // Itanium C++ ABI 2.3:
  //   A pointer to data member is an offset from the base address of
  //   the class object containing it, represented as a ptrdiff_t
  return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
}

/// Emit a member function pointer constant with no this-adjustment.
llvm::Constant *
ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
  return BuildMemberPointer(MD, CharUnits::Zero());
}

/// Build the {ptr, adj} constant pair for a pointer to member function,
/// applying the given this-adjustment.  For virtual functions ptr encodes
/// a vtable offset; for non-virtual functions it is the function address.
llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");

  CodeGenTypes &Types = CGM.getTypes();

  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
    uint64_t VTableOffset;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Multiply by 4-byte relative offsets.
      VTableOffset = Index * 4;
    } else {
      // Classic layout: vtable slots are pointer-sized.
      const ASTContext &Context = getContext();
      CharUnits PointerWidth = Context.toCharUnitsFromBits(
          Context.getTargetInfo().getPointerWidth(0));
      VTableOffset = Index * PointerWidth.getQuantity();
    }

    if (UseARMMethodPtrABI) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         2 * ThisAdjustment.getQuantity() + 1);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);

    // Non-virtual: ptr is the function address; adj is the adjustment
    // (doubled on ARM, where the low bit is the virtual discriminator).
    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                       (UseARMMethodPtrABI ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}

/// Emit the constant representation of an APValue member pointer,
/// folding the path-derived this-adjustment into the result.
llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
                                                 QualType MPType) {
  const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  // No declaration means this is the null member pointer.
  if (!MPD)
    return EmitNullMemberPointer(MPT);

  CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);

  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
    return BuildMemberPointer(MD, ThisAdjustment);

  // Data member: offset of the field plus the path adjustment.
  CharUnits FieldOffset =
    getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
  return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
}

/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  // For inequality the whole formula is negated, so by De Morgan the
  // roles of And and Or swap and the predicate flips.
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr.  This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null.  ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj.  If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}

/// Emit an i1 that is true when the given member pointer is non-null.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *MemPtr,
                                          const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  /// For member data pointers, this is just a check against -1.
  if (MPT->isMemberDataPointer()) {
    assert(MemPtr->getType() == CGM.PtrDiffTy);
    llvm::Value *NegativeOne =
      llvm::Constant::getAllOnesValue(MemPtr->getType());
    return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
  }

  // In Itanium, a member function pointer is not null if 'ptr' is not null.
  llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");

  llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
  llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");

  // On ARM, a member function pointer is also non-null if the low bit of 'adj'
  // (the virtual bit) is set.
  if (UseARMMethodPtrABI) {
    llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
    llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
    llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
    llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
                                                  "memptr.isvirtual");
    Result = Builder.CreateOr(Result, IsVirtual);
  }

  return Result;
}

/// Classify a C++ class return type: returns true (and marks the return
/// indirect) when the language forbids copying the object in registers.
bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
  const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
  if (!RD)
    return false;

  // If C++ prohibits us from making a copy, return by address.
  if (!RD->canPassInRegisters()) {
    auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
    FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
    return true;
  }
  return false;
}

/// The Itanium ABI requires non-zero initialization only for data
/// member pointers, for which '0' is a valid offset.
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
  return MPT->isMemberFunctionPointer();
}

/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            Address Ptr,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
    auto *ClassDecl =
        cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
    llvm::Value *VTable =
        CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);

    // Track back to entry -2 and pull out the offset there.
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        VTable, -2, "complete-offset.ptr");
    llvm::Value *Offset =
      CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());

    // Apply the offset.
    llvm::Value *CompletePtr =
      CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
    CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws.
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
                                    ElementType);
  }

  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  // With a global delete we already emitted the deallocation cleanup
  // above, so only the complete destructor runs; otherwise the deleting
  // destructor handles the operator delete itself.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);

  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
}

/// Emit a call to the Itanium EH runtime's rethrow entry point.
void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
  // void __cxa_rethrow();

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);

  llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");

  if (isNoReturn)
    CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
  else
    CGF.EmitRuntimeCallOrInvoke(Fn);
}

/// Get (or declare) the __cxa_allocate_exception runtime function.
static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
  // void *__cxa_allocate_exception(size_t thrown_size);

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
}

/// Get (or declare) the __cxa_throw runtime function.
static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
  // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
  //                  void (*dest) (void *));

  llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
}

/// Emit a throw expression: allocate the exception object, construct the
/// thrown value into it, and call __cxa_throw with its type_info and
/// (possibly null) destructor.
void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
  QualType ThrowType = E->getSubExpr()->getType();
  // Now allocate the exception object.
  llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
  uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();

  llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
      AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");

  // Construct the thrown value into the freshly allocated exception slot.
  CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
  CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));

  // Now throw the exception.
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
                                                         /*ForEH=*/true);

  // The address of the destructor.  If the exception type has a
  // trivial destructor (or isn't a record), we just pass null.
  llvm::Constant *Dtor = nullptr;
  if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
    CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!Record->hasTrivialDestructor()) {
      CXXDestructorDecl *DtorD = Record->getDestructor();
      Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
      Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
    }
  }
  if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);

  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
  CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
}

/// Get (or declare) the __dynamic_cast runtime function, marked
/// nounwind/readonly so LLVM can optimize around it.
static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);

  // Mark the function as nounwind readonly.
  llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
                                            llvm::Attribute::ReadOnly };
  llvm::AttributeList Attrs = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
}

/// Get (or declare) the __cxa_bad_cast runtime function.
static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

/// Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7]
///
/// Returns the byte offset of Src within Dst when Src is a unique public
/// non-virtual base, or one of the negative sentinels: -1 (no hint,
/// virtual base on the path), -2 (Src not a public base of Dst),
/// -3 (Src is a public base along multiple paths).
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation below and
  // return that Src is not a public base of Dst.  Record all inheritance paths.
  if (!Dst->isDerivedFrom(Src, Paths))
    return CharUnits::fromQuantity(-2ULL);

  unsigned NumPublicPaths = 0;
  CharUnits Offset;

  // Now walk all possible inheritance paths.
  for (const CXXBasePath &Path : Paths) {
    if (Path.Access != AS_public)  // Ignore non-public inheritance.
      continue;

    ++NumPublicPaths;

    for (const CXXBasePathElement &PathElement : Path) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: no hint.
      if (PathElement.Base->isVirtual())
        return CharUnits::fromQuantity(-1ULL);

      if (NumPublicPaths > 1)  // Won't use offsets, skip computation.
        continue;

      // Accumulate the base class offsets.
      const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
      Offset += L.getBaseClassOffset(
          PathElement.Base->getType()->getAsCXXRecordDecl());
    }
  }

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(-2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(-3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
  return Offset;
}

/// Get (or declare) the __cxa_bad_typeid runtime function.
static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

/// typeid on a dereferenced pointer must null-check its operand (a null
/// pointer dereference in typeid throws std::bad_typeid).
bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
                                              QualType SrcRecordTy) {
  return IsDeref;
}

/// Emit a noreturn call to __cxa_bad_typeid and terminate the block.
void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
  llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
  Call->setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

/// Emit the value of a polymorphic typeid expression: load the type_info
/// pointer stored immediately before the vtable's address point.
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       Address ThisPtr,
                                       llvm::Type *StdTypeInfoPtrTy) {
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *Value =
      CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);

  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Load the type info.
    // Relative layout stores a 32-bit relative offset 4 bytes before the
    // address point; llvm.load.relative resolves it to an absolute pointer.
    Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
    Value = CGF.Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
        {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});

    // Setup to dereference again since this is a proxy we accessed.
    Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
  } else {
    // Load the type info.
    Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  }
  return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
}

/// dynamic_cast of a pointer must null-check its operand (a null pointer
/// simply yields a null result); references need no check.
bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                                       QualType SrcRecordTy) {
  return SrcIsPtr;
}

/// Emit a call to the __dynamic_cast runtime function, including the
/// src2dst_offset hint and, for reference casts, the bad_cast path.
llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint = llvm::ConstantInt::get(
      PtrDiffLTy,
      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);

  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
        CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}

/// Emit dynamic_cast<void*>: adjust the pointer by the offset-to-top
/// stored at vtable entry -2, yielding the complete-object address.
/// No runtime call is needed for this form.
llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  Address ThisAddr,
                                                  QualType SrcRecordTy,
                                                  QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *OffsetToTop;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);

    // Get the offset-to-top from the vtable.
    // Relative layout: entries are i32, loaded with 4-byte alignment.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_32(/*Type=*/nullptr, VTable, -2U);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
  } else {
    llvm::Type *PtrDiffLTy =
        CGF.ConvertType(CGF.getContext().getPointerDiffType());

    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);

    // Get the offset-to-top from the vtable.
    OffsetToTop = CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
  }
  // Finally, add the offset to the pointer.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
  return CGF.Builder.CreateBitCast(Value, DestLTy);
}

/// Emit a noreturn call to __cxa_bad_cast and terminate the block.
bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::FunctionCallee Fn = getBadCastFn(CGF);
  llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
  Call->setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
  return true;
}

/// Load the offset of a virtual base from the object's vtable: the
/// vbase-offset slot lives at a statically known (negative) offset from
/// the vtable's address point.
llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         Address This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
  CharUnits VBaseOffsetOffset =
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
                                                               BaseClassDecl);
  llvm::Value *VBaseOffsetPtr =
    CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
                                   "vbase.offset.ptr");

  llvm::Value *VBaseOffset;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Relative layout stores the offset as a 32-bit entry.
    VBaseOffsetPtr =
        CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        VBaseOffsetPtr, CharUnits::fromQuantity(4), "vbase.offset");
  } else {
    VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
                                               CGM.PtrDiffTy->getPointerTo());
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
  }
  return VBaseOffset;
}

/// Emit the base and (unless the class is abstract) complete variants of
/// a constructor, per the Itanium constructor-variant scheme.
void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
  // Just make sure we're in sync with TargetCXXABI.
  assert(CGM.getTarget().getCXXABI().hasConstructorVariants());

  // The constructor used for constructing this as a base class;
  // ignores virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));

  // The constructor used for constructing this as a complete class;
  // constructs the virtual bases, then calls the base constructor.
  if (!D->getParent()->isAbstract()) {
    // We don't need to emit the complete ctor if the class is abstract.
    CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
  }
}

/// Add the implicit structor parameters to the signature: for base-object
/// constructors/destructors of classes with virtual bases, a VTT pointer
/// (void**) is inserted immediately after 'this'.
CGCXXABI::AddedStructorArgCounts
ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
                                      SmallVectorImpl<CanQualType> &ArgTys) {
  ASTContext &Context = getContext();

  // All parameters are already in place except VTT, which goes after 'this'.
  // These are Clang types, so we don't need to worry about sret yet.

  // Check if we need to add a VTT parameter (which has type void **).
  if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
                                             : GD.getDtorType() == Dtor_Base) &&
      cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
    ArgTys.insert(ArgTys.begin() + 1,
                  Context.getPointerType(Context.VoidPtrTy));
    return AddedStructorArgCounts::prefix(1);
  }
  return AddedStructorArgCounts{};
}

/// Emit the base, complete, and (for virtual destructors) deleting
/// variants of a destructor.
void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
  // The destructor used for destructing this as a base class; ignores
  // virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));

  // The destructor used for destructing this as a most-derived class;
  // call the base destructor and then destructs any virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));

  // The destructor in a virtual table is always a 'deleting'
  // destructor, which calls the complete destructor and then uses the
  // appropriate operator delete.
  if (D->isVirtual())
    CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
}

/// Add the implicit VTT parameter declaration to the function's argument
/// list when the current structor variant needs one.
void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
                                              QualType &ResTy,
                                              FunctionArgList &Params) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));

  // Check if we need a VTT parameter as well.
  if (NeedsVTTParameter(CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    QualType T = Context.getPointerType(Context.VoidPtrTy);
    auto *VTTDecl = ImplicitParamDecl::Create(
        Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
        T, ImplicitParamDecl::CXXVTT);
    // The VTT goes right after 'this'.
    Params.insert(Params.begin() + 1, VTTDecl);
    getStructorImplicitParamDecl(CGF) = VTTDecl;
  }
}

/// Emit the Itanium instance-method prologue: bind 'this', load the VTT
/// if present, and pre-store 'this' into the return slot for ABIs where
/// the function returns 'this'.
void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  // Naked functions have no prolog.
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
    return;

  /// Initialize the 'this' slot.  In the Itanium C++ ABI, no prologue
  /// adjustments are required, because they are all handled by thunks.
  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));

  /// Initialize the 'vtt' slot if needed.
  if (getStructorImplicitParamDecl(CGF)) {
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
  }

  /// If this is a function that the ABI specifies returns 'this', initialize
  /// the return slot to 'this' at the start of the function.
  ///
  /// Unlike the setting of return types, this is done within the ABI
  /// implementation instead of by clients of CGCXXABI because:
  /// 1) getThisValue is currently protected
  /// 2) in theory, an ABI could implement 'this' returns some other way;
  ///    HasThisReturn only specifies a contract, not the implementation
  if (HasThisReturn(CGF.CurGD))
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}

/// Compute the implicit arguments a constructor call needs: under
/// Itanium this is at most the VTT pointer, inserted after 'this'.
CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
    CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
    bool ForVirtualBase, bool Delegating) {
  if (!NeedsVTTParameter(GlobalDecl(D, Type)))
    return AddedStructorArgs{};

  // Insert the implicit 'vtt' argument as the second argument.
  llvm::Value *VTT =
      CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
  return AddedStructorArgs::prefix({{VTT, VTTTy}});
}

/// The destructor's implicit parameter under Itanium is the VTT (may be
/// null when no VTT is needed).
llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
    CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
    bool ForVirtualBase, bool Delegating) {
  GlobalDecl GD(DD, Type);
  return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
}

/// Emit a call to the given destructor variant on 'This', passing the VTT
/// when required.  On AppleKext, virtual non-base destructor calls are
/// dispatched through the kext vtable mechanism instead.
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
                                       const CXXDestructorDecl *DD,
                                       CXXDtorType Type, bool ForVirtualBase,
                                       bool Delegating, Address This,
                                       QualType ThisTy) {
  GlobalDecl GD(DD, Type);
  llvm::Value *VTT =
      getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);

  CGCallee Callee;
  if (getContext().getLangOpts().AppleKext &&
      Type != Dtor_Base && DD->isVirtual())
    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
  else
    Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);

  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
                            nullptr);
}

/// Emit the vtable definition for RD: build its initializer, set linkage,
/// comdat, and visibility, and emit related metadata/RTTI.
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  // Already emitted.
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  ConstantInitBuilder builder(CGM);
  auto components = builder.beginStruct();
  CGVT.createVTableInitializer(components, VTLayout, RTTI,
                               llvm::GlobalValue::isLocalLinkage(Linkage));
  components.finishAndSetAsInitializer(VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  // Set the right visibility.
  CGM.setGVProperties(VTable, RD);

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types.  This is the
  // same behaviour as GCC.
  // Detect __cxxabiv1::__fundamental_type_info: the class must be named
  // exactly that, directly inside namespace __cxxabiv1 at TU scope.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors(RD);

  if (!VTable->isDeclarationForLinker())
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);

  if (VTContext.isRelativeLayout() && !VTable->isDSOLocal())
    CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
}

/// A virtual-offset computation for the vptr field is only needed when the
/// vptr sits in a virtual base and the current structor carries a VTT.
bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
    CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
  if (Vptr.NearestVBase == nullptr)
    return false;
  return NeedsVTTParameter(CGF.CurGD);
}

/// Get the vtable address point to store into a vptr while inside a
/// constructor/destructor. Uses the VTT when the class has virtual bases
/// (or the subobject lives in one) and the structor has a VTT parameter;
/// otherwise the address point is a plain constant.
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {

  if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
      NeedsVTTParameter(CGF.CurGD)) {
    return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
                                                  NearestVBase);
  }
  return getVTableAddressPoint(Base, VTableClass);
}

/// Return the constant address point for Base within VTableClass's vtable
/// group, as a GEP into the vtable global.
llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());

  // Find the appropriate vtable within the vtable group, and the address point
  // within that vtable.
  VTableLayout::AddressPointLocation AddressPoint =
      CGM.getItaniumVTableContext()
          .getVTableLayout(VTableClass)
          .getAddressPoint(Base);
  llvm::Value *Indices[] = {
      llvm::ConstantInt::get(CGM.Int32Ty, 0),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
  };

  // InRangeIndex=1 marks the vtable-within-group index so the optimizer may
  // not stray outside the selected vtable when folding this GEP.
  return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
                                              Indices, /*InBounds=*/true,
                                              /*InRangeIndex=*/1);
}

/// Load a vtable address point out of the VTT (used inside structors of
/// classes with virtual bases, where static address points are not known).
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {
  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");

  // Get the secondary vpointer index.
  uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

  /// Load the VTT.
  llvm::Value *VTT = CGF.LoadCXXVTT();
  if (VirtualPointerIndex)
    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);

  // And load the address point from the VTT.
  return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
}

/// Constant-expression contexts can always use the static address point.
llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
    BaseSubobject Base, const CXXRecordDecl *VTableClass) {
  return getVTableAddressPoint(Base, VTableClass);
}

/// Get (or lazily create) the vtable global for RD. The Itanium ABI has a
/// single vtable group per class, so VPtrOffset must be zero; results are
/// memoized in the VTables map.
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  // Mangle the vtable symbol name.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);

  // Use pointer alignment for the vtable. Otherwise we would align them based
  // on the size of the initializer which doesn't make sense as only single
  // values are read.
  // Relative layout stores 32-bit offsets, hence the fixed 32-bit alignment.
  unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
                        ? 32
                        : CGM.getTarget().getPointerAlign(0);

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, VTableType, llvm::GlobalValue::ExternalLinkage,
      getContext().toCharUnitsFromBits(PAlign).getQuantity());
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  CGM.setGVProperties(VTable, RD);

  return VTable;
}

/// Load a virtual function pointer for GD out of the vtable of 'This'.
/// Handles three emission strategies: the CFI/whole-program-vtables checked
/// load intrinsic, the relative-vtable load_relative intrinsic, and a plain
/// (optionally !invariant.load-annotated) indexed load.
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD, Address This,
                                                  llvm::Type *Ty,
                                                  SourceLocation Loc) {
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(
      This, Ty->getPointerTo()->getPointerTo(), MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc;
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    // Byte offset of the slot = index * pointer width in bytes.
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable,
        VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncLoad;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Relative layout: each slot is a 4-byte offset resolved via
      // llvm.load.relative.
      VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
      llvm::Value *Load = CGF.Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
          {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
      VFuncLoad = CGF.Builder.CreateBitCast(Load, Ty->getPointerTo());
    } else {
      VTable =
          CGF.Builder.CreateBitCast(VTable, Ty->getPointerTo()->getPointerTo());
      llvm::Value *VTableSlotPtr =
          CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
      VFuncLoad =
          CGF.Builder.CreateAlignedLoad(VTableSlotPtr, CGF.getPointerAlign());
    }

    // Add !invariant.load md to virtual function load to indicate that
    // function didn't change inside vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help in devirtualization because it will only matter if we have two of
    // the same virtual function loads from the same vtable load, which won't
    // happen without enabled devirtualization with -fstrict-vtable-pointers.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers) {
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
        VFuncLoadInstr->setMetadata(
            llvm::LLVMContext::MD_invariant_load,
            llvm::MDNode::get(CGM.getLLVMContext(),
                              llvm::ArrayRef<llvm::Metadata *>()));
      }
    }
    VFunc = VFuncLoad;
  }

  CGCallee Callee(GD, VFunc);
  return Callee;
}

/// Emit a virtual destructor call arising from either a delete-expression
/// or an explicit member call; exactly one of the two expression kinds is
/// present in E. Returns nullptr (no this-adjusted value is produced here).
llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
    CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
    Address This, DeleteOrMemberCallExpr E) {
  auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
  auto *D = E.dyn_cast<const CXXDeleteExpr *>();
  assert((CE != nullptr) ^ (D != nullptr));
  assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);

  GlobalDecl GD(Dtor, DtorType);
  const CGFunctionInfo *FInfo =
      &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
  llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
  CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);

  // The static type of the object differs depending on which expression
  // form produced this call.
  QualType ThisTy;
  if (CE) {
    ThisTy = CE->getObjectType();
  } else {
    ThisTy = D->getDestroyedType();
  }

  // No VTT is passed for a virtual destructor call.
  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
                            QualType(), nullptr);
  return nullptr;
}

/// Emit the VTT (virtual table table) definition for a class with virtual
/// bases.
void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
  CodeGenVTables &VTables = CGM.getVTables();
  llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
  VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
}

/// Whether an available_externally copy of RD's vtable may be emitted when
/// RD is used as a base subobject (recurses over non-virtual dynamic bases).
bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
    const CXXRecordDecl *RD) const {
  // We don't emit available_externally vtables if we are in -fapple-kext mode
  // because kext mode does not permit devirtualization.
  if (CGM.getLangOpts().AppleKext)
    return false;

  // If the vtable is hidden then it is not safe to emit an available_externally
  // copy of vtable.
  if (isVTableHidden(RD))
    return false;

  if (CGM.getCodeGenOpts().ForceEmitVTables)
    return true;

  // If we don't have any not emitted inline virtual function then we are safe
  // to emit an available_externally copy of vtable.
  // FIXME we can still emit a copy of the vtable if we
  // can emit definition of the inline functions.
  if (hasAnyUnusedVirtualInlineFunction(RD))
    return false;

  // For a class with virtual bases, we must also be able to speculatively
  // emit the VTT, because CodeGen doesn't have separate notions of "can emit
  // the vtable" and "can emit the VTT". For a base subobject, this means we
  // need to be able to emit non-virtual base vtables.
  if (RD->getNumVBases()) {
    for (const auto &B : RD->bases()) {
      auto *BRD = B.getType()->getAsCXXRecordDecl();
      assert(BRD && "no class for base specifier");
      // Virtual bases are handled by canSpeculativelyEmitVTable; non-dynamic
      // bases have no vtable of their own.
      if (B.isVirtual() || !BRD->isDynamicClass())
        continue;
      if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
        return false;
    }
  }

  return true;
}

/// Whether a complete-object available_externally vtable (and its VTT) may
/// be emitted for RD: the base check must pass for RD itself and for every
/// dynamic virtual base.
bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
  if (!canSpeculativelyEmitVTableAsBaseClass(RD))
    return false;

  // For a complete-object vtable (or more specifically, for the VTT), we need
  // to be able to speculatively emit the vtables of all dynamic virtual bases.
  for (const auto &B : RD->vbases()) {
    auto *BRD = B.getType()->getAsCXXRecordDecl();
    assert(BRD && "no class for base specifier");
    if (!BRD->isDynamicClass())
      continue;
    if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
      return false;
  }

  return true;
}

/// Apply a this/return pointer adjustment consisting of a non-virtual byte
/// offset and/or a virtual offset loaded from the vtable (at byte offset
/// VirtualAdjustment relative to the vptr). For a this adjustment the
/// non-virtual part is applied before the virtual part; for a return
/// adjustment the order is reversed.
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
                                          Address InitialPtr,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.getPointer();

  Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
    V = CGF.Builder.CreateConstInBoundsByteGEP(
        V, CharUnits::fromQuantity(NonVirtualAdjustment));
  }

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);

    llvm::Value *Offset;
    llvm::Value *OffsetPtr =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
    if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Load the adjustment offset from the vtable as a 32-bit int.
      OffsetPtr =
          CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
      Offset =
          CGF.Builder.CreateAlignedLoad(OffsetPtr, CharUnits::fromQuantity(4));
    } else {
      llvm::Type *PtrDiffTy =
          CGF.ConvertType(CGF.getContext().getPointerDiffType());

      OffsetPtr =
          CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());

      // Load the adjustment offset from the vtable.
      Offset = CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
    }
    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
  } else {
    ResultPtr = V.getPointer();
  }

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied second.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
                                                       NonVirtualAdjustment);
  }

  // Cast back to the original type.
  return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
}

/// Thunk this-pointer adjustment: non-virtual offset plus a vcall offset
/// read from the vtable.
llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
                                                  Address This,
                                                  const ThisAdjustment &TA) {
  return performTypeAdjustment(CGF, This, TA.NonVirtual,
                               TA.Virtual.Itanium.VCallOffsetOffset,
                               /*IsReturnAdjustment=*/false);
}

/// Thunk return-value adjustment: vbase offset read from the vtable plus a
/// non-virtual offset (applied in that order).
llvm::Value *
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const ReturnAdjustment &RA) {
  return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
                               RA.Virtual.Itanium.VBaseOffsetOffset,
                               /*IsReturnAdjustment=*/true);
}

void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
                                    QualType ResultType) {
  if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
    return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);

  // Destructor thunks in the ARM ABI have indeterminate results.
  llvm::Type *T = CGF.ReturnValue.getElementType();
  RValue Undef = RValue::get(llvm::UndefValue::get(T));
  return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
}

/************************** Array allocation cookies **************************/

CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // The array cookie is a size_t; pad that up to the element alignment.
  // The cookie is actually right-justified in that space.
  return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
                  CGM.getContext().getPreferredTypeAlignInChars(elementType));
}

/// Write the array-new cookie (the element count, right-justified in the
/// cookie space) at the start of the allocation and return the adjusted
/// pointer to the first array element.
Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                             Address NewPtr,
                                             llvm::Value *NumElements,
                                             const CXXNewExpr *expr,
                                             QualType ElementType) {
  assert(requiresArrayCookie(expr));

  unsigned AS = NewPtr.getAddressSpace();

  ASTContext &Ctx = getContext();
  CharUnits SizeSize = CGF.getSizeSize();

  // The size of the cookie.
  CharUnits CookieSize =
      std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));

  // Compute an offset to the cookie.
  Address CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);

  // Write the number of elements into the appropriate slot.
  Address NumElementsPtr =
      CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
  llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);

  // Handle the array cookie specially in ASan.
  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
      (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
       CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
    // The store to the CookiePtr does not need to be instrumented.
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
    llvm::FunctionCallee F =
        CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
    CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
  }

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
}

llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                Address allocPtr,
                                                CharUnits cookieSize) {
  // The element size is right-justified in the cookie.
  Address numElementsPtr = allocPtr;
  CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
  if (!numElementsOffset.isZero())
    numElementsPtr = CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr,
                                                            numElementsOffset);

  unsigned AS = allocPtr.getAddressSpace();
  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
  if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
    return CGF.Builder.CreateLoad(numElementsPtr);
  // In asan mode emit a function call instead of a regular load and let the
  // run-time deal with it: if the shadow is properly poisoned return the
  // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
  // We can't simply ignore this load using nosanitize metadata because
  // the metadata may be lost.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
  llvm::FunctionCallee F =
      CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
  return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
}

CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // ARM says that the cookie is always:
  //   struct array_cookie {
  //     std::size_t element_size; // element_size != 0
  //     std::size_t element_count;
  //   };
  // But the base ABI doesn't give anything an alignment greater than
  // 8, so we can dismiss this as typical ABI-author blindness to
  // actual language complexity and round up to the element alignment.
  return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
                  CGM.getContext().getTypeAlignInChars(elementType));
}

/// ARM cookie layout: {element_size, element_count} at the start of the
/// buffer; returns the pointer past the (alignment-padded) cookie.
Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                         Address newPtr,
                                         llvm::Value *numElements,
                                         const CXXNewExpr *expr,
                                         QualType elementType) {
  assert(requiresArrayCookie(expr));

  // The cookie is always at the start of the buffer.
  Address cookie = newPtr;

  // The first element is the element size.
  cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
  llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
                 getContext().getTypeSizeInChars(elementType).getQuantity());
  CGF.Builder.CreateStore(elementSize, cookie);

  // The second element is the element count.
  cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
  CGF.Builder.CreateStore(numElements, cookie);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
  return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
}

llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                            Address allocPtr,
                                            CharUnits cookieSize) {
  // The number of elements is at offset sizeof(size_t) relative to
  // the allocated pointer.
  Address numElementsPtr
    = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());

  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
  return CGF.Builder.CreateLoad(numElementsPtr);
}

/*********************** Static local initialization **************************/

/// Declare `int __cxa_guard_acquire(__guard *)`, marked nounwind.
static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
                                              llvm::PointerType *GuardPtrTy) {
  // int __cxa_guard_acquire(__guard *guard_object);
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
                            GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(
      FTy, "__cxa_guard_acquire",
      llvm::AttributeList::get(CGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}

/// Declare `void __cxa_guard_release(__guard *)`, marked nounwind.
static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
                                              llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_release(__guard *guard_object);
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(
      FTy, "__cxa_guard_release",
      llvm::AttributeList::get(CGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}

/// Declare `void __cxa_guard_abort(__guard *)`, marked nounwind.
static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
                                            llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_abort(__guard *guard_object);
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(
      FTy, "__cxa_guard_abort",
      llvm::AttributeList::get(CGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}

namespace {
  /// EH cleanup that calls __cxa_guard_abort on the guard variable if the
  /// guarded initializer throws.
  struct CallGuardAbort final : EHScopeStack::Cleanup {
    llvm::GlobalVariable *Guard;
    CallGuardAbort(llvm::GlobalVariable *Guard) :
      Guard(Guard) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
                                  Guard);
    }
  };
}

/// The ARM code here follows the Itanium code closely enough that we
/// just special-case it at particular places.
///
/// Emits the guarded initialization of a static local (or guarded global):
/// creates/reuses the mangled guard variable, tests its first byte, and
/// wraps the initializer in __cxa_guard_acquire/release (with a
/// __cxa_guard_abort EH cleanup) when thread-safe statics apply.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // Inline variables that weren't instantiated from variable templates have
  // partially-ordered initialization within their translation unit.
  bool NonTemplateInline =
      D.isInline() &&
      !isTemplateInstantiation(D.getTemplateSpecializationKind());

  // We only need to use thread-safe statics for local non-TLS variables and
  // inline variables; other global initialization is always single-threaded
  // or (through lazy dynamic loading in multiple threads) unsequenced.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    (D.isLocalVarDecl() || NonTemplateInline) &&
                    !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  CharUnits guardAlignment;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
    guardAlignment = CharUnits::One();
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    if (UseARMGuardVarABI) {
      guardTy = CGF.SizeTy;
      guardAlignment = CGF.getSizeAlign();
    } else {
      guardTy = CGF.Int64Ty;
      guardAlignment = CharUnits::fromQuantity(
          CGM.getDataLayout().getABITypeAlignment(guardTy));
    }
  }
  llvm::PointerType *guardPtrTy = guardTy->getPointerTo(
      CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleStaticGuardVariable(&D, out);
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage, visibility and dso locality from the guarded
    // variable.
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(guardTy, 0),
                                     guardName.str());
    guard->setDSOLocal(var->isDSOLocal());
    guard->setVisibility(var->getVisibility());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());
    guard->setAlignment(guardAlignment.getAsAlign());

    // The ABI says: "It is suggested that it be emitted in the same COMDAT
    // group as the associated data object." In practice, this doesn't work for
    // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
    llvm::Comdat *C = var->getComdat();
    if (!D.isLocalVarDecl() && C &&
        (CGM.getTarget().getTriple().isOSBinFormatELF() ||
         CGM.getTarget().getTriple().isOSBinFormatWasm())) {
      guard->setComdat(C);
      // An inline variable's guard function is run from the per-TU
      // initialization function, not via a dedicated global ctor function, so
      // we can't put it in a comdat.
      if (!NonTemplateInline)
        CGF.CurFn->setComdat(C);
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
      guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
    }

    CGM.setStaticLocalDeclGuardAddress(&D, guard);
  }

  Address guardAddr = Address(guard, guardAlignment);

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //            __cxa_guard_abort (&obj_guard);
  //            throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }

  // Load the first byte of the guard variable.
  llvm::LoadInst *LI =
      Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));

  // Itanium ABI:
  //   An implementation supporting thread-safety on multiprocessor
  //   systems must also guarantee that references to the initialized
  //   object do not occur before the load of the initialization flag.
  //
  // In LLVM, we do this by marking the load Acquire.
  if (threadsafe)
    LI->setAtomic(llvm::AtomicOrdering::Acquire);

  // For ARM, we should only check the first bit, rather than the entire byte:
  //
  // ARM C++ ABI 3.2.3.1:
  //   To support the potential use of initialization guard variables
  //   as semaphores that are the target of ARM SWP and LDREX/STREX
  //   synchronizing instructions we define a static initialization
  //   guard variable to be a 4-byte aligned, 4-byte word with the
  //   following inline access protocol.
  //     #define INITIALIZED 1
  //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
  //       if (__cxa_guard_acquire(&obj_guard))
  //         ...
  //     }
  //
  // and similarly for ARM64:
  //
  // ARM64 C++ ABI 3.2.2:
  //   This ABI instead only specifies the value bit 0 of the static guard
  //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
  //   variable is not initialized and 1 when it is.
  llvm::Value *V =
      (UseARMGuardVarABI && !useInt8GuardVariable)
          ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
          : LI;
  llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");

  llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");

  // Check if the first byte of the guard variable is zero.
  CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
                               CodeGenFunction::GuardKind::VariableGuard, &D);

  CGF.EmitBlock(InitCheckBlock);

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.
    llvm::Value *V
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);

    CGF.EmitBlock(InitBlock);
  }

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release. This cannot throw.
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
                                guardAddr.getPointer());
  } else {
    // Non-thread-safe path: mark the guard initialized with a plain store.
    Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);
  }

  CGF.EmitBlock(EndBlock);
}

/// Register a global destructor using __cxa_atexit.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::FunctionCallee dtor,
                                        llvm::Constant *addr, bool TLS) {
  assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
         "__cxa_atexit is disabled");
  const char *Name = "__cxa_atexit";
  if (TLS) {
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
  }

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC. Go ahead and cast it to the
  // right prototype.
  llvm::Type *dtorTy =
      llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();

  // Preserve address space of addr.
  auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
  auto AddrInt8PtrTy =
      AddrAS ?
CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy; 2503 2504 // Create a variable that binds the atexit to this shared object. 2505 llvm::Constant *handle = 2506 CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle"); 2507 auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts()); 2508 GV->setVisibility(llvm::GlobalValue::HiddenVisibility); 2509 2510 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d); 2511 llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()}; 2512 llvm::FunctionType *atexitTy = 2513 llvm::FunctionType::get(CGF.IntTy, paramTys, false); 2514 2515 // Fetch the actual function. 2516 llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name); 2517 if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee())) 2518 fn->setDoesNotThrow(); 2519 2520 if (!addr) 2521 // addr is null when we are trying to register a dtor annotated with 2522 // __attribute__((destructor)) in a constructor function. Using null here is 2523 // okay because this argument is just passed back to the destructor 2524 // function. 2525 addr = llvm::Constant::getNullValue(CGF.Int8PtrTy); 2526 2527 llvm::Value *args[] = {llvm::ConstantExpr::getBitCast( 2528 cast<llvm::Constant>(dtor.getCallee()), dtorTy), 2529 llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy), 2530 handle}; 2531 CGF.EmitNounwindRuntimeCall(atexit, args); 2532 } 2533 2534 static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM, 2535 StringRef FnName) { 2536 // Create a function that registers/unregisters destructors that have the same 2537 // priority. 
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
  llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());

  return GlobalInitOrCleanupFn;
}

/// Create a synthetic static FunctionDecl (void() at TU scope) to serve as the
/// GlobalDecl when StartFunction emits an init/cleanup helper.
static FunctionDecl *
createGlobalInitOrCleanupFnDecl(CodeGen::CodeGenModule &CGM, StringRef FnName) {
  ASTContext &Ctx = CGM.getContext();
  QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, {});
  return FunctionDecl::Create(
      Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
      &Ctx.Idents.get(FnName), FunctionTy, nullptr, SC_Static, false, false);
}

/// Emit one __GLOBAL_cleanup_<priority> function per destructor priority that
/// calls unatexit() on each previously-registered destructor and, when
/// unatexit reports the dtor was still registered (returns 0), runs it
/// directly. Used on targets (e.g. AIX sinit/sterm) where atexit-registered
/// dtors must be unregistered and run by the cleanup function itself.
void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalCleanupFnName =
        std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);

    llvm::Function *GlobalCleanupFn =
        createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);

    FunctionDecl *GlobalCleanupFD =
        createGlobalInitOrCleanupFnDecl(*this, GlobalCleanupFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(GlobalCleanupFD), getContext().VoidTy,
                      GlobalCleanupFn, getTypes().arrangeNullaryFunction(),
                      FunctionArgList(), SourceLocation(), SourceLocation());

    // Get the destructor function type, void(*)(void).
    llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);
    llvm::Type *dtorTy = dtorFuncTy->getPointerTo();

    // Destructor functions are run/unregistered in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    // Walk in reverse registration order so destruction order mirrors the
    // reverse of construction order.
    auto itv = Dtors.rbegin();
    while (itv != Dtors.rend()) {
      llvm::Function *Dtor = *itv;

      // We're assuming that the destructor function is something we can
      // reasonably call with the correct CC. Go ahead and cast it to the
      // right prototype.
      llvm::Constant *dtor = llvm::ConstantExpr::getBitCast(Dtor, dtorTy);
      llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtor);
      llvm::Value *NeedsDestruct =
          CGF.Builder.CreateIsNull(V, "needs_destruct");

      llvm::BasicBlock *DestructCallBlock =
          CGF.createBasicBlock("destruct.call");
      // The "end" block of this iteration doubles as the "unatexit.call"
      // entry of the next one, except for the final iteration.
      llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
          (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
      // Check if unatexit returns a value of 0. If it does, jump to
      // DestructCallBlock, otherwise jump to EndBlock directly.
      CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

      CGF.EmitBlock(DestructCallBlock);

      // Emit the call to casted Dtor.
      llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, dtor);
      // Make sure the call and the callee agree on calling convention.
        CGF.registerGlobalDtorWithAtExit(
            llvm::ConstantExpr::getBitCast(Dtor, dtorTy));
      }
    }

    CGF.FinishFunction();
    AddGlobalCtor(GlobalInitFn, Priority, nullptr);
  }

  // On sinit/sterm targets the cleanup counterparts must also be emitted.
  if (getCXXABI().useSinitAndSterm())
    unregisterGlobalDtorsWithUnAtExit();
}

/// Register a global destructor as best as we know how.
///
/// Picks between __cxa_atexit/__cxa_thread_atexit, an Apple-kext dtor table
/// entry, and plain atexit, based on codegen options and the variable's TLS
/// kind. Does nothing for [[no_destroy]] variables.
void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                                       llvm::FunctionCallee dtor,
                                       llvm::Constant *addr) {
  if (D.isNoDestroy(CGM.getContext()))
    return;

  // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
  // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
  // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
  // We can always use __cxa_thread_atexit.
  if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
    return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());

  // In Apple kexts, we want to add a global destructor entry.
  // FIXME: shouldn't this be guarded by some variable?
  if (CGM.getLangOpts().AppleKext) {
    // Generate a global destructor entry.
    return CGM.AddCXXDtorEntry(dtor, addr);
  }

  CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
}

/// Returns true when all accesses to the TLS variable must be funneled
/// through the thread wrapper (so the wrapper may be replaced at link time).
static bool isThreadWrapperReplaceable(const VarDecl *VD,
                                       CodeGen::CodeGenModule &CGM) {
  assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
  // Darwin prefers to have references to thread local variables to go through
  // the thread wrapper instead of directly referencing the backing variable.
  return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
         CGM.getTarget().getTriple().isOSDarwin();
}

/// Get the appropriate linkage for the wrapper function. This is essentially
/// the weak form of the variable's linkage; every translation unit which needs
/// the wrapper emits a copy, and we want the linker to merge them.
static llvm::GlobalValue::LinkageTypes
getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
  llvm::GlobalValue::LinkageTypes VarLinkage =
      CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);

  // For internal linkage variables, we don't need an external or weak wrapper.
  if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
    return VarLinkage;

  // If the thread wrapper is replaceable, give it appropriate linkage.
  if (isThreadWrapperReplaceable(VD, CGM))
    if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
        !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
      return VarLinkage;
  return llvm::GlobalValue::WeakODRLinkage;
}

llvm::Function *
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                             llvm::Value *Val) {
  // Mangle the name for the thread_local wrapper function.
  SmallString<256> WrapperName;
  {
    llvm::raw_svector_ostream Out(WrapperName);
    getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
  }

  // FIXME: If VD is a definition, we should regenerate the function attributes
  // before returning.
  // Reuse a previously created wrapper if one exists under the mangled name.
  if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
    return cast<llvm::Function>(V);

  // The wrapper returns a pointer to the (non-reference) variable type.
  QualType RetQT = VD->getType();
  if (RetQT->isReferenceType())
    RetQT = RetQT.getNonReferenceType();

  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      getContext().getPointerType(RetQT), FunctionArgList());

  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Wrapper =
      llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
                             WrapperName.str(), &CGM.getModule());

  if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
    Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper);

  // Always resolve references to the wrapper at link time.
  if (!Wrapper->hasLocalLinkage())
    if (!isThreadWrapperReplaceable(VD, CGM) ||
        llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
        llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
        VD->getVisibility() == HiddenVisibility)
      Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // Replaceable wrappers (Darwin) use the CXX_FAST_TLS convention and never
  // unwind.
  if (isThreadWrapperReplaceable(VD, CGM)) {
    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
    Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
  }

  // Record the wrapper so EmitThreadLocalInitFuncs can emit its body later.
  ThreadWrappers.push_back({VD, Wrapper});
  return Wrapper;
}

void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;

  // Separate initializers into those with ordered (or partially-ordered)
  // initialization and those with unordered initialization.
  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
    // Template instantiations have unordered initialization; everything else
    // keeps declaration order.
    if (isTemplateInstantiation(
            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
          CXXThreadLocalInits[I];
    else
      OrderedInits.push_back(CXXThreadLocalInits[I]);
  }

  if (!OrderedInits.empty()) {
    // Generate a guarded initialization function (__tls_init) that runs all
    // ordered initializers exactly once per thread, guarded by __tls_guard.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
    InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
                                                     SourceLocation(),
                                                     /*TLS=*/true);
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
    Guard->setThreadLocal(true);
    Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getAsAlign());

    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
        InitFunc, OrderedInits, ConstantAddress(Guard, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
    }
  }

  // Create declarations for thread wrappers for all thread-local variables
  // with non-discardable definitions in this translation unit.
  for (const VarDecl *VD : CXXThreadLocals) {
    if (VD->hasDefinition() &&
        !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
      llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
      getOrCreateThreadLocalWrapper(VD, GV);
    }
  }

  // Emit all referenced thread wrappers.
  for (auto VDAndWrapper : ThreadWrappers) {
    const VarDecl *VD = VDAndWrapper.first;
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
    llvm::Function *Wrapper = VDAndWrapper.second;

    // Some targets require that all access to thread local variables go through
    // the thread wrapper. This means that we cannot attempt to create a thread
    // wrapper or a thread helper.
    if (!VD->hasDefinition()) {
      if (isThreadWrapperReplaceable(VD, CGM)) {
        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
        continue;
      }

      // If this isn't a TU in which this variable is defined, the thread
      // wrapper is discardable.
      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
    }

    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
    }

    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    bool HasConstantInitialization = false;
    if (!usesThreadWrapperFunction(VD)) {
      // Constant-initialized: the wrapper has nothing dynamic to run.
      HasConstantInitialization = true;
    } else if (VD->hasDefinition()) {
      // Defined in this TU: alias the per-variable init symbol to the shared
      // __tls_init (ordered) or the variable's own init (unordered templates).
      InitIsInitFunc = true;
      llvm::Function *InitFuncToUse = InitFunc;
      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
      if (InitFuncToUse)
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
                                         InitFuncToUse);
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      Init = llvm::Function::Create(InitFnTy,
                                    llvm::GlobalVariable::ExternalWeakLinkage,
                                    InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
                                    cast<llvm::Function>(Init));
    }

    if (Init) {
      Init->setVisibility(Var->getVisibility());
      // Don't mark an extern_weak function DSO local on windows.
      if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
        Init->setDSOLocal(Var->isDSOLocal());
    }

    // Build the wrapper body: optionally call the init function, then return
    // the variable's address.
    llvm::LLVMContext &Context = CGM.getModule().getContext();
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
    CGBuilderTy Builder(CGM, Entry);
    if (HasConstantInitialization) {
      // No dynamic initialization to invoke.
    } else if (InitIsInitFunc) {
      if (Init) {
        llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
        if (isThreadWrapperReplaceable(VD, CGM)) {
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
          llvm::Function *Fn =
              cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
          Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
        }
      }
    } else {
      // Don't know whether we have an init function. Call it if it exists.
      // Init is extern_weak here, so a null check is meaningful.
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      Builder.CreateCondBr(Have, InitBB, ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(InitFnTy, Init);
      Builder.CreateBr(ExitBB);

      Builder.SetInsertPoint(ExitBB);
    }

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Var;
    if (VD->getType()->isReferenceType()) {
      CharUnits Align = CGM.getContext().getDeclAlign(VD);
      Val = Builder.CreateAlignedLoad(Val, Align);
    }
    if (Val->getType() != Wrapper->getReturnType())
      Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
          Val, Wrapper->getReturnType(), "");
    Builder.CreateRet(Val);
  }
}

/// Emit an lvalue for a thread_local variable access: call the thread wrapper
/// and treat the returned pointer as the variable's address.
LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
                                                   const VarDecl *VD,
                                                   QualType LValType) {
  llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
  llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);

  llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
  // The wrapper may use CXX_FAST_TLS; the call site must match.
  CallVal->setCallingConv(Wrapper->getCallingConv());

  LValue LV;
  if (VD->getType()->isReferenceType())
    LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
  else
    LV = CGF.MakeAddrLValue(CallVal, LValType,
                            CGF.getContext().getDeclAlign(VD));
  // FIXME: need setObjCGCLValueClass?
  return LV;
}

/// Return whether the given global decl needs a VTT parameter, which it does
/// if it's a base constructor or destructor with virtual bases.
bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());

  // We don't have any virtual bases, just return early.
  if (!MD->getParent()->getNumVBases())
    return false;

  // Check if we have a base constructor.
  if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
    return true;

  // Check if we have a base destructor.
  if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
    return true;

  return false;
}

namespace {
/// Builds Itanium-ABI RTTI descriptors (std::type_info structures) for types.
class ItaniumRTTIBuilder {
  CodeGenModule &CGM;          // Per-module state.
  llvm::LLVMContext &VMContext;
  const ItaniumCXXABI &CXXABI; // Per-module state.

  /// Fields - The fields of the RTTI descriptor currently being built.
  SmallVector<llvm::Constant *, 16> Fields;

  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
  llvm::GlobalVariable *
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);

  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
  /// descriptor of the given type.
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);

  /// BuildVTablePointer - Build the vtable pointer for the given type.
  void BuildVTablePointer(const Type *Ty);

  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
  /// classes with bases that do not satisfy the abi::__si_class_type_info
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
  /// for pointer types.
  void BuildPointerTypeInfo(QualType PointeeTy);

  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
  /// type_info for an object type.
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);

  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
  /// struct, used for member pointer types.
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);

public:
  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}

  // Pointer type info flags.
  enum {
    /// PTI_Const - Type has const qualifier.
    PTI_Const = 0x1,

    /// PTI_Volatile - Type has volatile qualifier.
    PTI_Volatile = 0x2,

    /// PTI_Restrict - Type has restrict qualifier.
    PTI_Restrict = 0x4,

    /// PTI_Incomplete - Type is incomplete.
    PTI_Incomplete = 0x8,

    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
    /// (in pointer to member).
    PTI_ContainingClassIncomplete = 0x10,

    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
    //PTI_TransactionSafe = 0x20,

    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
    PTI_Noexcept = 0x40,
  };

  // VMI type info flags.
  enum {
    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
    VMI_NonDiamondRepeat = 0x1,

    /// VMI_DiamondShaped - Class is diamond shaped.
    VMI_DiamondShaped = 0x2
  };

  // Base class type info flags.
  enum {
    /// BCTI_Virtual - Base class is virtual.
    BCTI_Virtual = 0x1,

    /// BCTI_Public - Base class is public.
    BCTI_Public = 0x2
  };

  /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
  /// link to an existing RTTI descriptor if one already exists.
  llvm::Constant *BuildTypeInfo(QualType Ty);

  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
  llvm::Constant *BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
};
}

llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
    QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);

  // We know that the mangled name of the type starts at index 4 of the
  // mangled name of the typename, so we can just index into it in order to
  // get the mangled name of the type.
  llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
                                                            Name.substr(4));
  auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);

  llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, Init->getType(), Linkage, Align.getQuantity());

  GV->setInitializer(Init);

  return GV;
}

/// Return an i8* constant referring to the RTTI descriptor for \p Ty that is
/// expected to be defined in some other translation unit or the runtime.
llvm::Constant *
ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
  // Mangle the RTTI name.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);

  // Look for an existing global.
  llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);

  if (!GV) {
    // Create a new global variable.
    // Note for the future: If we would ever like to do deferred emission of
    // RTTI, check if emitting vtables opportunistically need any adjustment.
3107 3108 GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy, 3109 /*isConstant=*/true, 3110 llvm::GlobalValue::ExternalLinkage, nullptr, 3111 Name); 3112 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 3113 CGM.setGVProperties(GV, RD); 3114 } 3115 3116 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy); 3117 } 3118 3119 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type 3120 /// info for that type is defined in the standard library. 3121 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) { 3122 // Itanium C++ ABI 2.9.2: 3123 // Basic type information (e.g. for "int", "bool", etc.) will be kept in 3124 // the run-time support library. Specifically, the run-time support 3125 // library should contain type_info objects for the types X, X* and 3126 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char, 3127 // unsigned char, signed char, short, unsigned short, int, unsigned int, 3128 // long, unsigned long, long long, unsigned long long, float, double, 3129 // long double, char16_t, char32_t, and the IEEE 754r decimal and 3130 // half-precision floating point types. 3131 // 3132 // GCC also emits RTTI for __int128. 3133 // FIXME: We do not emit RTTI information for decimal types here. 3134 3135 // Types added here must also be added to EmitFundamentalRTTIDescriptors. 
3136 switch (Ty->getKind()) { 3137 case BuiltinType::Void: 3138 case BuiltinType::NullPtr: 3139 case BuiltinType::Bool: 3140 case BuiltinType::WChar_S: 3141 case BuiltinType::WChar_U: 3142 case BuiltinType::Char_U: 3143 case BuiltinType::Char_S: 3144 case BuiltinType::UChar: 3145 case BuiltinType::SChar: 3146 case BuiltinType::Short: 3147 case BuiltinType::UShort: 3148 case BuiltinType::Int: 3149 case BuiltinType::UInt: 3150 case BuiltinType::Long: 3151 case BuiltinType::ULong: 3152 case BuiltinType::LongLong: 3153 case BuiltinType::ULongLong: 3154 case BuiltinType::Half: 3155 case BuiltinType::Float: 3156 case BuiltinType::Double: 3157 case BuiltinType::LongDouble: 3158 case BuiltinType::Float16: 3159 case BuiltinType::Float128: 3160 case BuiltinType::Char8: 3161 case BuiltinType::Char16: 3162 case BuiltinType::Char32: 3163 case BuiltinType::Int128: 3164 case BuiltinType::UInt128: 3165 return true; 3166 3167 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 3168 case BuiltinType::Id: 3169 #include "clang/Basic/OpenCLImageTypes.def" 3170 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 3171 case BuiltinType::Id: 3172 #include "clang/Basic/OpenCLExtensionTypes.def" 3173 case BuiltinType::OCLSampler: 3174 case BuiltinType::OCLEvent: 3175 case BuiltinType::OCLClkEvent: 3176 case BuiltinType::OCLQueue: 3177 case BuiltinType::OCLReserveID: 3178 #define SVE_TYPE(Name, Id, SingletonId) \ 3179 case BuiltinType::Id: 3180 #include "clang/Basic/AArch64SVEACLETypes.def" 3181 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 3182 case BuiltinType::Id: 3183 #include "clang/Basic/PPCTypes.def" 3184 case BuiltinType::ShortAccum: 3185 case BuiltinType::Accum: 3186 case BuiltinType::LongAccum: 3187 case BuiltinType::UShortAccum: 3188 case BuiltinType::UAccum: 3189 case BuiltinType::ULongAccum: 3190 case BuiltinType::ShortFract: 3191 case BuiltinType::Fract: 3192 case BuiltinType::LongFract: 3193 case BuiltinType::UShortFract: 3194 case BuiltinType::UFract: 3195 case 
BuiltinType::ULongFract: 3196 case BuiltinType::SatShortAccum: 3197 case BuiltinType::SatAccum: 3198 case BuiltinType::SatLongAccum: 3199 case BuiltinType::SatUShortAccum: 3200 case BuiltinType::SatUAccum: 3201 case BuiltinType::SatULongAccum: 3202 case BuiltinType::SatShortFract: 3203 case BuiltinType::SatFract: 3204 case BuiltinType::SatLongFract: 3205 case BuiltinType::SatUShortFract: 3206 case BuiltinType::SatUFract: 3207 case BuiltinType::SatULongFract: 3208 case BuiltinType::BFloat16: 3209 return false; 3210 3211 case BuiltinType::Dependent: 3212 #define BUILTIN_TYPE(Id, SingletonId) 3213 #define PLACEHOLDER_TYPE(Id, SingletonId) \ 3214 case BuiltinType::Id: 3215 #include "clang/AST/BuiltinTypes.def" 3216 llvm_unreachable("asking for RRTI for a placeholder type!"); 3217 3218 case BuiltinType::ObjCId: 3219 case BuiltinType::ObjCClass: 3220 case BuiltinType::ObjCSel: 3221 llvm_unreachable("FIXME: Objective-C types are unsupported!"); 3222 } 3223 3224 llvm_unreachable("Invalid BuiltinType Kind!"); 3225 } 3226 3227 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) { 3228 QualType PointeeTy = PointerTy->getPointeeType(); 3229 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy); 3230 if (!BuiltinTy) 3231 return false; 3232 3233 // Check the qualifiers. 3234 Qualifiers Quals = PointeeTy.getQualifiers(); 3235 Quals.removeConst(); 3236 3237 if (!Quals.empty()) 3238 return false; 3239 3240 return TypeInfoIsInStandardLibrary(BuiltinTy); 3241 } 3242 3243 /// IsStandardLibraryRTTIDescriptor - Returns whether the type 3244 /// information for the given type exists in the standard library. 3245 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) { 3246 // Type info for builtin types is defined in the standard library. 
  if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
    return TypeInfoIsInStandardLibrary(BuiltinTy);

  // Type info for some pointer types to builtin types is defined in the
  // standard library.
  if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
    return TypeInfoIsInStandardLibrary(PointerTy);

  return false;
}

/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
/// the given type exists somewhere else, and that we should not emit the type
/// information in this translation unit. Assumes that it is not a
/// standard-library type.
static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
                                            QualType Ty) {
  ASTContext &Context = CGM.getContext();

  // If RTTI is disabled, assume it might be disabled in the
  // translation unit that defines any potential key function, too.
  if (!Context.getLangOpts().RTTI) return false;

  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!RD->hasDefinition())
      return false;

    if (!RD->isDynamicClass())
      return false;

    // FIXME: this may need to be reconsidered if the key function
    // changes.
    // N.B. We must always emit the RTTI data ourselves if there exists a key
    // function.
    bool IsDLLImport = RD->hasAttr<DLLImportAttr>();

    // Don't import the RTTI but emit it locally.
    if (CGM.getTriple().isWindowsGNUEnvironment())
      return false;

    // If the vtable lives elsewhere, the RTTI normally does too — except for
    // dllimport classes outside the Windows-Itanium environment, where the
    // RTTI still has to be emitted locally.
    if (CGM.getVTables().isVTableExternal(RD))
      return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
                 ? false
                 : true;

    if (IsDLLImport)
      return true;
  }

  return false;
}

/// IsIncompleteClassType - Returns whether the given record type is incomplete.
3301 static bool IsIncompleteClassType(const RecordType *RecordTy) { 3302 return !RecordTy->getDecl()->isCompleteDefinition(); 3303 } 3304 3305 /// ContainsIncompleteClassType - Returns whether the given type contains an 3306 /// incomplete class type. This is true if 3307 /// 3308 /// * The given type is an incomplete class type. 3309 /// * The given type is a pointer type whose pointee type contains an 3310 /// incomplete class type. 3311 /// * The given type is a member pointer type whose class is an incomplete 3312 /// class type. 3313 /// * The given type is a member pointer type whoise pointee type contains an 3314 /// incomplete class type. 3315 /// is an indirect or direct pointer to an incomplete class type. 3316 static bool ContainsIncompleteClassType(QualType Ty) { 3317 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) { 3318 if (IsIncompleteClassType(RecordTy)) 3319 return true; 3320 } 3321 3322 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty)) 3323 return ContainsIncompleteClassType(PointerTy->getPointeeType()); 3324 3325 if (const MemberPointerType *MemberPointerTy = 3326 dyn_cast<MemberPointerType>(Ty)) { 3327 // Check if the class type is incomplete. 3328 const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass()); 3329 if (IsIncompleteClassType(ClassType)) 3330 return true; 3331 3332 return ContainsIncompleteClassType(MemberPointerTy->getPointeeType()); 3333 } 3334 3335 return false; 3336 } 3337 3338 // CanUseSingleInheritance - Return whether the given record decl has a "single, 3339 // public, non-virtual base at offset zero (i.e. the derived class is dynamic 3340 // iff the base is)", according to Itanium C++ ABI, 2.95p6b. 3341 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) { 3342 // Check the number of bases. 3343 if (RD->getNumBases() != 1) 3344 return false; 3345 3346 // Get the base. 
  CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();

  // Check that the base is not virtual.
  if (Base->isVirtual())
    return false;

  // Check that the base is public.
  if (Base->getAccessSpecifier() != AS_public)
    return false;

  // Check that the class is dynamic iff the base is.
  auto *BaseDecl =
      cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
  if (!BaseDecl->isEmpty() &&
      BaseDecl->isDynamicClass() != RD->isDynamicClass())
    return false;

  return true;
}

// Selects the mangled vtable symbol of the C++ runtime's type_info subclass
// that matches Ty, and pushes that vtable's address point onto Fields as the
// first member of the type_info object being built.
void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
  // abi::__class_type_info.
  static const char * const ClassTypeInfo =
    "_ZTVN10__cxxabiv117__class_type_infoE";
  // abi::__si_class_type_info.
  static const char * const SIClassTypeInfo =
    "_ZTVN10__cxxabiv120__si_class_type_infoE";
  // abi::__vmi_class_type_info.
  static const char * const VMIClassTypeInfo =
    "_ZTVN10__cxxabiv121__vmi_class_type_infoE";

  const char *VTableName = nullptr;

  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    llvm_unreachable("Pipe types shouldn't get here");

  case Type::Builtin:
  case Type::ExtInt:
  // GCC treats vector and complex types as fundamental types.
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::Atomic:
  // FIXME: GCC treats block pointers as fundamental types?!
  case Type::BlockPointer:
    // abi::__fundamental_type_info.
    VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // abi::__array_type_info.
    VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // abi::__function_type_info.
    VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
    break;

  case Type::Enum:
    // abi::__enum_type_info.
    VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());

    // No (visible) bases -> plain __class_type_info; a single public
    // non-virtual base at offset zero -> __si_class_type_info; anything
    // else needs the general __vmi_class_type_info.
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      VTableName = ClassTypeInfo;
    } else if (CanUseSingleInheritance(RD)) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = VMIClassTypeInfo;
    }

    break;
  }

  case Type::ObjCObject:
    // Ignore protocol qualifiers.
    Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();

    // Handle id and Class.
    if (isa<BuiltinType>(Ty)) {
      VTableName = ClassTypeInfo;
      break;
    }

    assert(isa<ObjCInterfaceType>(Ty));
    LLVM_FALLTHROUGH;

  case Type::ObjCInterface:
    // An ObjC interface with a superclass is modeled like C++ single
    // inheritance; root classes are plain __class_type_info.
    if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = ClassTypeInfo;
    }
    break;

  case Type::ObjCObjectPointer:
  case Type::Pointer:
    // abi::__pointer_type_info.
    VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
    break;

  case Type::MemberPointer:
    // abi::__pointer_to_member_type_info.
    VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
    break;
  }

  llvm::Constant *VTable = nullptr;

  // Check if the alias exists. If it doesn't, then get or create the global.
  if (CGM.getItaniumVTableContext().isRelativeLayout())
    VTable = CGM.getModule().getNamedAlias(VTableName);
  if (!VTable)
    VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);

  CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));

  llvm::Type *PtrDiffTy =
      CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());

  // The vtable address point is 2.
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // The vtable address point is 8 bytes after its start:
    // 4 for the offset to top + 4 for the relative offset to rtti.
    llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
    VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
    VTable =
        llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
  } else {
    // Classic layout: skip two pointer-sized slots (offset-to-top and the
    // RTTI pointer) to reach the address point.
    llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
    VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable,
                                                          Two);
  }
  VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);

  Fields.push_back(VTable);
}

/// Return the linkage that the type info and type info name constants
/// should have for the given type.
static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
                                                             QualType Ty) {
  // Itanium C++ ABI 2.9.5p7:
  //   In addition, it and all of the intermediate abi::__pointer_type_info
  //   structs in the chain down to the abi::__class_type_info for the
  //   incomplete class type must be prevented from resolving to the
  //   corresponding type_info structs for the complete class type, possibly
  //   by making them local static objects. Finally, a dummy class RTTI is
  //   generated for the incomplete type that will not resolve to the final
  //   complete class RTTI (because the latter need not exist), possibly by
  //   making it a local static object.
  if (ContainsIncompleteClassType(Ty))
    return llvm::GlobalValue::InternalLinkage;

  switch (Ty->getLinkage()) {
  case NoLinkage:
  case InternalLinkage:
  case UniqueExternalLinkage:
    return llvm::GlobalValue::InternalLinkage;

  case VisibleNoLinkage:
  case ModuleInternalLinkage:
  case ModuleLinkage:
  case ExternalLinkage:
    // RTTI is not enabled, which means that this type info struct is going
    // to be used for exception handling. Give it linkonce_odr linkage.
    if (!CGM.getLangOpts().RTTI)
      return llvm::GlobalValue::LinkOnceODRLinkage;

    if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
      if (RD->hasAttr<WeakAttr>())
        return llvm::GlobalValue::WeakODRLinkage;
      if (CGM.getTriple().isWindowsItaniumEnvironment())
        if (RD->hasAttr<DLLImportAttr>() &&
            ShouldUseExternalRTTIDescriptor(CGM, Ty))
          return llvm::GlobalValue::ExternalLinkage;
      // MinGW always uses LinkOnceODRLinkage for type info.
      // For dynamic classes (outside MinGW), mirror the vtable's linkage so
      // the type_info is emitted wherever the vtable is.
      if (RD->isDynamicClass() &&
          !CGM.getContext()
               .getTargetInfo()
               .getTriple()
               .isWindowsGNUEnvironment())
        return CGM.getVTableLinkage(RD);
    }

    return llvm::GlobalValue::LinkOnceODRLinkage;
  }

  llvm_unreachable("Invalid linkage!");
}

llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
  // We want to operate on the canonical type.
  Ty = Ty.getCanonicalType();

  // Check if we've already emitted an RTTI descriptor for this type.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);

  llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
  if (OldGV && !OldGV->isDeclaration()) {
    assert(!OldGV->hasAvailableExternallyLinkage() &&
           "available_externally typeinfos not yet implemented");

    return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
  }

  // Check if there is already an external RTTI descriptor for this type.
  if (IsStandardLibraryRTTIDescriptor(Ty) ||
      ShouldUseExternalRTTIDescriptor(CGM, Ty))
    return GetAddrOfExternalRTTIDescriptor(Ty);

  // Compute the linkage the type_info object and its name should get.
  llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);

  // Give the type_info object and name the formal visibility of the
  // type itself.
  llvm::GlobalValue::VisibilityTypes llvmVisibility;
  if (llvm::GlobalValue::isLocalLinkage(Linkage))
    // If the linkage is local, only default visibility makes sense.
    llvmVisibility = llvm::GlobalValue::DefaultVisibility;
  else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
           ItaniumCXXABI::RUK_NonUniqueHidden)
    // Non-unique RTTI we aren't required to export is hidden so it cannot
    // unify with copies in other DSOs.
    llvmVisibility = llvm::GlobalValue::HiddenVisibility;
  else
    llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());

  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
      llvm::GlobalValue::DefaultStorageClass;
  if (CGM.getTriple().isWindowsItaniumEnvironment()) {
    auto RD = Ty->getAsCXXRecordDecl();
    if (RD && RD->hasAttr<DLLExportAttr>())
      DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
  }

  return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
}

llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
  // Add the vtable pointer.
  BuildVTablePointer(cast<Type>(Ty));

  // And the name.
  llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
  llvm::Constant *TypeNameField;

  // If we're supposed to demote the visibility, be sure to set a flag
  // to use a string comparison for type_info comparisons.
  ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
      CXXABI.classifyRTTIUniqueness(Ty, Linkage);
  if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
    // The flag is the sign bit, which on ARM64 is defined to be clear
    // for global pointers.  This is very ARM64-specific.
    TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
    llvm::Constant *flag =
        llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
    TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
    TypeNameField =
        llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
  } else {
    TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
  }
  Fields.push_back(TypeNameField);

  // Add the class-specific trailing fields; the case structure parallels the
  // vtable selection in BuildVTablePointer above.
  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  // GCC treats vector types as fundamental types.
  case Type::Builtin:
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::BlockPointer:
    // Itanium C++ ABI 2.9.5p4:
    //   abi::__fundamental_type_info adds no data members to std::type_info.
    break;

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    break;

  case Type::ExtInt:
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__array_type_info adds no data members to std::type_info.
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__function_type_info adds no data members to std::type_info.
    break;

  case Type::Enum:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__enum_type_info adds no data members to std::type_info.
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      // We don't need to emit any fields.
      break;
    }

    if (CanUseSingleInheritance(RD))
      BuildSIClassTypeInfo(RD);
    else
      BuildVMIClassTypeInfo(RD);

    break;
  }

  case Type::ObjCObject:
  case Type::ObjCInterface:
    BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
    break;

  case Type::ObjCObjectPointer:
    BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
    break;

  case Type::Pointer:
    BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
    break;

  case Type::MemberPointer:
    BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
    break;

  case Type::Atomic:
    // No fields, at least for the moment.
    break;
  }

  llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);

  // Re-mangle the name; a declaration-only global with this name may exist
  // already and must be replaced by the defined one below.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
  llvm::Module &M = CGM.getModule();
  llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
  llvm::GlobalVariable *GV =
      new llvm::GlobalVariable(M, Init->getType(),
                               /*isConstant=*/true, Linkage, Init, Name);

  // If there's already an old global variable, replace it with the new one.
  if (OldGV) {
    GV->takeName(OldGV);
    llvm::Constant *NewPtr =
      llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtr);
    OldGV->eraseFromParent();
  }

  if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(M.getOrInsertComdat(GV->getName()));

  CharUnits Align =
      CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0));
  GV->setAlignment(Align.getAsAlign());

  // The Itanium ABI specifies that type_info objects must be globally
  // unique, with one exception: if the type is an incomplete class
  // type or a (possibly indirect) pointer to one.  That exception
  // affects the general case of comparing type_info objects produced
  // by the typeid operator, which is why the comparison operators on
  // std::type_info generally use the type_info name pointers instead
  // of the object addresses.  However, the language's built-in uses
  // of RTTI generally require class types to be complete, even when
  // manipulating pointers to those class types.  This allows the
  // implementation of dynamic_cast to rely on address equality tests,
  // which is much faster.

  // All of this is to say that it's important that both the type_info
  // object and the type_info name be uniqued when weakly emitted.

  TypeName->setVisibility(Visibility);
  CGM.setDSOLocal(TypeName);

  GV->setVisibility(Visibility);
  CGM.setDSOLocal(GV);

  TypeName->setDLLStorageClass(DLLStorageClass);
  GV->setDLLStorageClass(DLLStorageClass);

  TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
  GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);

  return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
}

/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
/// for the given Objective-C object type.
void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
  // Drop qualifiers.
  const Type *T = OT->getBaseType().getTypePtr();
  assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));

  // The builtin types are abi::__class_type_infos and don't require
  // extra fields.
  if (isa<BuiltinType>(T)) return;

  ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
  ObjCInterfaceDecl *Super = Class->getSuperClass();

  // Root classes are also __class_type_info.
  if (!Super) return;

  QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);

  // Everything else is single inheritance: emit the superclass type_info as
  // the __base_type field of an __si_class_type_info.
  llvm::Constant *BaseTypeInfo =
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
  Fields.push_back(BaseTypeInfo);
}

/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
/// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
  // Itanium C++ ABI 2.9.5p6b:
  //   It adds to abi::__class_type_info a single member pointing to the
  //   type_info structure for the base type,
  llvm::Constant *BaseTypeInfo =
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
  Fields.push_back(BaseTypeInfo);
}

namespace {
  /// SeenBases - Contains virtual and non-virtual bases seen when traversing
  /// a class hierarchy.
  struct SeenBases {
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
  };
}

/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
/// abi::__vmi_class_type_info.
///
static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
                                             SeenBases &Bases) {

  unsigned Flags = 0;

  auto *BaseDecl =
      cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());

  if (Base->isVirtual()) {
    // Mark the virtual base as seen.
    if (!Bases.VirtualBases.insert(BaseDecl).second) {
      // If this virtual base has been seen before, then the class is diamond
      // shaped.
      Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
    } else {
      // First occurrence as a virtual base; if it was previously seen as a
      // non-virtual base, the base repeats without forming a diamond.
      if (Bases.NonVirtualBases.count(BaseDecl))
        Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
    }
  } else {
    // Mark the non-virtual base as seen.
    if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
      // If this non-virtual base has been seen before, then the class has non-
      // diamond shaped repeated inheritance.
      Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
    } else {
      // Likewise, repeated via a mix of virtual and non-virtual paths.
      if (Bases.VirtualBases.count(BaseDecl))
        Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
    }
  }

  // Walk all bases.
  for (const auto &I : BaseDecl->bases())
    Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);

  return Flags;
}

// Entry point: OR together the diamond/repeat flags across every direct and
// indirect base of RD, with a fresh SeenBases state.
static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
  unsigned Flags = 0;
  SeenBases Bases;

  // Walk all bases.
  for (const auto &I : RD->bases())
    Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);

  return Flags;
}

/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
/// classes with bases that do not satisfy the abi::__si_class_type_info
/// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
3889 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) { 3890 llvm::Type *UnsignedIntLTy = 3891 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy); 3892 3893 // Itanium C++ ABI 2.9.5p6c: 3894 // __flags is a word with flags describing details about the class 3895 // structure, which may be referenced by using the __flags_masks 3896 // enumeration. These flags refer to both direct and indirect bases. 3897 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD); 3898 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags)); 3899 3900 // Itanium C++ ABI 2.9.5p6c: 3901 // __base_count is a word with the number of direct proper base class 3902 // descriptions that follow. 3903 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases())); 3904 3905 if (!RD->getNumBases()) 3906 return; 3907 3908 // Now add the base class descriptions. 3909 3910 // Itanium C++ ABI 2.9.5p6c: 3911 // __base_info[] is an array of base class descriptions -- one for every 3912 // direct proper base. Each description is of the type: 3913 // 3914 // struct abi::__base_class_type_info { 3915 // public: 3916 // const __class_type_info *__base_type; 3917 // long __offset_flags; 3918 // 3919 // enum __offset_flags_masks { 3920 // __virtual_mask = 0x1, 3921 // __public_mask = 0x2, 3922 // __offset_shift = 8 3923 // }; 3924 // }; 3925 3926 // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long 3927 // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on 3928 // LLP64 platforms. 3929 // FIXME: Consider updating libc++abi to match, and extend this logic to all 3930 // LLP64 platforms. 
3931 QualType OffsetFlagsTy = CGM.getContext().LongTy; 3932 const TargetInfo &TI = CGM.getContext().getTargetInfo(); 3933 if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth()) 3934 OffsetFlagsTy = CGM.getContext().LongLongTy; 3935 llvm::Type *OffsetFlagsLTy = 3936 CGM.getTypes().ConvertType(OffsetFlagsTy); 3937 3938 for (const auto &Base : RD->bases()) { 3939 // The __base_type member points to the RTTI for the base type. 3940 Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType())); 3941 3942 auto *BaseDecl = 3943 cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl()); 3944 3945 int64_t OffsetFlags = 0; 3946 3947 // All but the lower 8 bits of __offset_flags are a signed offset. 3948 // For a non-virtual base, this is the offset in the object of the base 3949 // subobject. For a virtual base, this is the offset in the virtual table of 3950 // the virtual base offset for the virtual base referenced (negative). 3951 CharUnits Offset; 3952 if (Base.isVirtual()) 3953 Offset = 3954 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl); 3955 else { 3956 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); 3957 Offset = Layout.getBaseClassOffset(BaseDecl); 3958 }; 3959 3960 OffsetFlags = uint64_t(Offset.getQuantity()) << 8; 3961 3962 // The low-order byte of __offset_flags contains flags, as given by the 3963 // masks from the enumeration __offset_flags_masks. 3964 if (Base.isVirtual()) 3965 OffsetFlags |= BCTI_Virtual; 3966 if (Base.getAccessSpecifier() == AS_public) 3967 OffsetFlags |= BCTI_Public; 3968 3969 Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags)); 3970 } 3971 } 3972 3973 /// Compute the flags for a __pbase_type_info, and remove the corresponding 3974 /// pieces from \p Type. 
3975 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) { 3976 unsigned Flags = 0; 3977 3978 if (Type.isConstQualified()) 3979 Flags |= ItaniumRTTIBuilder::PTI_Const; 3980 if (Type.isVolatileQualified()) 3981 Flags |= ItaniumRTTIBuilder::PTI_Volatile; 3982 if (Type.isRestrictQualified()) 3983 Flags |= ItaniumRTTIBuilder::PTI_Restrict; 3984 Type = Type.getUnqualifiedType(); 3985 3986 // Itanium C++ ABI 2.9.5p7: 3987 // When the abi::__pbase_type_info is for a direct or indirect pointer to an 3988 // incomplete class type, the incomplete target type flag is set. 3989 if (ContainsIncompleteClassType(Type)) 3990 Flags |= ItaniumRTTIBuilder::PTI_Incomplete; 3991 3992 if (auto *Proto = Type->getAs<FunctionProtoType>()) { 3993 if (Proto->isNothrow()) { 3994 Flags |= ItaniumRTTIBuilder::PTI_Noexcept; 3995 Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None); 3996 } 3997 } 3998 3999 return Flags; 4000 } 4001 4002 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, 4003 /// used for pointer types. 4004 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) { 4005 // Itanium C++ ABI 2.9.5p7: 4006 // __flags is a flag word describing the cv-qualification and other 4007 // attributes of the type pointed to 4008 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy); 4009 4010 llvm::Type *UnsignedIntLTy = 4011 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy); 4012 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags)); 4013 4014 // Itanium C++ ABI 2.9.5p7: 4015 // __pointee is a pointer to the std::type_info derivation for the 4016 // unqualified type being pointed to. 4017 llvm::Constant *PointeeTypeInfo = 4018 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy); 4019 Fields.push_back(PointeeTypeInfo); 4020 } 4021 4022 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info 4023 /// struct, used for member pointer types. 
void
ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
  QualType PointeeTy = Ty->getPointeeType();

  // Itanium C++ ABI 2.9.5p7:
  //   __flags is a flag word describing the cv-qualification and other
  //   attributes of the type pointed to.
  unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);

  // Member pointers additionally record whether their containing class is
  // incomplete.
  const RecordType *ClassType = cast<RecordType>(Ty->getClass());
  if (IsIncompleteClassType(ClassType))
    Flags |= PTI_ContainingClassIncomplete;

  llvm::Type *UnsignedIntLTy =
      CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));

  // Itanium C++ ABI 2.9.5p7:
  //   __pointee is a pointer to the std::type_info derivation for the
  //   unqualified type being pointed to.
  llvm::Constant *PointeeTypeInfo =
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
  Fields.push_back(PointeeTypeInfo);

  // Itanium C++ ABI 2.9.5p9:
  //   __context is a pointer to an abi::__class_type_info corresponding to the
  //   class type containing the member pointed to
  //   (e.g., the "A" in "int A::*").
  Fields.push_back(
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
}

llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
  return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
}

// Emit type_info objects for all fundamental types, plus the pointer and
// pointer-to-const variants of each, with external linkage.
void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
  // Types added here must also be added to TypeInfoIsInStandardLibrary.
  QualType FundamentalTypes[] = {
      getContext().VoidTy,             getContext().NullPtrTy,
      getContext().BoolTy,             getContext().WCharTy,
      getContext().CharTy,             getContext().UnsignedCharTy,
      getContext().SignedCharTy,       getContext().ShortTy,
      getContext().UnsignedShortTy,    getContext().IntTy,
      getContext().UnsignedIntTy,      getContext().LongTy,
      getContext().UnsignedLongTy,     getContext().LongLongTy,
      getContext().UnsignedLongLongTy, getContext().Int128Ty,
      getContext().UnsignedInt128Ty,   getContext().HalfTy,
      getContext().FloatTy,            getContext().DoubleTy,
      getContext().LongDoubleTy,       getContext().Float128Ty,
      getContext().Char8Ty,            getContext().Char16Ty,
      getContext().Char32Ty
  };
  // The descriptors inherit storage class and visibility from the class (RD)
  // that triggered their emission.
  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
      RD->hasAttr<DLLExportAttr>()
          ? llvm::GlobalValue::DLLExportStorageClass
          : llvm::GlobalValue::DefaultStorageClass;
  llvm::GlobalValue::VisibilityTypes Visibility =
      CodeGenModule::GetLLVMVisibility(RD->getVisibility());
  for (const QualType &FundamentalType : FundamentalTypes) {
    QualType PointerType = getContext().getPointerType(FundamentalType);
    QualType PointerTypeConst = getContext().getPointerType(
        FundamentalType.withConst());
    for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
      ItaniumRTTIBuilder(*this).BuildTypeInfo(
          Type, llvm::GlobalValue::ExternalLinkage,
          Visibility, DLLStorageClass);
  }
}

/// What sort of uniqueness rules should we use for the RTTI for the
/// given type?
ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
    QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
  if (shouldRTTIBeUnique())
    return RUK_Unique;

  // It's only necessary for linkonce_odr or weak_odr linkage.
  if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
      Linkage != llvm::GlobalValue::WeakODRLinkage)
    return RUK_Unique;

  // It's only necessary with default visibility.
  if (CanTy->getVisibility() != DefaultVisibility)
    return RUK_Unique;

  // If we're not required to publish this symbol, hide it.
  if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
    return RUK_NonUniqueHidden;

  // If we're required to publish this symbol, as we might be under an
  // explicit instantiation, leave it with default visibility but
  // enable string-comparisons.
  assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
  return RUK_NonUniqueVisible;
}

// Find out how to codegen the complete destructor and constructor
namespace {
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
}
static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
                                       const CXXMethodDecl *MD) {
  if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
    return StructorCodegen::Emit;

  // The complete and base structors are not equivalent if there are any virtual
  // bases, so emit separate functions.
  if (MD->getParent()->getNumVBases())
    return StructorCodegen::Emit;

  GlobalDecl AliasDecl;
  if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
    AliasDecl = GlobalDecl(DD, Dtor_Complete);
  } else {
    const auto *CD = cast<CXXConstructorDecl>(MD);
    AliasDecl = GlobalDecl(CD, Ctor_Complete);
  }
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  // A discardable complete structor can simply have its uses rewritten to the
  // base variant instead of emitting an alias.
  if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
    return StructorCodegen::RAUW;

  // FIXME: Should we allow available_externally aliases?
  if (!llvm::GlobalAlias::isValidLinkage(Linkage))
    return StructorCodegen::RAUW;

  if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
    // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
    if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
        CGM.getTarget().getTriple().isOSBinFormatWasm())
      return StructorCodegen::COMDAT;
    return StructorCodegen::Emit;
  }

  return StructorCodegen::Alias;
}

// Emits (or reuses) a GlobalAlias named for AliasDecl that points at the
// function emitted for TargetDecl, replacing any prior declaration.
static void emitConstructorDestructorAlias(CodeGenModule &CGM,
                                           GlobalDecl AliasDecl,
                                           GlobalDecl TargetDecl) {
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  StringRef MangledName = CGM.getMangledName(AliasDecl);
  llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
  // Nothing to do if a definition under this name already exists.
  if (Entry && !Entry->isDeclaration())
    return;

  auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));

  // Create the alias with no name.
  auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);

  // Constructors and destructors are always unnamed_addr.
  Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Switch any previous uses to the alias.
  if (Entry) {
    assert(Entry->getType() == Aliasee->getType() &&
           "declaration exists with different type");
    Alias->takeName(Entry);
    Entry->replaceAllUsesWith(Alias);
    Entry->eraseFromParent();
  } else {
    Alias->setName(MangledName);
  }

  // Finally, set up the alias with its proper name and attributes.
  CGM.SetCommonAttributes(AliasDecl, Alias);
}

void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());
  auto *CD = dyn_cast<CXXConstructorDecl>(MD);
  const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);

  StructorCodegen CGType = getCodegenToUse(CGM, MD);

  // For complete-object variants, try to avoid emitting a separate body by
  // aliasing or RAUW'ing to the equivalent base-object variant.
  if (CD ? GD.getCtorType() == Ctor_Complete
         : GD.getDtorType() == Dtor_Complete) {
    GlobalDecl BaseDecl;
    if (CD)
      BaseDecl = GD.getWithCtorType(Ctor_Base);
    else
      BaseDecl = GD.getWithDtorType(Dtor_Base);

    if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
      emitConstructorDestructorAlias(CGM, GD, BaseDecl);
      return;
    }

    if (CGType == StructorCodegen::RAUW) {
      StringRef MangledName = CGM.getMangledName(GD);
      auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
      CGM.addReplacement(MangledName, Aliasee);
      return;
    }
  }

  // The base destructor is equivalent to the base destructor of its
  // base class if there is exactly one non-virtual base class with a
  // non-trivial destructor, there are no fields with a non-trivial
  // destructor, and the body of the destructor is trivial.
  if (DD && GD.getDtorType() == Dtor_Base &&
      CGType != StructorCodegen::COMDAT &&
      !CGM.TryEmitBaseDestructorAsAlias(DD))
    return;

  // FIXME: The deleting destructor is equivalent to the selected operator
  // delete if:
  //  * either the delete is a destroying operator delete or the destructor
  //    would be trivial if it weren't virtual,
  //  * the conversion from the 'this' parameter to the first parameter of the
  //    destructor is equivalent to a bitcast,
  //  * the destructor does not have an implicit "this" return, and
  //  * the operator delete has the same calling convention and IR function type
  //    as the destructor.
  // In such cases we should try to emit the deleting dtor as an alias to the
  // selected 'operator delete'.
4243 4244 llvm::Function *Fn = CGM.codegenCXXStructor(GD); 4245 4246 if (CGType == StructorCodegen::COMDAT) { 4247 SmallString<256> Buffer; 4248 llvm::raw_svector_ostream Out(Buffer); 4249 if (DD) 4250 getMangleContext().mangleCXXDtorComdat(DD, Out); 4251 else 4252 getMangleContext().mangleCXXCtorComdat(CD, Out); 4253 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str()); 4254 Fn->setComdat(C); 4255 } else { 4256 CGM.maybeSetTrivialComdat(*MD, *Fn); 4257 } 4258 } 4259 4260 static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) { 4261 // void *__cxa_begin_catch(void*); 4262 llvm::FunctionType *FTy = llvm::FunctionType::get( 4263 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false); 4264 4265 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch"); 4266 } 4267 4268 static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) { 4269 // void __cxa_end_catch(); 4270 llvm::FunctionType *FTy = 4271 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); 4272 4273 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch"); 4274 } 4275 4276 static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) { 4277 // void *__cxa_get_exception_ptr(void*); 4278 llvm::FunctionType *FTy = llvm::FunctionType::get( 4279 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false); 4280 4281 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr"); 4282 } 4283 4284 namespace { 4285 /// A cleanup to call __cxa_end_catch. In many cases, the caught 4286 /// exception type lets us state definitively that the thrown exception 4287 /// type does not have a destructor. In particular: 4288 /// - Catch-alls tell us nothing, so we have to conservatively 4289 /// assume that the thrown exception might have a destructor. 4290 /// - Catches by reference behave according to their base types. 4291 /// - Catches of non-record types will only trigger for exceptions 4292 /// of non-record types, which never have destructors. 
4293 /// - Catches of record types can trigger for arbitrary subclasses 4294 /// of the caught type, so we have to assume the actual thrown 4295 /// exception type might have a throwing destructor, even if the 4296 /// caught type's destructor is trivial or nothrow. 4297 struct CallEndCatch final : EHScopeStack::Cleanup { 4298 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {} 4299 bool MightThrow; 4300 4301 void Emit(CodeGenFunction &CGF, Flags flags) override { 4302 if (!MightThrow) { 4303 CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM)); 4304 return; 4305 } 4306 4307 CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM)); 4308 } 4309 }; 4310 } 4311 4312 /// Emits a call to __cxa_begin_catch and enters a cleanup to call 4313 /// __cxa_end_catch. 4314 /// 4315 /// \param EndMightThrow - true if __cxa_end_catch might throw 4316 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF, 4317 llvm::Value *Exn, 4318 bool EndMightThrow) { 4319 llvm::CallInst *call = 4320 CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn); 4321 4322 CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow); 4323 4324 return call; 4325 } 4326 4327 /// A "special initializer" callback for initializing a catch 4328 /// parameter during catch initialization. 4329 static void InitCatchParam(CodeGenFunction &CGF, 4330 const VarDecl &CatchParam, 4331 Address ParamAddr, 4332 SourceLocation Loc) { 4333 // Load the exception from where the landing pad saved it. 4334 llvm::Value *Exn = CGF.getExceptionFromSlot(); 4335 4336 CanQualType CatchType = 4337 CGF.CGM.getContext().getCanonicalType(CatchParam.getType()); 4338 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType); 4339 4340 // If we're catching by reference, we can just cast the object 4341 // pointer to the appropriate pointer. 
4342 if (isa<ReferenceType>(CatchType)) { 4343 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType(); 4344 bool EndCatchMightThrow = CaughtType->isRecordType(); 4345 4346 // __cxa_begin_catch returns the adjusted object pointer. 4347 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow); 4348 4349 // We have no way to tell the personality function that we're 4350 // catching by reference, so if we're catching a pointer, 4351 // __cxa_begin_catch will actually return that pointer by value. 4352 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) { 4353 QualType PointeeType = PT->getPointeeType(); 4354 4355 // When catching by reference, generally we should just ignore 4356 // this by-value pointer and use the exception object instead. 4357 if (!PointeeType->isRecordType()) { 4358 4359 // Exn points to the struct _Unwind_Exception header, which 4360 // we have to skip past in order to reach the exception data. 4361 unsigned HeaderSize = 4362 CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException(); 4363 AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize); 4364 4365 // However, if we're catching a pointer-to-record type that won't 4366 // work, because the personality function might have adjusted 4367 // the pointer. There's actually no way for us to fully satisfy 4368 // the language/ABI contract here: we can't use Exn because it 4369 // might have the wrong adjustment, but we can't use the by-value 4370 // pointer because it's off by a level of abstraction. 4371 // 4372 // The current solution is to dump the adjusted pointer into an 4373 // alloca, which breaks language semantics (because changing the 4374 // pointer doesn't change the exception) but at least works. 4375 // The better solution would be to filter out non-exact matches 4376 // and rethrow them, but this is tricky because the rethrow 4377 // really needs to be catchable by other sites at this landing 4378 // pad. 
The best solution is to fix the personality function. 4379 } else { 4380 // Pull the pointer for the reference type off. 4381 llvm::Type *PtrTy = 4382 cast<llvm::PointerType>(LLVMCatchTy)->getElementType(); 4383 4384 // Create the temporary and write the adjusted pointer into it. 4385 Address ExnPtrTmp = 4386 CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp"); 4387 llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy); 4388 CGF.Builder.CreateStore(Casted, ExnPtrTmp); 4389 4390 // Bind the reference to the temporary. 4391 AdjustedExn = ExnPtrTmp.getPointer(); 4392 } 4393 } 4394 4395 llvm::Value *ExnCast = 4396 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref"); 4397 CGF.Builder.CreateStore(ExnCast, ParamAddr); 4398 return; 4399 } 4400 4401 // Scalars and complexes. 4402 TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType); 4403 if (TEK != TEK_Aggregate) { 4404 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false); 4405 4406 // If the catch type is a pointer type, __cxa_begin_catch returns 4407 // the pointer by value. 4408 if (CatchType->hasPointerRepresentation()) { 4409 llvm::Value *CastExn = 4410 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted"); 4411 4412 switch (CatchType.getQualifiers().getObjCLifetime()) { 4413 case Qualifiers::OCL_Strong: 4414 CastExn = CGF.EmitARCRetainNonBlock(CastExn); 4415 LLVM_FALLTHROUGH; 4416 4417 case Qualifiers::OCL_None: 4418 case Qualifiers::OCL_ExplicitNone: 4419 case Qualifiers::OCL_Autoreleasing: 4420 CGF.Builder.CreateStore(CastExn, ParamAddr); 4421 return; 4422 4423 case Qualifiers::OCL_Weak: 4424 CGF.EmitARCInitWeak(ParamAddr, CastExn); 4425 return; 4426 } 4427 llvm_unreachable("bad ownership qualifier!"); 4428 } 4429 4430 // Otherwise, it returns a pointer into the exception object. 
4431 4432 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok 4433 llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy); 4434 4435 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType); 4436 LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType); 4437 switch (TEK) { 4438 case TEK_Complex: 4439 CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV, 4440 /*init*/ true); 4441 return; 4442 case TEK_Scalar: { 4443 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc); 4444 CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true); 4445 return; 4446 } 4447 case TEK_Aggregate: 4448 llvm_unreachable("evaluation kind filtered out!"); 4449 } 4450 llvm_unreachable("bad evaluation kind"); 4451 } 4452 4453 assert(isa<RecordType>(CatchType) && "unexpected catch type!"); 4454 auto catchRD = CatchType->getAsCXXRecordDecl(); 4455 CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD); 4456 4457 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok 4458 4459 // Check for a copy expression. If we don't have a copy expression, 4460 // that means a trivial copy is okay. 4461 const Expr *copyExpr = CatchParam.getInit(); 4462 if (!copyExpr) { 4463 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true); 4464 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy), 4465 caughtExnAlignment); 4466 LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType); 4467 LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType); 4468 CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap); 4469 return; 4470 } 4471 4472 // We have to call __cxa_get_exception_ptr to get the adjusted 4473 // pointer before copying. 4474 llvm::CallInst *rawAdjustedExn = 4475 CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn); 4476 4477 // Cast that to the appropriate type. 
4478 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy), 4479 caughtExnAlignment); 4480 4481 // The copy expression is defined in terms of an OpaqueValueExpr. 4482 // Find it and map it to the adjusted expression. 4483 CodeGenFunction::OpaqueValueMapping 4484 opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr), 4485 CGF.MakeAddrLValue(adjustedExn, CatchParam.getType())); 4486 4487 // Call the copy ctor in a terminate scope. 4488 CGF.EHStack.pushTerminate(); 4489 4490 // Perform the copy construction. 4491 CGF.EmitAggExpr(copyExpr, 4492 AggValueSlot::forAddr(ParamAddr, Qualifiers(), 4493 AggValueSlot::IsNotDestructed, 4494 AggValueSlot::DoesNotNeedGCBarriers, 4495 AggValueSlot::IsNotAliased, 4496 AggValueSlot::DoesNotOverlap)); 4497 4498 // Leave the terminate scope. 4499 CGF.EHStack.popTerminate(); 4500 4501 // Undo the opaque value mapping. 4502 opaque.pop(); 4503 4504 // Finally we can call __cxa_begin_catch. 4505 CallBeginCatch(CGF, Exn, true); 4506 } 4507 4508 /// Begins a catch statement by initializing the catch variable and 4509 /// calling __cxa_begin_catch. 4510 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF, 4511 const CXXCatchStmt *S) { 4512 // We have to be very careful with the ordering of cleanups here: 4513 // C++ [except.throw]p4: 4514 // The destruction [of the exception temporary] occurs 4515 // immediately after the destruction of the object declared in 4516 // the exception-declaration in the handler. 4517 // 4518 // So the precise ordering is: 4519 // 1. Construct catch variable. 4520 // 2. __cxa_begin_catch 4521 // 3. Enter __cxa_end_catch cleanup 4522 // 4. Enter dtor cleanup 4523 // 4524 // We do this by using a slightly abnormal initialization process. 
4525 // Delegation sequence: 4526 // - ExitCXXTryStmt opens a RunCleanupsScope 4527 // - EmitAutoVarAlloca creates the variable and debug info 4528 // - InitCatchParam initializes the variable from the exception 4529 // - CallBeginCatch calls __cxa_begin_catch 4530 // - CallBeginCatch enters the __cxa_end_catch cleanup 4531 // - EmitAutoVarCleanups enters the variable destructor cleanup 4532 // - EmitCXXTryStmt emits the code for the catch body 4533 // - EmitCXXTryStmt close the RunCleanupsScope 4534 4535 VarDecl *CatchParam = S->getExceptionDecl(); 4536 if (!CatchParam) { 4537 llvm::Value *Exn = CGF.getExceptionFromSlot(); 4538 CallBeginCatch(CGF, Exn, true); 4539 return; 4540 } 4541 4542 // Emit the local. 4543 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam); 4544 InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc()); 4545 CGF.EmitAutoVarCleanups(var); 4546 } 4547 4548 /// Get or define the following function: 4549 /// void @__clang_call_terminate(i8* %exn) nounwind noreturn 4550 /// This code is used only in C++. 4551 static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) { 4552 llvm::FunctionType *fnTy = 4553 llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false); 4554 llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction( 4555 fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true); 4556 llvm::Function *fn = 4557 cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts()); 4558 if (fn->empty()) { 4559 fn->setDoesNotThrow(); 4560 fn->setDoesNotReturn(); 4561 4562 // What we really want is to massively penalize inlining without 4563 // forbidding it completely. The difference between that and 4564 // 'noinline' is negligible. 4565 fn->addFnAttr(llvm::Attribute::NoInline); 4566 4567 // Allow this function to be shared across translation units, but 4568 // we don't want it to turn into an exported symbol. 
4569 fn->setLinkage(llvm::Function::LinkOnceODRLinkage); 4570 fn->setVisibility(llvm::Function::HiddenVisibility); 4571 if (CGM.supportsCOMDAT()) 4572 fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName())); 4573 4574 // Set up the function. 4575 llvm::BasicBlock *entry = 4576 llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn); 4577 CGBuilderTy builder(CGM, entry); 4578 4579 // Pull the exception pointer out of the parameter list. 4580 llvm::Value *exn = &*fn->arg_begin(); 4581 4582 // Call __cxa_begin_catch(exn). 4583 llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn); 4584 catchCall->setDoesNotThrow(); 4585 catchCall->setCallingConv(CGM.getRuntimeCC()); 4586 4587 // Call std::terminate(). 4588 llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn()); 4589 termCall->setDoesNotThrow(); 4590 termCall->setDoesNotReturn(); 4591 termCall->setCallingConv(CGM.getRuntimeCC()); 4592 4593 // std::terminate cannot return. 4594 builder.CreateUnreachable(); 4595 } 4596 return fnRef; 4597 } 4598 4599 llvm::CallInst * 4600 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF, 4601 llvm::Value *Exn) { 4602 // In C++, we want to call __cxa_begin_catch() before terminating. 
4603 if (Exn) { 4604 assert(CGF.CGM.getLangOpts().CPlusPlus); 4605 return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn); 4606 } 4607 return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn()); 4608 } 4609 4610 std::pair<llvm::Value *, const CXXRecordDecl *> 4611 ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This, 4612 const CXXRecordDecl *RD) { 4613 return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD}; 4614 } 4615 4616 void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF, 4617 const CXXCatchStmt *C) { 4618 if (CGF.getTarget().hasFeature("exception-handling")) 4619 CGF.EHStack.pushCleanup<CatchRetScope>( 4620 NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad)); 4621 ItaniumCXXABI::emitBeginCatch(CGF, C); 4622 } 4623 4624 /// Register a global destructor as best as we know how. 4625 void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, 4626 llvm::FunctionCallee dtor, 4627 llvm::Constant *addr) { 4628 if (D.getTLSKind() != VarDecl::TLS_None) 4629 llvm::report_fatal_error("thread local storage not yet implemented on AIX"); 4630 4631 // Create __dtor function for the var decl. 4632 llvm::Function *dtorStub = CGF.createAtExitStub(D, dtor, addr); 4633 4634 // Register above __dtor with atexit(). 4635 CGF.registerGlobalDtorWithAtExit(dtorStub); 4636 4637 // Emit __finalize function to unregister __dtor and (as appropriate) call 4638 // __dtor. 4639 emitCXXStermFinalizer(D, dtorStub, addr); 4640 } 4641 4642 void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub, 4643 llvm::Constant *addr) { 4644 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false); 4645 SmallString<256> FnName; 4646 { 4647 llvm::raw_svector_ostream Out(FnName); 4648 getMangleContext().mangleDynamicStermFinalizer(&D, Out); 4649 } 4650 4651 // Create the finalization action associated with a variable. 
4652 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction(); 4653 llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction( 4654 FTy, FnName.str(), FI, D.getLocation()); 4655 4656 CodeGenFunction CGF(CGM); 4657 4658 CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI, 4659 FunctionArgList(), D.getLocation(), 4660 D.getInit()->getExprLoc()); 4661 4662 // The unatexit subroutine unregisters __dtor functions that were previously 4663 // registered by the atexit subroutine. If the referenced function is found, 4664 // the unatexit returns a value of 0, meaning that the cleanup is still 4665 // pending (and we should call the __dtor function). 4666 llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub); 4667 4668 llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct"); 4669 4670 llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call"); 4671 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end"); 4672 4673 // Check if unatexit returns a value of 0. If it does, jump to 4674 // DestructCallBlock, otherwise jump to EndBlock directly. 4675 CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock); 4676 4677 CGF.EmitBlock(DestructCallBlock); 4678 4679 // Emit the call to dtorStub. 4680 llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub); 4681 4682 // Make sure the call and the callee agree on calling convention. 
4683 CI->setCallingConv(dtorStub->getCallingConv()); 4684 4685 CGF.EmitBlock(EndBlock); 4686 4687 CGF.FinishFunction(); 4688 4689 assert(!D.getAttr<InitPriorityAttr>() && 4690 "Prioritized sinit and sterm functions are not yet supported."); 4691 4692 if (isTemplateInstantiation(D.getTemplateSpecializationKind()) || 4693 getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) 4694 // According to C++ [basic.start.init]p2, class template static data 4695 // members (i.e., implicitly or explicitly instantiated specializations) 4696 // have unordered initialization. As a consequence, we can put them into 4697 // their own llvm.global_dtors entry. 4698 CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535); 4699 else 4700 CGM.AddCXXStermFinalizerEntry(StermFinalizer); 4701 } 4702