1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This provides C++ code generation targeting the Itanium C++ ABI. The class 10 // in this file generates structures that follow the Itanium C++ ABI, which is 11 // documented at: 12 // https://itanium-cxx-abi.github.io/cxx-abi/abi.html 13 // https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html 14 // 15 // It also supports the closely-related ARM ABI, documented at: 16 // https://developer.arm.com/documentation/ihi0041/g/ 17 // 18 //===----------------------------------------------------------------------===// 19 20 #include "CGCXXABI.h" 21 #include "CGCleanup.h" 22 #include "CGRecordLayout.h" 23 #include "CGVTables.h" 24 #include "CodeGenFunction.h" 25 #include "CodeGenModule.h" 26 #include "TargetInfo.h" 27 #include "clang/AST/Attr.h" 28 #include "clang/AST/Mangle.h" 29 #include "clang/AST/StmtCXX.h" 30 #include "clang/AST/Type.h" 31 #include "clang/CodeGen/ConstantInitBuilder.h" 32 #include "llvm/IR/DataLayout.h" 33 #include "llvm/IR/GlobalValue.h" 34 #include "llvm/IR/Instructions.h" 35 #include "llvm/IR/Intrinsics.h" 36 #include "llvm/IR/Value.h" 37 #include "llvm/Support/ScopedPrinter.h" 38 39 #include <optional> 40 41 using namespace clang; 42 using namespace CodeGen; 43 44 namespace { 45 class ItaniumCXXABI : public CodeGen::CGCXXABI { 46 /// VTables - All the vtables which have been defined. 47 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables; 48 49 /// All the thread wrapper functions that have been used. 50 llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8> 51 ThreadWrappers; 52 53 protected: 54 bool UseARMMethodPtrABI; 55 bool UseARMGuardVarABI; 56 bool Use32BitVTableOffsetABI; 57 58 ItaniumMangleContext &getMangleContext() { 59 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext()); 60 } 61 62 public: 63 ItaniumCXXABI(CodeGen::CodeGenModule &CGM, 64 bool UseARMMethodPtrABI = false, 65 bool UseARMGuardVarABI = false) : 66 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI), 67 UseARMGuardVarABI(UseARMGuardVarABI), 68 Use32BitVTableOffsetABI(false) { } 69 70 bool classifyReturnType(CGFunctionInfo &FI) const override; 71 72 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override { 73 // If C++ prohibits us from making a copy, pass by address. 74 if (!RD->canPassInRegisters()) 75 return RAA_Indirect; 76 return RAA_Default; 77 } 78 79 bool isThisCompleteObject(GlobalDecl GD) const override { 80 // The Itanium ABI has separate complete-object vs. base-object 81 // variants of both constructors and destructors. 82 if (isa<CXXDestructorDecl>(GD.getDecl())) { 83 switch (GD.getDtorType()) { 84 case Dtor_Complete: 85 case Dtor_Deleting: 86 return true; 87 88 case Dtor_Base: 89 return false; 90 91 case Dtor_Comdat: 92 llvm_unreachable("emitting dtor comdat as function?"); 93 } 94 llvm_unreachable("bad dtor kind"); 95 } 96 if (isa<CXXConstructorDecl>(GD.getDecl())) { 97 switch (GD.getCtorType()) { 98 case Ctor_Complete: 99 return true; 100 101 case Ctor_Base: 102 return false; 103 104 case Ctor_CopyingClosure: 105 case Ctor_DefaultClosure: 106 llvm_unreachable("closure ctors in Itanium ABI?"); 107 108 case Ctor_Comdat: 109 llvm_unreachable("emitting ctor comdat as function?"); 110 } 111 llvm_unreachable("bad dtor kind"); 112 } 113 114 // No other kinds. 115 return false; 116 } 117 118 bool isZeroInitializable(const MemberPointerType *MPT) override; 119 120 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override; 121 122 CGCallee 123 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, 124 const Expr *E, 125 Address This, 126 llvm::Value *&ThisPtrForCall, 127 llvm::Value *MemFnPtr, 128 const MemberPointerType *MPT) override; 129 130 llvm::Value * 131 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E, 132 Address Base, 133 llvm::Value *MemPtr, 134 const MemberPointerType *MPT) override; 135 136 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF, 137 const CastExpr *E, 138 llvm::Value *Src) override; 139 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E, 140 llvm::Constant *Src) override; 141 142 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override; 143 144 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override; 145 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT, 146 CharUnits offset) override; 147 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override; 148 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD, 149 CharUnits ThisAdjustment); 150 151 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF, 152 llvm::Value *L, llvm::Value *R, 153 const MemberPointerType *MPT, 154 bool Inequality) override; 155 156 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF, 157 llvm::Value *Addr, 158 const MemberPointerType *MPT) override; 159 160 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE, 161 Address Ptr, QualType ElementType, 162 const CXXDestructorDecl *Dtor) override; 163 164 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override; 165 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override; 166 167 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override; 168 169 llvm::CallInst * 170 emitTerminateForUnexpectedException(CodeGenFunction &CGF, 171 llvm::Value *Exn) override; 172 173 void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD); 174 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override; 175 CatchTypeInfo 176 getAddrOfCXXCatchHandlerType(QualType Ty, 177 QualType CatchHandlerType) override { 178 return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0}; 179 } 180 181 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override; 182 void EmitBadTypeidCall(CodeGenFunction &CGF) override; 183 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy, 184 Address ThisPtr, 185 llvm::Type *StdTypeInfoPtrTy) override; 186 187 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr, 188 QualType SrcRecordTy) override; 189 190 /// Determine whether we know that all instances of type RecordTy will have 191 /// the same vtable pointer values, that is distinct from all other vtable 192 /// pointers. While this is required by the Itanium ABI, it doesn't happen in 193 /// practice in some cases due to language extensions. 194 bool hasUniqueVTablePointer(QualType RecordTy) { 195 const CXXRecordDecl *RD = RecordTy->getAsCXXRecordDecl(); 196 197 // Under -fapple-kext, multiple definitions of the same vtable may be 198 // emitted. 199 if (!CGM.getCodeGenOpts().AssumeUniqueVTables || 200 getContext().getLangOpts().AppleKext) 201 return false; 202 203 // If the type_info* would be null, the vtable might be merged with that of 204 // another type. 205 if (!CGM.shouldEmitRTTI()) 206 return false; 207 208 // If there's only one definition of the vtable in the program, it has a 209 // unique address. 210 if (!llvm::GlobalValue::isWeakForLinker(CGM.getVTableLinkage(RD))) 211 return true; 212 213 // Even if there are multiple definitions of the vtable, they are required 214 // by the ABI to use the same symbol name, so should be merged at load 215 // time. However, if the class has hidden visibility, there can be 216 // different versions of the class in different modules, and the ABI 217 // library might treat them as being the same. 218 if (CGM.GetLLVMVisibility(RD->getVisibility()) != 219 llvm::GlobalValue::DefaultVisibility) 220 return false; 221 222 return true; 223 } 224 225 bool shouldEmitExactDynamicCast(QualType DestRecordTy) override { 226 return hasUniqueVTablePointer(DestRecordTy); 227 } 228 229 llvm::Value *emitDynamicCastCall(CodeGenFunction &CGF, Address Value, 230 QualType SrcRecordTy, QualType DestTy, 231 QualType DestRecordTy, 232 llvm::BasicBlock *CastEnd) override; 233 234 llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address ThisAddr, 235 QualType SrcRecordTy, QualType DestTy, 236 QualType DestRecordTy, 237 llvm::BasicBlock *CastSuccess, 238 llvm::BasicBlock *CastFail) override; 239 240 llvm::Value *emitDynamicCastToVoid(CodeGenFunction &CGF, Address Value, 241 QualType SrcRecordTy) override; 242 243 bool EmitBadCastCall(CodeGenFunction &CGF) override; 244 245 llvm::Value * 246 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This, 247 const CXXRecordDecl *ClassDecl, 248 const CXXRecordDecl *BaseClassDecl) override; 249 250 void EmitCXXConstructors(const CXXConstructorDecl *D) override; 251 252 AddedStructorArgCounts 253 buildStructorSignature(GlobalDecl GD, 254 SmallVectorImpl<CanQualType> &ArgTys) override; 255 256 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor, 257 CXXDtorType DT) const override { 258 // Itanium does not emit any destructor variant as an inline thunk. 259 // Delegating may occur as an optimization, but all variants are either 260 // emitted with external linkage or as linkonce if they are inline and used. 261 return false; 262 } 263 264 void EmitCXXDestructors(const CXXDestructorDecl *D) override; 265 266 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy, 267 FunctionArgList &Params) override; 268 269 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override; 270 271 AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF, 272 const CXXConstructorDecl *D, 273 CXXCtorType Type, 274 bool ForVirtualBase, 275 bool Delegating) override; 276 277 llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF, 278 const CXXDestructorDecl *DD, 279 CXXDtorType Type, 280 bool ForVirtualBase, 281 bool Delegating) override; 282 283 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD, 284 CXXDtorType Type, bool ForVirtualBase, 285 bool Delegating, Address This, 286 QualType ThisTy) override; 287 288 void emitVTableDefinitions(CodeGenVTables &CGVT, 289 const CXXRecordDecl *RD) override; 290 291 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF, 292 CodeGenFunction::VPtr Vptr) override; 293 294 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override { 295 return true; 296 } 297 298 llvm::Constant * 299 getVTableAddressPoint(BaseSubobject Base, 300 const CXXRecordDecl *VTableClass) override; 301 302 llvm::Value *getVTableAddressPointInStructor( 303 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, 304 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override; 305 306 llvm::Value *getVTableAddressPointInStructorWithVTT( 307 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, 308 BaseSubobject Base, const CXXRecordDecl *NearestVBase); 309 310 llvm::Constant * 311 getVTableAddressPointForConstExpr(BaseSubobject Base, 312 const CXXRecordDecl *VTableClass) override; 313 314 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD, 315 CharUnits VPtrOffset) override; 316 317 CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, 318 Address This, llvm::Type *Ty, 319 SourceLocation Loc) override; 320 321 llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF, 322 const CXXDestructorDecl *Dtor, 323 CXXDtorType DtorType, Address This, 324 DeleteOrMemberCallExpr E) override; 325 326 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override; 327 328 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override; 329 bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const; 330 331 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD, 332 bool ReturnAdjustment) override { 333 // Allow inlining of thunks by emitting them with available_externally 334 // linkage together with vtables when needed. 335 if (ForVTable && !Thunk->hasLocalLinkage()) 336 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage); 337 CGM.setGVProperties(Thunk, GD); 338 } 339 340 bool exportThunk() override { return true; } 341 342 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This, 343 const ThisAdjustment &TA) override; 344 345 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret, 346 const ReturnAdjustment &RA) override; 347 348 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *, 349 FunctionArgList &Args) const override { 350 assert(!Args.empty() && "expected the arglist to not be empty!"); 351 return Args.size() - 1; 352 } 353 354 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; } 355 StringRef GetDeletedVirtualCallName() override 356 { return "__cxa_deleted_virtual"; } 357 358 CharUnits getArrayCookieSizeImpl(QualType elementType) override; 359 Address InitializeArrayCookie(CodeGenFunction &CGF, 360 Address NewPtr, 361 llvm::Value *NumElements, 362 const CXXNewExpr *expr, 363 QualType ElementType) override; 364 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, 365 Address allocPtr, 366 CharUnits cookieSize) override; 367 368 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D, 369 llvm::GlobalVariable *DeclPtr, 370 bool PerformInit) override; 371 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, 372 llvm::FunctionCallee dtor, 373 llvm::Constant *addr) override; 374 375 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD, 376 llvm::Value *Val); 377 void EmitThreadLocalInitFuncs( 378 CodeGenModule &CGM, 379 ArrayRef<const VarDecl *> CXXThreadLocals, 380 ArrayRef<llvm::Function *> CXXThreadLocalInits, 381 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override; 382 383 bool usesThreadWrapperFunction(const VarDecl *VD) const override { 384 return !isEmittedWithConstantInitializer(VD) || 385 mayNeedDestruction(VD); 386 } 387 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, 388 QualType LValType) override; 389 390 bool NeedsVTTParameter(GlobalDecl GD) override; 391 392 /**************************** RTTI Uniqueness ******************************/ 393 394 protected: 395 /// Returns true if the ABI requires RTTI type_info objects to be unique 396 /// across a program. 397 virtual bool shouldRTTIBeUnique() const { return true; } 398 399 public: 400 /// What sort of unique-RTTI behavior should we use? 401 enum RTTIUniquenessKind { 402 /// We are guaranteeing, or need to guarantee, that the RTTI string 403 /// is unique. 404 RUK_Unique, 405 406 /// We are not guaranteeing uniqueness for the RTTI string, so we 407 /// can demote to hidden visibility but must use string comparisons. 408 RUK_NonUniqueHidden, 409 410 /// We are not guaranteeing uniqueness for the RTTI string, so we 411 /// have to use string comparisons, but we also have to emit it with 412 /// non-hidden visibility. 413 RUK_NonUniqueVisible 414 }; 415 416 /// Return the required visibility status for the given type and linkage in 417 /// the current ABI. 418 RTTIUniquenessKind 419 classifyRTTIUniqueness(QualType CanTy, 420 llvm::GlobalValue::LinkageTypes Linkage) const; 421 friend class ItaniumRTTIBuilder; 422 423 void emitCXXStructor(GlobalDecl GD) override; 424 425 std::pair<llvm::Value *, const CXXRecordDecl *> 426 LoadVTablePtr(CodeGenFunction &CGF, Address This, 427 const CXXRecordDecl *RD) override; 428 429 private: 430 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const { 431 const auto &VtableLayout = 432 CGM.getItaniumVTableContext().getVTableLayout(RD); 433 434 for (const auto &VtableComponent : VtableLayout.vtable_components()) { 435 // Skip empty slot. 436 if (!VtableComponent.isUsedFunctionPointerKind()) 437 continue; 438 439 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl(); 440 if (!Method->getCanonicalDecl()->isInlined()) 441 continue; 442 443 StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl()); 444 auto *Entry = CGM.GetGlobalValue(Name); 445 // This checks if virtual inline function has already been emitted. 446 // Note that it is possible that this inline function would be emitted 447 // after trying to emit vtable speculatively. Because of this we do 448 // an extra pass after emitting all deferred vtables to find and emit 449 // these vtables opportunistically. 450 if (!Entry || Entry->isDeclaration()) 451 return true; 452 } 453 return false; 454 } 455 456 bool isVTableHidden(const CXXRecordDecl *RD) const { 457 const auto &VtableLayout = 458 CGM.getItaniumVTableContext().getVTableLayout(RD); 459 460 for (const auto &VtableComponent : VtableLayout.vtable_components()) { 461 if (VtableComponent.isRTTIKind()) { 462 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl(); 463 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility) 464 return true; 465 } else if (VtableComponent.isUsedFunctionPointerKind()) { 466 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl(); 467 if (Method->getVisibility() == Visibility::HiddenVisibility && 468 !Method->isDefined()) 469 return true; 470 } 471 } 472 return false; 473 } 474 }; 475 476 class ARMCXXABI : public ItaniumCXXABI { 477 public: 478 ARMCXXABI(CodeGen::CodeGenModule &CGM) : 479 ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true, 480 /*UseARMGuardVarABI=*/true) {} 481 482 bool constructorsAndDestructorsReturnThis() const override { return true; } 483 484 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV, 485 QualType ResTy) override; 486 487 CharUnits getArrayCookieSizeImpl(QualType elementType) override; 488 Address InitializeArrayCookie(CodeGenFunction &CGF, 489 Address NewPtr, 490 llvm::Value *NumElements, 491 const CXXNewExpr *expr, 492 QualType ElementType) override; 493 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr, 494 CharUnits cookieSize) override; 495 }; 496 497 class AppleARM64CXXABI : public ARMCXXABI { 498 public: 499 AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) { 500 Use32BitVTableOffsetABI = true; 501 } 502 503 // ARM64 libraries are prepared for non-unique RTTI. 504 bool shouldRTTIBeUnique() const override { return false; } 505 }; 506 507 class FuchsiaCXXABI final : public ItaniumCXXABI { 508 public: 509 explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM) 510 : ItaniumCXXABI(CGM) {} 511 512 private: 513 bool constructorsAndDestructorsReturnThis() const override { return true; } 514 }; 515 516 class WebAssemblyCXXABI final : public ItaniumCXXABI { 517 public: 518 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM) 519 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true, 520 /*UseARMGuardVarABI=*/true) {} 521 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override; 522 llvm::CallInst * 523 emitTerminateForUnexpectedException(CodeGenFunction &CGF, 524 llvm::Value *Exn) override; 525 526 private: 527 bool constructorsAndDestructorsReturnThis() const override { return true; } 528 bool canCallMismatchedFunctionType() const override { return false; } 529 }; 530 531 class XLCXXABI final : public ItaniumCXXABI { 532 public: 533 explicit XLCXXABI(CodeGen::CodeGenModule &CGM) 534 : ItaniumCXXABI(CGM) {} 535 536 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, 537 llvm::FunctionCallee dtor, 538 llvm::Constant *addr) override; 539 540 bool useSinitAndSterm() const override { return true; } 541 542 private: 543 void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub, 544 llvm::Constant *addr); 545 }; 546 } 547 548 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) { 549 switch (CGM.getContext().getCXXABIKind()) { 550 // For IR-generation purposes, there's no significant difference 551 // between the ARM and iOS ABIs. 552 case TargetCXXABI::GenericARM: 553 case TargetCXXABI::iOS: 554 case TargetCXXABI::WatchOS: 555 return new ARMCXXABI(CGM); 556 557 case TargetCXXABI::AppleARM64: 558 return new AppleARM64CXXABI(CGM); 559 560 case TargetCXXABI::Fuchsia: 561 return new FuchsiaCXXABI(CGM); 562 563 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't 564 // include the other 32-bit ARM oddities: constructor/destructor return values 565 // and array cookies. 566 case TargetCXXABI::GenericAArch64: 567 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true, 568 /*UseARMGuardVarABI=*/true); 569 570 case TargetCXXABI::GenericMIPS: 571 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true); 572 573 case TargetCXXABI::WebAssembly: 574 return new WebAssemblyCXXABI(CGM); 575 576 case TargetCXXABI::XL: 577 return new XLCXXABI(CGM); 578 579 case TargetCXXABI::GenericItanium: 580 if (CGM.getContext().getTargetInfo().getTriple().getArch() 581 == llvm::Triple::le32) { 582 // For PNaCl, use ARM-style method pointers so that PNaCl code 583 // does not assume anything about the alignment of function 584 // pointers. 585 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true); 586 } 587 return new ItaniumCXXABI(CGM); 588 589 case TargetCXXABI::Microsoft: 590 llvm_unreachable("Microsoft ABI is not Itanium-based"); 591 } 592 llvm_unreachable("bad ABI kind"); 593 } 594 595 llvm::Type * 596 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) { 597 if (MPT->isMemberDataPointer()) 598 return CGM.PtrDiffTy; 599 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy); 600 } 601 602 /// In the Itanium and ARM ABIs, method pointers have the form: 603 /// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr; 604 /// 605 /// In the Itanium ABI: 606 /// - method pointers are virtual if (memptr.ptr & 1) is nonzero 607 /// - the this-adjustment is (memptr.adj) 608 /// - the virtual offset is (memptr.ptr - 1) 609 /// 610 /// In the ARM ABI: 611 /// - method pointers are virtual if (memptr.adj & 1) is nonzero 612 /// - the this-adjustment is (memptr.adj >> 1) 613 /// - the virtual offset is (memptr.ptr) 614 /// ARM uses 'adj' for the virtual flag because Thumb functions 615 /// may be only single-byte aligned. 616 /// 617 /// If the member is virtual, the adjusted 'this' pointer points 618 /// to a vtable pointer from which the virtual offset is applied. 619 /// 620 /// If the member is non-virtual, memptr.ptr is the address of 621 /// the function to call. 622 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer( 623 CodeGenFunction &CGF, const Expr *E, Address ThisAddr, 624 llvm::Value *&ThisPtrForCall, 625 llvm::Value *MemFnPtr, const MemberPointerType *MPT) { 626 CGBuilderTy &Builder = CGF.Builder; 627 628 const FunctionProtoType *FPT = 629 MPT->getPointeeType()->castAs<FunctionProtoType>(); 630 auto *RD = 631 cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl()); 632 633 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1); 634 635 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual"); 636 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual"); 637 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end"); 638 639 // Extract memptr.adj, which is in the second field. 640 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj"); 641 642 // Compute the true adjustment. 643 llvm::Value *Adj = RawAdj; 644 if (UseARMMethodPtrABI) 645 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted"); 646 647 // Apply the adjustment and cast back to the original struct type 648 // for consistency. 649 llvm::Value *This = ThisAddr.getPointer(); 650 This = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), This, Adj); 651 ThisPtrForCall = This; 652 653 // Load the function pointer. 654 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr"); 655 656 // If the LSB in the function pointer is 1, the function pointer points to 657 // a virtual function. 658 llvm::Value *IsVirtual; 659 if (UseARMMethodPtrABI) 660 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1); 661 else 662 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1); 663 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual"); 664 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual); 665 666 // In the virtual path, the adjustment left 'This' pointing to the 667 // vtable of the correct base subobject. The "function pointer" is an 668 // offset within the vtable (+1 for the virtual flag on non-ARM). 669 CGF.EmitBlock(FnVirtual); 670 671 // Cast the adjusted this to a pointer to vtable pointer and load. 672 llvm::Type *VTableTy = CGF.CGM.GlobalsInt8PtrTy; 673 CharUnits VTablePtrAlign = 674 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD, 675 CGF.getPointerAlign()); 676 llvm::Value *VTable = CGF.GetVTablePtr( 677 Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD); 678 679 // Apply the offset. 680 // On ARM64, to reserve extra space in virtual member function pointers, 681 // we only pay attention to the low 32 bits of the offset. 682 llvm::Value *VTableOffset = FnAsInt; 683 if (!UseARMMethodPtrABI) 684 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1); 685 if (Use32BitVTableOffsetABI) { 686 VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty); 687 VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy); 688 } 689 690 // Check the address of the function pointer if CFI on member function 691 // pointers is enabled. 692 llvm::Constant *CheckSourceLocation; 693 llvm::Constant *CheckTypeDesc; 694 bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) && 695 CGM.HasHiddenLTOVisibility(RD); 696 bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination && 697 CGM.HasHiddenLTOVisibility(RD); 698 bool ShouldEmitWPDInfo = 699 CGM.getCodeGenOpts().WholeProgramVTables && 700 // Don't insert type tests if we are forcing public visibility. 701 !CGM.AlwaysHasLTOVisibilityPublic(RD); 702 llvm::Value *VirtualFn = nullptr; 703 704 { 705 CodeGenFunction::SanitizerScope SanScope(&CGF); 706 llvm::Value *TypeId = nullptr; 707 llvm::Value *CheckResult = nullptr; 708 709 if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) { 710 // If doing CFI, VFE or WPD, we will need the metadata node to check 711 // against. 712 llvm::Metadata *MD = 713 CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0)); 714 TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD); 715 } 716 717 if (ShouldEmitVFEInfo) { 718 llvm::Value *VFPAddr = 719 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset); 720 721 // If doing VFE, load from the vtable with a type.checked.load intrinsic 722 // call. Note that we use the GEP to calculate the address to load from 723 // and pass 0 as the offset to the intrinsic. This is because every 724 // vtable slot of the correct type is marked with matching metadata, and 725 // we know that the load must be from one of these slots. 726 llvm::Value *CheckedLoad = Builder.CreateCall( 727 CGM.getIntrinsic(llvm::Intrinsic::type_checked_load), 728 {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId}); 729 CheckResult = Builder.CreateExtractValue(CheckedLoad, 1); 730 VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0); 731 } else { 732 // When not doing VFE, emit a normal load, as it allows more 733 // optimisations than type.checked.load. 734 if (ShouldEmitCFICheck || ShouldEmitWPDInfo) { 735 llvm::Value *VFPAddr = 736 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset); 737 llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD) 738 ? llvm::Intrinsic::type_test 739 : llvm::Intrinsic::public_type_test; 740 741 CheckResult = 742 Builder.CreateCall(CGM.getIntrinsic(IID), {VFPAddr, TypeId}); 743 } 744 745 if (CGM.getItaniumVTableContext().isRelativeLayout()) { 746 VirtualFn = CGF.Builder.CreateCall( 747 CGM.getIntrinsic(llvm::Intrinsic::load_relative, 748 {VTableOffset->getType()}), 749 {VTable, VTableOffset}); 750 } else { 751 llvm::Value *VFPAddr = 752 CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset); 753 VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.UnqualPtrTy, VFPAddr, 754 CGF.getPointerAlign(), 755 "memptr.virtualfn"); 756 } 757 } 758 assert(VirtualFn && "Virtual fuction pointer not created!"); 759 assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo || 760 CheckResult) && 761 "Check result required but not created!"); 762 763 if (ShouldEmitCFICheck) { 764 // If doing CFI, emit the check. 765 CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc()); 766 CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0)); 767 llvm::Constant *StaticData[] = { 768 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall), 769 CheckSourceLocation, 770 CheckTypeDesc, 771 }; 772 773 if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) { 774 CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail); 775 } else { 776 llvm::Value *AllVtables = llvm::MetadataAsValue::get( 777 CGM.getLLVMContext(), 778 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables")); 779 llvm::Value *ValidVtable = Builder.CreateCall( 780 CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables}); 781 CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall), 782 SanitizerHandler::CFICheckFail, StaticData, 783 {VTable, ValidVtable}); 784 } 785 786 FnVirtual = Builder.GetInsertBlock(); 787 } 788 } // End of sanitizer scope 789 790 CGF.EmitBranch(FnEnd); 791 792 // In the non-virtual path, the function pointer is actually a 793 // function pointer. 794 CGF.EmitBlock(FnNonVirtual); 795 llvm::Value *NonVirtualFn = 796 Builder.CreateIntToPtr(FnAsInt, CGF.UnqualPtrTy, "memptr.nonvirtualfn"); 797 798 // Check the function pointer if CFI on member function pointers is enabled. 799 if (ShouldEmitCFICheck) { 800 CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl(); 801 if (RD->hasDefinition()) { 802 CodeGenFunction::SanitizerScope SanScope(&CGF); 803 804 llvm::Constant *StaticData[] = { 805 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall), 806 CheckSourceLocation, 807 CheckTypeDesc, 808 }; 809 810 llvm::Value *Bit = Builder.getFalse(); 811 for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) { 812 llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType( 813 getContext().getMemberPointerType( 814 MPT->getPointeeType(), 815 getContext().getRecordType(Base).getTypePtr())); 816 llvm::Value *TypeId = 817 llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD); 818 819 llvm::Value *TypeTest = 820 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test), 821 {NonVirtualFn, TypeId}); 822 Bit = Builder.CreateOr(Bit, TypeTest); 823 } 824 825 CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall), 826 SanitizerHandler::CFICheckFail, StaticData, 827 {NonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)}); 828 829 FnNonVirtual = Builder.GetInsertBlock(); 830 } 831 } 832 833 // We're done. 834 CGF.EmitBlock(FnEnd); 835 llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.UnqualPtrTy, 2); 836 CalleePtr->addIncoming(VirtualFn, FnVirtual); 837 CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual); 838 839 CGCallee Callee(FPT, CalleePtr); 840 return Callee; 841 } 842 843 /// Compute an l-value by applying the given pointer-to-member to a 844 /// base object. 845 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress( 846 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr, 847 const MemberPointerType *MPT) { 848 assert(MemPtr->getType() == CGM.PtrDiffTy); 849 850 CGBuilderTy &Builder = CGF.Builder; 851 852 // Apply the offset, which we assume is non-null. 853 return Builder.CreateInBoundsGEP(CGF.Int8Ty, Base.getPointer(), MemPtr, 854 "memptr.offset"); 855 } 856 857 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer 858 /// conversion. 859 /// 860 /// Bitcast conversions are always a no-op under Itanium. 861 /// 862 /// Obligatory offset/adjustment diagram: 863 /// <-- offset --> <-- adjustment --> 864 /// |--------------------------|----------------------|--------------------| 865 /// ^Derived address point ^Base address point ^Member address point 866 /// 867 /// So when converting a base member pointer to a derived member pointer, 868 /// we add the offset to the adjustment because the address point has 869 /// decreased; and conversely, when converting a derived MP to a base MP 870 /// we subtract the offset from the adjustment because the address point 871 /// has increased. 872 /// 873 /// The standard forbids (at compile time) conversion to and from 874 /// virtual bases, which is why we don't have to consider them here. 875 /// 876 /// The standard forbids (at run time) casting a derived MP to a base 877 /// MP when the derived MP does not point to a member of the base. 878 /// This is why -1 is a reasonable choice for null data member 879 /// pointers. 880 llvm::Value * 881 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF, 882 const CastExpr *E, 883 llvm::Value *src) { 884 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer || 885 E->getCastKind() == CK_BaseToDerivedMemberPointer || 886 E->getCastKind() == CK_ReinterpretMemberPointer); 887 888 // Under Itanium, reinterprets don't require any additional processing. 889 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src; 890 891 // Use constant emission if we can. 892 if (isa<llvm::Constant>(src)) 893 return EmitMemberPointerConversion(E, cast<llvm::Constant>(src)); 894 895 llvm::Constant *adj = getMemberPointerAdjustment(E); 896 if (!adj) return src; 897 898 CGBuilderTy &Builder = CGF.Builder; 899 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer); 900 901 const MemberPointerType *destTy = 902 E->getType()->castAs<MemberPointerType>(); 903 904 // For member data pointers, this is just a matter of adding the 905 // offset if the source is non-null. 906 if (destTy->isMemberDataPointer()) { 907 llvm::Value *dst; 908 if (isDerivedToBase) 909 dst = Builder.CreateNSWSub(src, adj, "adj"); 910 else 911 dst = Builder.CreateNSWAdd(src, adj, "adj"); 912 913 // Null check. 914 llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType()); 915 llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull"); 916 return Builder.CreateSelect(isNull, src, dst); 917 } 918 919 // The this-adjustment is left-shifted by 1 on ARM. 920 if (UseARMMethodPtrABI) { 921 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue(); 922 offset <<= 1; 923 adj = llvm::ConstantInt::get(adj->getType(), offset); 924 } 925 926 llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj"); 927 llvm::Value *dstAdj; 928 if (isDerivedToBase) 929 dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj"); 930 else 931 dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj"); 932 933 return Builder.CreateInsertValue(src, dstAdj, 1); 934 } 935 936 llvm::Constant * 937 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E, 938 llvm::Constant *src) { 939 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer || 940 E->getCastKind() == CK_BaseToDerivedMemberPointer || 941 E->getCastKind() == CK_ReinterpretMemberPointer); 942 943 // Under Itanium, reinterprets don't require any additional processing. 944 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src; 945 946 // If the adjustment is trivial, we don't need to do anything. 947 llvm::Constant *adj = getMemberPointerAdjustment(E); 948 if (!adj) return src; 949 950 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer); 951 952 const MemberPointerType *destTy = 953 E->getType()->castAs<MemberPointerType>(); 954 955 // For member data pointers, this is just a matter of adding the 956 // offset if the source is non-null. 957 if (destTy->isMemberDataPointer()) { 958 // null maps to null. 959 if (src->isAllOnesValue()) return src; 960 961 if (isDerivedToBase) 962 return llvm::ConstantExpr::getNSWSub(src, adj); 963 else 964 return llvm::ConstantExpr::getNSWAdd(src, adj); 965 } 966 967 // The this-adjustment is left-shifted by 1 on ARM. 968 if (UseARMMethodPtrABI) { 969 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue(); 970 offset <<= 1; 971 adj = llvm::ConstantInt::get(adj->getType(), offset); 972 } 973 974 llvm::Constant *srcAdj = src->getAggregateElement(1); 975 llvm::Constant *dstAdj; 976 if (isDerivedToBase) 977 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj); 978 else 979 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj); 980 981 llvm::Constant *res = ConstantFoldInsertValueInstruction(src, dstAdj, 1); 982 assert(res != nullptr && "Folding must succeed"); 983 return res; 984 } 985 986 llvm::Constant * 987 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) { 988 // Itanium C++ ABI 2.3: 989 // A NULL pointer is represented as -1. 990 if (MPT->isMemberDataPointer()) 991 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true); 992 993 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0); 994 llvm::Constant *Values[2] = { Zero, Zero }; 995 return llvm::ConstantStruct::getAnon(Values); 996 } 997 998 llvm::Constant * 999 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT, 1000 CharUnits offset) { 1001 // Itanium C++ ABI 2.3: 1002 // A pointer to data member is an offset from the base address of 1003 // the class object containing it, represented as a ptrdiff_t 1004 return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity()); 1005 } 1006 1007 llvm::Constant * 1008 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) { 1009 return BuildMemberPointer(MD, CharUnits::Zero()); 1010 } 1011 1012 llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD, 1013 CharUnits ThisAdjustment) { 1014 assert(MD->isInstance() && "Member function must not be static!"); 1015 1016 CodeGenTypes &Types = CGM.getTypes(); 1017 1018 // Get the function pointer (or index if this is a virtual function). 1019 llvm::Constant *MemPtr[2]; 1020 if (MD->isVirtual()) { 1021 uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD); 1022 uint64_t VTableOffset; 1023 if (CGM.getItaniumVTableContext().isRelativeLayout()) { 1024 // Multiply by 4-byte relative offsets. 1025 VTableOffset = Index * 4; 1026 } else { 1027 const ASTContext &Context = getContext(); 1028 CharUnits PointerWidth = Context.toCharUnitsFromBits( 1029 Context.getTargetInfo().getPointerWidth(LangAS::Default)); 1030 VTableOffset = Index * PointerWidth.getQuantity(); 1031 } 1032 1033 if (UseARMMethodPtrABI) { 1034 // ARM C++ ABI 3.2.1: 1035 // This ABI specifies that adj contains twice the this 1036 // adjustment, plus 1 if the member function is virtual. The 1037 // least significant bit of adj then makes exactly the same 1038 // discrimination as the least significant bit of ptr does for 1039 // Itanium. 1040 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset); 1041 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy, 1042 2 * ThisAdjustment.getQuantity() + 1); 1043 } else { 1044 // Itanium C++ ABI 2.3: 1045 // For a virtual function, [the pointer field] is 1 plus the 1046 // virtual table offset (in bytes) of the function, 1047 // represented as a ptrdiff_t. 1048 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1); 1049 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy, 1050 ThisAdjustment.getQuantity()); 1051 } 1052 } else { 1053 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>(); 1054 llvm::Type *Ty; 1055 // Check whether the function has a computable LLVM signature. 1056 if (Types.isFuncTypeConvertible(FPT)) { 1057 // The function has a computable LLVM signature; use the correct type. 1058 Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD)); 1059 } else { 1060 // Use an arbitrary non-function type to tell GetAddrOfFunction that the 1061 // function type is incomplete. 1062 Ty = CGM.PtrDiffTy; 1063 } 1064 llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty); 1065 1066 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy); 1067 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy, 1068 (UseARMMethodPtrABI ? 2 : 1) * 1069 ThisAdjustment.getQuantity()); 1070 } 1071 1072 return llvm::ConstantStruct::getAnon(MemPtr); 1073 } 1074 1075 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP, 1076 QualType MPType) { 1077 const MemberPointerType *MPT = MPType->castAs<MemberPointerType>(); 1078 const ValueDecl *MPD = MP.getMemberPointerDecl(); 1079 if (!MPD) 1080 return EmitNullMemberPointer(MPT); 1081 1082 CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP); 1083 1084 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD)) 1085 return BuildMemberPointer(MD, ThisAdjustment); 1086 1087 CharUnits FieldOffset = 1088 getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD)); 1089 return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset); 1090 } 1091 1092 /// The comparison algorithm is pretty easy: the member pointers are 1093 /// the same if they're either bitwise identical *or* both null. 1094 /// 1095 /// ARM is different here only because null-ness is more complicated. 1096 llvm::Value * 1097 ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF, 1098 llvm::Value *L, 1099 llvm::Value *R, 1100 const MemberPointerType *MPT, 1101 bool Inequality) { 1102 CGBuilderTy &Builder = CGF.Builder; 1103 1104 llvm::ICmpInst::Predicate Eq; 1105 llvm::Instruction::BinaryOps And, Or; 1106 if (Inequality) { 1107 Eq = llvm::ICmpInst::ICMP_NE; 1108 And = llvm::Instruction::Or; 1109 Or = llvm::Instruction::And; 1110 } else { 1111 Eq = llvm::ICmpInst::ICMP_EQ; 1112 And = llvm::Instruction::And; 1113 Or = llvm::Instruction::Or; 1114 } 1115 1116 // Member data pointers are easy because there's a unique null 1117 // value, so it just comes down to bitwise equality. 1118 if (MPT->isMemberDataPointer()) 1119 return Builder.CreateICmp(Eq, L, R); 1120 1121 // For member function pointers, the tautologies are more complex. 1122 // The Itanium tautology is: 1123 // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj)) 1124 // The ARM tautology is: 1125 // (L == R) <==> (L.ptr == R.ptr && 1126 // (L.adj == R.adj || 1127 // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0))) 1128 // The inequality tautologies have exactly the same structure, except 1129 // applying De Morgan's laws. 1130 1131 llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr"); 1132 llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr"); 1133 1134 // This condition tests whether L.ptr == R.ptr. This must always be 1135 // true for equality to hold. 1136 llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr"); 1137 1138 // This condition, together with the assumption that L.ptr == R.ptr, 1139 // tests whether the pointers are both null. ARM imposes an extra 1140 // condition. 1141 llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType()); 1142 llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null"); 1143 1144 // This condition tests whether L.adj == R.adj. If this isn't 1145 // true, the pointers are unequal unless they're both null. 1146 llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj"); 1147 llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj"); 1148 llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj"); 1149 1150 // Null member function pointers on ARM clear the low bit of Adj, 1151 // so the zero condition has to check that neither low bit is set. 1152 if (UseARMMethodPtrABI) { 1153 llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1); 1154 1155 // Compute (l.adj | r.adj) & 1 and test it against zero. 1156 llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj"); 1157 llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One); 1158 llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero, 1159 "cmp.or.adj"); 1160 EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero); 1161 } 1162 1163 // Tie together all our conditions. 1164 llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq); 1165 Result = Builder.CreateBinOp(And, PtrEq, Result, 1166 Inequality ? "memptr.ne" : "memptr.eq"); 1167 return Result; 1168 } 1169 1170 llvm::Value * 1171 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF, 1172 llvm::Value *MemPtr, 1173 const MemberPointerType *MPT) { 1174 CGBuilderTy &Builder = CGF.Builder; 1175 1176 /// For member data pointers, this is just a check against -1. 1177 if (MPT->isMemberDataPointer()) { 1178 assert(MemPtr->getType() == CGM.PtrDiffTy); 1179 llvm::Value *NegativeOne = 1180 llvm::Constant::getAllOnesValue(MemPtr->getType()); 1181 return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool"); 1182 } 1183 1184 // In Itanium, a member function pointer is not null if 'ptr' is not null. 1185 llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr"); 1186 1187 llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0); 1188 llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool"); 1189 1190 // On ARM, a member function pointer is also non-null if the low bit of 'adj' 1191 // (the virtual bit) is set. 1192 if (UseARMMethodPtrABI) { 1193 llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1); 1194 llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj"); 1195 llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit"); 1196 llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero, 1197 "memptr.isvirtual"); 1198 Result = Builder.CreateOr(Result, IsVirtual); 1199 } 1200 1201 return Result; 1202 } 1203 1204 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const { 1205 const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl(); 1206 if (!RD) 1207 return false; 1208 1209 // If C++ prohibits us from making a copy, return by address. 1210 if (!RD->canPassInRegisters()) { 1211 auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType()); 1212 FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false); 1213 return true; 1214 } 1215 return false; 1216 } 1217 1218 /// The Itanium ABI requires non-zero initialization only for data 1219 /// member pointers, for which '0' is a valid offset. 1220 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) { 1221 return MPT->isMemberFunctionPointer(); 1222 } 1223 1224 /// The Itanium ABI always places an offset to the complete object 1225 /// at entry -2 in the vtable. 1226 void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF, 1227 const CXXDeleteExpr *DE, 1228 Address Ptr, 1229 QualType ElementType, 1230 const CXXDestructorDecl *Dtor) { 1231 bool UseGlobalDelete = DE->isGlobalDelete(); 1232 if (UseGlobalDelete) { 1233 // Derive the complete-object pointer, which is what we need 1234 // to pass to the deallocation function. 1235 1236 // Grab the vtable pointer as an intptr_t*. 1237 auto *ClassDecl = 1238 cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl()); 1239 llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.UnqualPtrTy, ClassDecl); 1240 1241 // Track back to entry -2 and pull out the offset there. 1242 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64( 1243 CGF.IntPtrTy, VTable, -2, "complete-offset.ptr"); 1244 llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr, 1245 CGF.getPointerAlign()); 1246 1247 // Apply the offset. 1248 llvm::Value *CompletePtr = Ptr.getPointer(); 1249 CompletePtr = 1250 CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset); 1251 1252 // If we're supposed to call the global delete, make sure we do so 1253 // even if the destructor throws. 1254 CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr, 1255 ElementType); 1256 } 1257 1258 // FIXME: Provide a source location here even though there's no 1259 // CXXMemberCallExpr for dtor call. 1260 CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting; 1261 EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE); 1262 1263 if (UseGlobalDelete) 1264 CGF.PopCleanupBlock(); 1265 } 1266 1267 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) { 1268 // void __cxa_rethrow(); 1269 1270 llvm::FunctionType *FTy = 1271 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); 1272 1273 llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow"); 1274 1275 if (isNoReturn) 1276 CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, std::nullopt); 1277 else 1278 CGF.EmitRuntimeCallOrInvoke(Fn); 1279 } 1280 1281 static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) { 1282 // void *__cxa_allocate_exception(size_t thrown_size); 1283 1284 llvm::FunctionType *FTy = 1285 llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false); 1286 1287 return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception"); 1288 } 1289 1290 static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) { 1291 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo, 1292 // void (*dest) (void *)); 1293 1294 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.GlobalsInt8PtrTy, CGM.Int8PtrTy }; 1295 llvm::FunctionType *FTy = 1296 llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false); 1297 1298 return CGM.CreateRuntimeFunction(FTy, "__cxa_throw"); 1299 } 1300 1301 void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) { 1302 QualType ThrowType = E->getSubExpr()->getType(); 1303 // Now allocate the exception object. 1304 llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType()); 1305 uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity(); 1306 1307 llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM); 1308 llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall( 1309 AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception"); 1310 1311 CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment(); 1312 CGF.EmitAnyExprToExn( 1313 E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign)); 1314 1315 // Now throw the exception. 1316 llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType, 1317 /*ForEH=*/true); 1318 1319 // The address of the destructor. If the exception type has a 1320 // trivial destructor (or isn't a record), we just pass null. 1321 llvm::Constant *Dtor = nullptr; 1322 if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) { 1323 CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl()); 1324 if (!Record->hasTrivialDestructor()) { 1325 CXXDestructorDecl *DtorD = Record->getDestructor(); 1326 Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete)); 1327 } 1328 } 1329 if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy); 1330 1331 llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor }; 1332 CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args); 1333 } 1334 1335 static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) { 1336 // void *__dynamic_cast(const void *sub, 1337 // global_as const abi::__class_type_info *src, 1338 // global_as const abi::__class_type_info *dst, 1339 // std::ptrdiff_t src2dst_offset); 1340 1341 llvm::Type *Int8PtrTy = CGF.Int8PtrTy; 1342 llvm::Type *GlobInt8PtrTy = CGF.GlobalsInt8PtrTy; 1343 llvm::Type *PtrDiffTy = 1344 CGF.ConvertType(CGF.getContext().getPointerDiffType()); 1345 1346 llvm::Type *Args[4] = { Int8PtrTy, GlobInt8PtrTy, GlobInt8PtrTy, PtrDiffTy }; 1347 1348 llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false); 1349 1350 // Mark the function as nounwind readonly. 1351 llvm::AttrBuilder FuncAttrs(CGF.getLLVMContext()); 1352 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1353 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly()); 1354 llvm::AttributeList Attrs = llvm::AttributeList::get( 1355 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs); 1356 1357 return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs); 1358 } 1359 1360 static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) { 1361 // void __cxa_bad_cast(); 1362 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false); 1363 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast"); 1364 } 1365 1366 /// Compute the src2dst_offset hint as described in the 1367 /// Itanium C++ ABI [2.9.7] 1368 static CharUnits computeOffsetHint(ASTContext &Context, 1369 const CXXRecordDecl *Src, 1370 const CXXRecordDecl *Dst) { 1371 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true, 1372 /*DetectVirtual=*/false); 1373 1374 // If Dst is not derived from Src we can skip the whole computation below and 1375 // return that Src is not a public base of Dst. Record all inheritance paths. 1376 if (!Dst->isDerivedFrom(Src, Paths)) 1377 return CharUnits::fromQuantity(-2ULL); 1378 1379 unsigned NumPublicPaths = 0; 1380 CharUnits Offset; 1381 1382 // Now walk all possible inheritance paths. 1383 for (const CXXBasePath &Path : Paths) { 1384 if (Path.Access != AS_public) // Ignore non-public inheritance. 1385 continue; 1386 1387 ++NumPublicPaths; 1388 1389 for (const CXXBasePathElement &PathElement : Path) { 1390 // If the path contains a virtual base class we can't give any hint. 1391 // -1: no hint. 1392 if (PathElement.Base->isVirtual()) 1393 return CharUnits::fromQuantity(-1ULL); 1394 1395 if (NumPublicPaths > 1) // Won't use offsets, skip computation. 1396 continue; 1397 1398 // Accumulate the base class offsets. 1399 const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class); 1400 Offset += L.getBaseClassOffset( 1401 PathElement.Base->getType()->getAsCXXRecordDecl()); 1402 } 1403 } 1404 1405 // -2: Src is not a public base of Dst. 1406 if (NumPublicPaths == 0) 1407 return CharUnits::fromQuantity(-2ULL); 1408 1409 // -3: Src is a multiple public base type but never a virtual base type. 1410 if (NumPublicPaths > 1) 1411 return CharUnits::fromQuantity(-3ULL); 1412 1413 // Otherwise, the Src type is a unique public nonvirtual base type of Dst. 1414 // Return the offset of Src from the origin of Dst. 1415 return Offset; 1416 } 1417 1418 static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) { 1419 // void __cxa_bad_typeid(); 1420 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false); 1421 1422 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid"); 1423 } 1424 1425 bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref, 1426 QualType SrcRecordTy) { 1427 return IsDeref; 1428 } 1429 1430 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) { 1431 llvm::FunctionCallee Fn = getBadTypeidFn(CGF); 1432 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn); 1433 Call->setDoesNotReturn(); 1434 CGF.Builder.CreateUnreachable(); 1435 } 1436 1437 llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF, 1438 QualType SrcRecordTy, 1439 Address ThisPtr, 1440 llvm::Type *StdTypeInfoPtrTy) { 1441 auto *ClassDecl = 1442 cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl()); 1443 llvm::Value *Value = CGF.GetVTablePtr(ThisPtr, CGM.GlobalsInt8PtrTy, 1444 ClassDecl); 1445 1446 if (CGM.getItaniumVTableContext().isRelativeLayout()) { 1447 // Load the type info. 1448 Value = CGF.Builder.CreateCall( 1449 CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}), 1450 {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)}); 1451 } else { 1452 // Load the type info. 1453 Value = 1454 CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL); 1455 } 1456 return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value, 1457 CGF.getPointerAlign()); 1458 } 1459 1460 bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr, 1461 QualType SrcRecordTy) { 1462 return SrcIsPtr; 1463 } 1464 1465 llvm::Value *ItaniumCXXABI::emitDynamicCastCall( 1466 CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy, 1467 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) { 1468 llvm::Type *PtrDiffLTy = 1469 CGF.ConvertType(CGF.getContext().getPointerDiffType()); 1470 1471 llvm::Value *SrcRTTI = 1472 CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType()); 1473 llvm::Value *DestRTTI = 1474 CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType()); 1475 1476 // Compute the offset hint. 1477 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl(); 1478 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl(); 1479 llvm::Value *OffsetHint = llvm::ConstantInt::get( 1480 PtrDiffLTy, 1481 computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity()); 1482 1483 // Emit the call to __dynamic_cast. 1484 llvm::Value *Args[] = {ThisAddr.getPointer(), SrcRTTI, DestRTTI, OffsetHint}; 1485 llvm::Value *Value = 1486 CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), Args); 1487 1488 /// C++ [expr.dynamic.cast]p9: 1489 /// A failed cast to reference type throws std::bad_cast 1490 if (DestTy->isReferenceType()) { 1491 llvm::BasicBlock *BadCastBlock = 1492 CGF.createBasicBlock("dynamic_cast.bad_cast"); 1493 1494 llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value); 1495 CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd); 1496 1497 CGF.EmitBlock(BadCastBlock); 1498 EmitBadCastCall(CGF); 1499 } 1500 1501 return Value; 1502 } 1503 1504 llvm::Value *ItaniumCXXABI::emitExactDynamicCast( 1505 CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy, 1506 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastSuccess, 1507 llvm::BasicBlock *CastFail) { 1508 ASTContext &Context = getContext(); 1509 1510 // Find all the inheritance paths. 1511 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl(); 1512 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl(); 1513 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true, 1514 /*DetectVirtual=*/false); 1515 (void)DestDecl->isDerivedFrom(SrcDecl, Paths); 1516 1517 // Find an offset within `DestDecl` where a `SrcDecl` instance and its vptr 1518 // might appear. 1519 std::optional<CharUnits> Offset; 1520 for (const CXXBasePath &Path : Paths) { 1521 // dynamic_cast only finds public inheritance paths. 1522 if (Path.Access != AS_public) 1523 continue; 1524 1525 CharUnits PathOffset; 1526 for (const CXXBasePathElement &PathElement : Path) { 1527 // Find the offset along this inheritance step. 1528 const CXXRecordDecl *Base = 1529 PathElement.Base->getType()->getAsCXXRecordDecl(); 1530 if (PathElement.Base->isVirtual()) { 1531 // For a virtual base class, we know that the derived class is exactly 1532 // DestDecl, so we can use the vbase offset from its layout. 1533 const ASTRecordLayout &L = Context.getASTRecordLayout(DestDecl); 1534 PathOffset = L.getVBaseClassOffset(Base); 1535 } else { 1536 const ASTRecordLayout &L = 1537 Context.getASTRecordLayout(PathElement.Class); 1538 PathOffset += L.getBaseClassOffset(Base); 1539 } 1540 } 1541 1542 if (!Offset) 1543 Offset = PathOffset; 1544 else if (Offset != PathOffset) { 1545 // Base appears in at least two different places. Find the most-derived 1546 // object and see if it's a DestDecl. Note that the most-derived object 1547 // must be at least as aligned as this base class subobject, and must 1548 // have a vptr at offset 0. 1549 ThisAddr = Address(emitDynamicCastToVoid(CGF, ThisAddr, SrcRecordTy), 1550 CGF.VoidPtrTy, ThisAddr.getAlignment()); 1551 SrcDecl = DestDecl; 1552 Offset = CharUnits::Zero(); 1553 break; 1554 } 1555 } 1556 1557 if (!Offset) { 1558 // If there are no public inheritance paths, the cast always fails. 1559 CGF.EmitBranch(CastFail); 1560 return llvm::PoisonValue::get(CGF.VoidPtrTy); 1561 } 1562 1563 // Compare the vptr against the expected vptr for the destination type at 1564 // this offset. Note that we do not know what type ThisAddr points to in 1565 // the case where the derived class multiply inherits from the base class 1566 // so we can't use GetVTablePtr, so we load the vptr directly instead. 1567 llvm::Instruction *VPtr = CGF.Builder.CreateLoad( 1568 ThisAddr.withElementType(CGF.VoidPtrPtrTy), "vtable"); 1569 CGM.DecorateInstructionWithTBAA( 1570 VPtr, CGM.getTBAAVTablePtrAccessInfo(CGF.VoidPtrPtrTy)); 1571 llvm::Value *Success = CGF.Builder.CreateICmpEQ( 1572 VPtr, getVTableAddressPoint(BaseSubobject(SrcDecl, *Offset), DestDecl)); 1573 llvm::Value *Result = ThisAddr.getPointer(); 1574 if (!Offset->isZero()) 1575 Result = CGF.Builder.CreateInBoundsGEP( 1576 CGF.CharTy, Result, 1577 {llvm::ConstantInt::get(CGF.PtrDiffTy, -Offset->getQuantity())}); 1578 CGF.Builder.CreateCondBr(Success, CastSuccess, CastFail); 1579 return Result; 1580 } 1581 1582 llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF, 1583 Address ThisAddr, 1584 QualType SrcRecordTy) { 1585 auto *ClassDecl = 1586 cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl()); 1587 llvm::Value *OffsetToTop; 1588 if (CGM.getItaniumVTableContext().isRelativeLayout()) { 1589 // Get the vtable pointer. 1590 llvm::Value *VTable = 1591 CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl); 1592 1593 // Get the offset-to-top from the vtable. 1594 OffsetToTop = 1595 CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U); 1596 OffsetToTop = CGF.Builder.CreateAlignedLoad( 1597 CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top"); 1598 } else { 1599 llvm::Type *PtrDiffLTy = 1600 CGF.ConvertType(CGF.getContext().getPointerDiffType()); 1601 1602 // Get the vtable pointer. 1603 llvm::Value *VTable = 1604 CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl); 1605 1606 // Get the offset-to-top from the vtable. 1607 OffsetToTop = 1608 CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL); 1609 OffsetToTop = CGF.Builder.CreateAlignedLoad( 1610 PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top"); 1611 } 1612 // Finally, add the offset to the pointer. 1613 return CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ThisAddr.getPointer(), 1614 OffsetToTop); 1615 } 1616 1617 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) { 1618 llvm::FunctionCallee Fn = getBadCastFn(CGF); 1619 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn); 1620 Call->setDoesNotReturn(); 1621 CGF.Builder.CreateUnreachable(); 1622 return true; 1623 } 1624 1625 llvm::Value * 1626 ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF, 1627 Address This, 1628 const CXXRecordDecl *ClassDecl, 1629 const CXXRecordDecl *BaseClassDecl) { 1630 llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl); 1631 CharUnits VBaseOffsetOffset = 1632 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl, 1633 BaseClassDecl); 1634 llvm::Value *VBaseOffsetPtr = 1635 CGF.Builder.CreateConstGEP1_64( 1636 CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(), 1637 "vbase.offset.ptr"); 1638 1639 llvm::Value *VBaseOffset; 1640 if (CGM.getItaniumVTableContext().isRelativeLayout()) { 1641 VBaseOffset = CGF.Builder.CreateAlignedLoad( 1642 CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4), 1643 "vbase.offset"); 1644 } else { 1645 VBaseOffset = CGF.Builder.CreateAlignedLoad( 1646 CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset"); 1647 } 1648 return VBaseOffset; 1649 } 1650 1651 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) { 1652 // Just make sure we're in sync with TargetCXXABI. 1653 assert(CGM.getTarget().getCXXABI().hasConstructorVariants()); 1654 1655 // The constructor used for constructing this as a base class; 1656 // ignores virtual bases. 1657 CGM.EmitGlobal(GlobalDecl(D, Ctor_Base)); 1658 1659 // The constructor used for constructing this as a complete class; 1660 // constructs the virtual bases, then calls the base constructor. 1661 if (!D->getParent()->isAbstract()) { 1662 // We don't need to emit the complete ctor if the class is abstract. 1663 CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete)); 1664 } 1665 } 1666 1667 CGCXXABI::AddedStructorArgCounts 1668 ItaniumCXXABI::buildStructorSignature(GlobalDecl GD, 1669 SmallVectorImpl<CanQualType> &ArgTys) { 1670 ASTContext &Context = getContext(); 1671 1672 // All parameters are already in place except VTT, which goes after 'this'. 1673 // These are Clang types, so we don't need to worry about sret yet. 1674 1675 // Check if we need to add a VTT parameter (which has type global void **). 1676 if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base 1677 : GD.getDtorType() == Dtor_Base) && 1678 cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) { 1679 LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr); 1680 QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS); 1681 ArgTys.insert(ArgTys.begin() + 1, 1682 Context.getPointerType(CanQualType::CreateUnsafe(Q))); 1683 return AddedStructorArgCounts::prefix(1); 1684 } 1685 return AddedStructorArgCounts{}; 1686 } 1687 1688 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) { 1689 // The destructor used for destructing this as a base class; ignores 1690 // virtual bases. 1691 CGM.EmitGlobal(GlobalDecl(D, Dtor_Base)); 1692 1693 // The destructor used for destructing this as a most-derived class; 1694 // call the base destructor and then destructs any virtual bases. 1695 CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete)); 1696 1697 // The destructor in a virtual table is always a 'deleting' 1698 // destructor, which calls the complete destructor and then uses the 1699 // appropriate operator delete. 1700 if (D->isVirtual()) 1701 CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting)); 1702 } 1703 1704 void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF, 1705 QualType &ResTy, 1706 FunctionArgList &Params) { 1707 const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl()); 1708 assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)); 1709 1710 // Check if we need a VTT parameter as well. 1711 if (NeedsVTTParameter(CGF.CurGD)) { 1712 ASTContext &Context = getContext(); 1713 1714 // FIXME: avoid the fake decl 1715 LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr); 1716 QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS); 1717 QualType T = Context.getPointerType(Q); 1718 auto *VTTDecl = ImplicitParamDecl::Create( 1719 Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"), 1720 T, ImplicitParamKind::CXXVTT); 1721 Params.insert(Params.begin() + 1, VTTDecl); 1722 getStructorImplicitParamDecl(CGF) = VTTDecl; 1723 } 1724 } 1725 1726 void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) { 1727 // Naked functions have no prolog. 1728 if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>()) 1729 return; 1730 1731 /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue 1732 /// adjustments are required, because they are all handled by thunks. 1733 setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF)); 1734 1735 /// Initialize the 'vtt' slot if needed. 1736 if (getStructorImplicitParamDecl(CGF)) { 1737 getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad( 1738 CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt"); 1739 } 1740 1741 /// If this is a function that the ABI specifies returns 'this', initialize 1742 /// the return slot to 'this' at the start of the function. 1743 /// 1744 /// Unlike the setting of return types, this is done within the ABI 1745 /// implementation instead of by clients of CGCXXABI because: 1746 /// 1) getThisValue is currently protected 1747 /// 2) in theory, an ABI could implement 'this' returns some other way; 1748 /// HasThisReturn only specifies a contract, not the implementation 1749 if (HasThisReturn(CGF.CurGD)) 1750 CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue); 1751 } 1752 1753 CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs( 1754 CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type, 1755 bool ForVirtualBase, bool Delegating) { 1756 if (!NeedsVTTParameter(GlobalDecl(D, Type))) 1757 return AddedStructorArgs{}; 1758 1759 // Insert the implicit 'vtt' argument as the second argument. Make sure to 1760 // correctly reflect its address space, which can differ from generic on 1761 // some targets. 1762 llvm::Value *VTT = 1763 CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating); 1764 LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr); 1765 QualType Q = getContext().getAddrSpaceQualType(getContext().VoidPtrTy, AS); 1766 QualType VTTTy = getContext().getPointerType(Q); 1767 return AddedStructorArgs::prefix({{VTT, VTTTy}}); 1768 } 1769 1770 llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam( 1771 CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type, 1772 bool ForVirtualBase, bool Delegating) { 1773 GlobalDecl GD(DD, Type); 1774 return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating); 1775 } 1776 1777 void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF, 1778 const CXXDestructorDecl *DD, 1779 CXXDtorType Type, bool ForVirtualBase, 1780 bool Delegating, Address This, 1781 QualType ThisTy) { 1782 GlobalDecl GD(DD, Type); 1783 llvm::Value *VTT = 1784 getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating); 1785 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy); 1786 1787 CGCallee Callee; 1788 if (getContext().getLangOpts().AppleKext && 1789 Type != Dtor_Base && DD->isVirtual()) 1790 Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent()); 1791 else 1792 Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD); 1793 1794 CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy, 1795 nullptr); 1796 } 1797 1798 void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT, 1799 const CXXRecordDecl *RD) { 1800 llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits()); 1801 if (VTable->hasInitializer()) 1802 return; 1803 1804 ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext(); 1805 const VTableLayout &VTLayout = VTContext.getVTableLayout(RD); 1806 llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD); 1807 llvm::Constant *RTTI = 1808 CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD)); 1809 1810 // Create and set the initializer. 1811 ConstantInitBuilder builder(CGM); 1812 auto components = builder.beginStruct(); 1813 CGVT.createVTableInitializer(components, VTLayout, RTTI, 1814 llvm::GlobalValue::isLocalLinkage(Linkage)); 1815 components.finishAndSetAsInitializer(VTable); 1816 1817 // Set the correct linkage. 1818 VTable->setLinkage(Linkage); 1819 1820 if (CGM.supportsCOMDAT() && VTable->isWeakForLinker()) 1821 VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName())); 1822 1823 // Set the right visibility. 1824 CGM.setGVProperties(VTable, RD); 1825 1826 // If this is the magic class __cxxabiv1::__fundamental_type_info, 1827 // we will emit the typeinfo for the fundamental types. This is the 1828 // same behaviour as GCC. 1829 const DeclContext *DC = RD->getDeclContext(); 1830 if (RD->getIdentifier() && 1831 RD->getIdentifier()->isStr("__fundamental_type_info") && 1832 isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() && 1833 cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") && 1834 DC->getParent()->isTranslationUnit()) 1835 EmitFundamentalRTTIDescriptors(RD); 1836 1837 // Always emit type metadata on non-available_externally definitions, and on 1838 // available_externally definitions if we are performing whole program 1839 // devirtualization. For WPD we need the type metadata on all vtable 1840 // definitions to ensure we associate derived classes with base classes 1841 // defined in headers but with a strong definition only in a shared library. 1842 if (!VTable->isDeclarationForLinker() || 1843 CGM.getCodeGenOpts().WholeProgramVTables) { 1844 CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout); 1845 // For available_externally definitions, add the vtable to 1846 // @llvm.compiler.used so that it isn't deleted before whole program 1847 // analysis. 1848 if (VTable->isDeclarationForLinker()) { 1849 assert(CGM.getCodeGenOpts().WholeProgramVTables); 1850 CGM.addCompilerUsedGlobal(VTable); 1851 } 1852 } 1853 1854 if (VTContext.isRelativeLayout()) { 1855 CGVT.RemoveHwasanMetadata(VTable); 1856 if (!VTable->isDSOLocal()) 1857 CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName()); 1858 } 1859 } 1860 1861 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField( 1862 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) { 1863 if (Vptr.NearestVBase == nullptr) 1864 return false; 1865 return NeedsVTTParameter(CGF.CurGD); 1866 } 1867 1868 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor( 1869 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base, 1870 const CXXRecordDecl *NearestVBase) { 1871 1872 if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) && 1873 NeedsVTTParameter(CGF.CurGD)) { 1874 return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base, 1875 NearestVBase); 1876 } 1877 return getVTableAddressPoint(Base, VTableClass); 1878 } 1879 1880 llvm::Constant * 1881 ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base, 1882 const CXXRecordDecl *VTableClass) { 1883 llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits()); 1884 1885 // Find the appropriate vtable within the vtable group, and the address point 1886 // within that vtable. 1887 VTableLayout::AddressPointLocation AddressPoint = 1888 CGM.getItaniumVTableContext() 1889 .getVTableLayout(VTableClass) 1890 .getAddressPoint(Base); 1891 llvm::Value *Indices[] = { 1892 llvm::ConstantInt::get(CGM.Int32Ty, 0), 1893 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex), 1894 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex), 1895 }; 1896 1897 return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable, 1898 Indices, /*InBounds=*/true, 1899 /*InRangeIndex=*/1); 1900 } 1901 1902 // Check whether all the non-inline virtual methods for the class have the 1903 // specified attribute. 1904 template <typename T> 1905 static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) { 1906 bool FoundNonInlineVirtualMethodWithAttr = false; 1907 for (const auto *D : RD->noload_decls()) { 1908 if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 1909 if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() || 1910 FD->doesThisDeclarationHaveABody()) 1911 continue; 1912 if (!D->hasAttr<T>()) 1913 return false; 1914 FoundNonInlineVirtualMethodWithAttr = true; 1915 } 1916 } 1917 1918 // We didn't find any non-inline virtual methods missing the attribute. We 1919 // will return true when we found at least one non-inline virtual with the 1920 // attribute. (This lets our caller know that the attribute needs to be 1921 // propagated up to the vtable.) 1922 return FoundNonInlineVirtualMethodWithAttr; 1923 } 1924 1925 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT( 1926 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base, 1927 const CXXRecordDecl *NearestVBase) { 1928 assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) && 1929 NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT"); 1930 1931 // Get the secondary vpointer index. 1932 uint64_t VirtualPointerIndex = 1933 CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base); 1934 1935 /// Load the VTT. 1936 llvm::Value *VTT = CGF.LoadCXXVTT(); 1937 if (VirtualPointerIndex) 1938 VTT = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.GlobalsVoidPtrTy, VTT, 1939 VirtualPointerIndex); 1940 1941 // And load the address point from the VTT. 1942 return CGF.Builder.CreateAlignedLoad(CGF.GlobalsVoidPtrTy, VTT, 1943 CGF.getPointerAlign()); 1944 } 1945 1946 llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr( 1947 BaseSubobject Base, const CXXRecordDecl *VTableClass) { 1948 return getVTableAddressPoint(Base, VTableClass); 1949 } 1950 1951 llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD, 1952 CharUnits VPtrOffset) { 1953 assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets"); 1954 1955 llvm::GlobalVariable *&VTable = VTables[RD]; 1956 if (VTable) 1957 return VTable; 1958 1959 // Queue up this vtable for possible deferred emission. 1960 CGM.addDeferredVTable(RD); 1961 1962 SmallString<256> Name; 1963 llvm::raw_svector_ostream Out(Name); 1964 getMangleContext().mangleCXXVTable(RD, Out); 1965 1966 const VTableLayout &VTLayout = 1967 CGM.getItaniumVTableContext().getVTableLayout(RD); 1968 llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout); 1969 1970 // Use pointer to global alignment for the vtable. Otherwise we would align 1971 // them based on the size of the initializer which doesn't make sense as only 1972 // single values are read. 1973 LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr); 1974 unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout() 1975 ? 32 1976 : CGM.getTarget().getPointerAlign(AS); 1977 1978 VTable = CGM.CreateOrReplaceCXXRuntimeVariable( 1979 Name, VTableType, llvm::GlobalValue::ExternalLinkage, 1980 getContext().toCharUnitsFromBits(PAlign).getAsAlign()); 1981 VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); 1982 1983 // In MS C++ if you have a class with virtual functions in which you are using 1984 // selective member import/export, then all virtual functions must be exported 1985 // unless they are inline, otherwise a link error will result. To match this 1986 // behavior, for such classes, we dllimport the vtable if it is defined 1987 // externally and all the non-inline virtual methods are marked dllimport, and 1988 // we dllexport the vtable if it is defined in this TU and all the non-inline 1989 // virtual methods are marked dllexport. 1990 if (CGM.getTarget().hasPS4DLLImportExport()) { 1991 if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) { 1992 if (CGM.getVTables().isVTableExternal(RD)) { 1993 if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD)) 1994 VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass); 1995 } else { 1996 if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD)) 1997 VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass); 1998 } 1999 } 2000 } 2001 CGM.setGVProperties(VTable, RD); 2002 2003 return VTable; 2004 } 2005 2006 CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF, 2007 GlobalDecl GD, 2008 Address This, 2009 llvm::Type *Ty, 2010 SourceLocation Loc) { 2011 llvm::Type *PtrTy = CGM.GlobalsInt8PtrTy; 2012 auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl()); 2013 llvm::Value *VTable = CGF.GetVTablePtr(This, PtrTy, MethodDecl->getParent()); 2014 2015 uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD); 2016 llvm::Value *VFunc; 2017 if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) { 2018 VFunc = CGF.EmitVTableTypeCheckedLoad( 2019 MethodDecl->getParent(), VTable, PtrTy, 2020 VTableIndex * 2021 CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default) / 2022 8); 2023 } else { 2024 CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc); 2025 2026 llvm::Value *VFuncLoad; 2027 if (CGM.getItaniumVTableContext().isRelativeLayout()) { 2028 VFuncLoad = CGF.Builder.CreateCall( 2029 CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}), 2030 {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)}); 2031 } else { 2032 llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64( 2033 PtrTy, VTable, VTableIndex, "vfn"); 2034 VFuncLoad = CGF.Builder.CreateAlignedLoad(PtrTy, VTableSlotPtr, 2035 CGF.getPointerAlign()); 2036 } 2037 2038 // Add !invariant.load md to virtual function load to indicate that 2039 // function didn't change inside vtable. 2040 // It's safe to add it without -fstrict-vtable-pointers, but it would not 2041 // help in devirtualization because it will only matter if we will have 2 2042 // the same virtual function loads from the same vtable load, which won't 2043 // happen without enabled devirtualization with -fstrict-vtable-pointers. 2044 if (CGM.getCodeGenOpts().OptimizationLevel > 0 && 2045 CGM.getCodeGenOpts().StrictVTablePointers) { 2046 if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) { 2047 VFuncLoadInstr->setMetadata( 2048 llvm::LLVMContext::MD_invariant_load, 2049 llvm::MDNode::get(CGM.getLLVMContext(), 2050 llvm::ArrayRef<llvm::Metadata *>())); 2051 } 2052 } 2053 VFunc = VFuncLoad; 2054 } 2055 2056 CGCallee Callee(GD, VFunc); 2057 return Callee; 2058 } 2059 2060 llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall( 2061 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType, 2062 Address This, DeleteOrMemberCallExpr E) { 2063 auto *CE = E.dyn_cast<const CXXMemberCallExpr *>(); 2064 auto *D = E.dyn_cast<const CXXDeleteExpr *>(); 2065 assert((CE != nullptr) ^ (D != nullptr)); 2066 assert(CE == nullptr || CE->arg_begin() == CE->arg_end()); 2067 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete); 2068 2069 GlobalDecl GD(Dtor, DtorType); 2070 const CGFunctionInfo *FInfo = 2071 &CGM.getTypes().arrangeCXXStructorDeclaration(GD); 2072 llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo); 2073 CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty); 2074 2075 QualType ThisTy; 2076 if (CE) { 2077 ThisTy = CE->getObjectType(); 2078 } else { 2079 ThisTy = D->getDestroyedType(); 2080 } 2081 2082 CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr, 2083 QualType(), nullptr); 2084 return nullptr; 2085 } 2086 2087 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) { 2088 CodeGenVTables &VTables = CGM.getVTables(); 2089 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD); 2090 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD); 2091 } 2092 2093 bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass( 2094 const CXXRecordDecl *RD) const { 2095 // We don't emit available_externally vtables if we are in -fapple-kext mode 2096 // because kext mode does not permit devirtualization. 2097 if (CGM.getLangOpts().AppleKext) 2098 return false; 2099 2100 // If the vtable is hidden then it is not safe to emit an available_externally 2101 // copy of vtable. 2102 if (isVTableHidden(RD)) 2103 return false; 2104 2105 if (CGM.getCodeGenOpts().ForceEmitVTables) 2106 return true; 2107 2108 // If we don't have any not emitted inline virtual function then we are safe 2109 // to emit an available_externally copy of vtable. 2110 // FIXME we can still emit a copy of the vtable if we 2111 // can emit definition of the inline functions. 2112 if (hasAnyUnusedVirtualInlineFunction(RD)) 2113 return false; 2114 2115 // For a class with virtual bases, we must also be able to speculatively 2116 // emit the VTT, because CodeGen doesn't have separate notions of "can emit 2117 // the vtable" and "can emit the VTT". For a base subobject, this means we 2118 // need to be able to emit non-virtual base vtables. 2119 if (RD->getNumVBases()) { 2120 for (const auto &B : RD->bases()) { 2121 auto *BRD = B.getType()->getAsCXXRecordDecl(); 2122 assert(BRD && "no class for base specifier"); 2123 if (B.isVirtual() || !BRD->isDynamicClass()) 2124 continue; 2125 if (!canSpeculativelyEmitVTableAsBaseClass(BRD)) 2126 return false; 2127 } 2128 } 2129 2130 return true; 2131 } 2132 2133 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const { 2134 if (!canSpeculativelyEmitVTableAsBaseClass(RD)) 2135 return false; 2136 2137 // For a complete-object vtable (or more specifically, for the VTT), we need 2138 // to be able to speculatively emit the vtables of all dynamic virtual bases. 2139 for (const auto &B : RD->vbases()) { 2140 auto *BRD = B.getType()->getAsCXXRecordDecl(); 2141 assert(BRD && "no class for base specifier"); 2142 if (!BRD->isDynamicClass()) 2143 continue; 2144 if (!canSpeculativelyEmitVTableAsBaseClass(BRD)) 2145 return false; 2146 } 2147 2148 return true; 2149 } 2150 static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF, 2151 Address InitialPtr, 2152 int64_t NonVirtualAdjustment, 2153 int64_t VirtualAdjustment, 2154 bool IsReturnAdjustment) { 2155 if (!NonVirtualAdjustment && !VirtualAdjustment) 2156 return InitialPtr.getPointer(); 2157 2158 Address V = InitialPtr.withElementType(CGF.Int8Ty); 2159 2160 // In a base-to-derived cast, the non-virtual adjustment is applied first. 2161 if (NonVirtualAdjustment && !IsReturnAdjustment) { 2162 V = CGF.Builder.CreateConstInBoundsByteGEP(V, 2163 CharUnits::fromQuantity(NonVirtualAdjustment)); 2164 } 2165 2166 // Perform the virtual adjustment if we have one. 2167 llvm::Value *ResultPtr; 2168 if (VirtualAdjustment) { 2169 Address VTablePtrPtr = V.withElementType(CGF.Int8PtrTy); 2170 llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr); 2171 2172 llvm::Value *Offset; 2173 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64( 2174 CGF.Int8Ty, VTablePtr, VirtualAdjustment); 2175 if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) { 2176 // Load the adjustment offset from the vtable as a 32-bit int. 2177 Offset = 2178 CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr, 2179 CharUnits::fromQuantity(4)); 2180 } else { 2181 llvm::Type *PtrDiffTy = 2182 CGF.ConvertType(CGF.getContext().getPointerDiffType()); 2183 2184 // Load the adjustment offset from the vtable. 2185 Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr, 2186 CGF.getPointerAlign()); 2187 } 2188 // Adjust our pointer. 2189 ResultPtr = CGF.Builder.CreateInBoundsGEP( 2190 V.getElementType(), V.getPointer(), Offset); 2191 } else { 2192 ResultPtr = V.getPointer(); 2193 } 2194 2195 // In a derived-to-base conversion, the non-virtual adjustment is 2196 // applied second. 2197 if (NonVirtualAdjustment && IsReturnAdjustment) { 2198 ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr, 2199 NonVirtualAdjustment); 2200 } 2201 2202 return ResultPtr; 2203 } 2204 2205 llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF, 2206 Address This, 2207 const ThisAdjustment &TA) { 2208 return performTypeAdjustment(CGF, This, TA.NonVirtual, 2209 TA.Virtual.Itanium.VCallOffsetOffset, 2210 /*IsReturnAdjustment=*/false); 2211 } 2212 2213 llvm::Value * 2214 ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret, 2215 const ReturnAdjustment &RA) { 2216 return performTypeAdjustment(CGF, Ret, RA.NonVirtual, 2217 RA.Virtual.Itanium.VBaseOffsetOffset, 2218 /*IsReturnAdjustment=*/true); 2219 } 2220 2221 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF, 2222 RValue RV, QualType ResultType) { 2223 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl())) 2224 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType); 2225 2226 // Destructor thunks in the ARM ABI have indeterminate results. 2227 llvm::Type *T = CGF.ReturnValue.getElementType(); 2228 RValue Undef = RValue::get(llvm::UndefValue::get(T)); 2229 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType); 2230 } 2231 2232 /************************** Array allocation cookies **************************/ 2233 2234 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) { 2235 // The array cookie is a size_t; pad that up to the element alignment. 2236 // The cookie is actually right-justified in that space. 2237 return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes), 2238 CGM.getContext().getPreferredTypeAlignInChars(elementType)); 2239 } 2240 2241 Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF, 2242 Address NewPtr, 2243 llvm::Value *NumElements, 2244 const CXXNewExpr *expr, 2245 QualType ElementType) { 2246 assert(requiresArrayCookie(expr)); 2247 2248 unsigned AS = NewPtr.getAddressSpace(); 2249 2250 ASTContext &Ctx = getContext(); 2251 CharUnits SizeSize = CGF.getSizeSize(); 2252 2253 // The size of the cookie. 2254 CharUnits CookieSize = 2255 std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType)); 2256 assert(CookieSize == getArrayCookieSizeImpl(ElementType)); 2257 2258 // Compute an offset to the cookie. 2259 Address CookiePtr = NewPtr; 2260 CharUnits CookieOffset = CookieSize - SizeSize; 2261 if (!CookieOffset.isZero()) 2262 CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset); 2263 2264 // Write the number of elements into the appropriate slot. 2265 Address NumElementsPtr = CookiePtr.withElementType(CGF.SizeTy); 2266 llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr); 2267 2268 // Handle the array cookie specially in ASan. 2269 if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 && 2270 (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() || 2271 CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) { 2272 // The store to the CookiePtr does not need to be instrumented. 2273 SI->setNoSanitizeMetadata(); 2274 llvm::FunctionType *FTy = 2275 llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false); 2276 llvm::FunctionCallee F = 2277 CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie"); 2278 CGF.Builder.CreateCall(F, NumElementsPtr.getPointer()); 2279 } 2280 2281 // Finally, compute a pointer to the actual data buffer by skipping 2282 // over the cookie completely. 2283 return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize); 2284 } 2285 2286 llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF, 2287 Address allocPtr, 2288 CharUnits cookieSize) { 2289 // The element size is right-justified in the cookie. 2290 Address numElementsPtr = allocPtr; 2291 CharUnits numElementsOffset = cookieSize - CGF.getSizeSize(); 2292 if (!numElementsOffset.isZero()) 2293 numElementsPtr = 2294 CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset); 2295 2296 unsigned AS = allocPtr.getAddressSpace(); 2297 numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy); 2298 if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0) 2299 return CGF.Builder.CreateLoad(numElementsPtr); 2300 // In asan mode emit a function call instead of a regular load and let the 2301 // run-time deal with it: if the shadow is properly poisoned return the 2302 // cookie, otherwise return 0 to avoid an infinite loop calling DTORs. 2303 // We can't simply ignore this load using nosanitize metadata because 2304 // the metadata may be lost. 2305 llvm::FunctionType *FTy = 2306 llvm::FunctionType::get(CGF.SizeTy, CGF.UnqualPtrTy, false); 2307 llvm::FunctionCallee F = 2308 CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie"); 2309 return CGF.Builder.CreateCall(F, numElementsPtr.getPointer()); 2310 } 2311 2312 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) { 2313 // ARM says that the cookie is always: 2314 // struct array_cookie { 2315 // std::size_t element_size; // element_size != 0 2316 // std::size_t element_count; 2317 // }; 2318 // But the base ABI doesn't give anything an alignment greater than 2319 // 8, so we can dismiss this as typical ABI-author blindness to 2320 // actual language complexity and round up to the element alignment. 2321 return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes), 2322 CGM.getContext().getTypeAlignInChars(elementType)); 2323 } 2324 2325 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF, 2326 Address newPtr, 2327 llvm::Value *numElements, 2328 const CXXNewExpr *expr, 2329 QualType elementType) { 2330 assert(requiresArrayCookie(expr)); 2331 2332 // The cookie is always at the start of the buffer. 2333 Address cookie = newPtr; 2334 2335 // The first element is the element size. 2336 cookie = cookie.withElementType(CGF.SizeTy); 2337 llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy, 2338 getContext().getTypeSizeInChars(elementType).getQuantity()); 2339 CGF.Builder.CreateStore(elementSize, cookie); 2340 2341 // The second element is the element count. 2342 cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1); 2343 CGF.Builder.CreateStore(numElements, cookie); 2344 2345 // Finally, compute a pointer to the actual data buffer by skipping 2346 // over the cookie completely. 2347 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType); 2348 return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize); 2349 } 2350 2351 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF, 2352 Address allocPtr, 2353 CharUnits cookieSize) { 2354 // The number of elements is at offset sizeof(size_t) relative to 2355 // the allocated pointer. 2356 Address numElementsPtr 2357 = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize()); 2358 2359 numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy); 2360 return CGF.Builder.CreateLoad(numElementsPtr); 2361 } 2362 2363 /*********************** Static local initialization **************************/ 2364 2365 static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM, 2366 llvm::PointerType *GuardPtrTy) { 2367 // int __cxa_guard_acquire(__guard *guard_object); 2368 llvm::FunctionType *FTy = 2369 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy), 2370 GuardPtrTy, /*isVarArg=*/false); 2371 return CGM.CreateRuntimeFunction( 2372 FTy, "__cxa_guard_acquire", 2373 llvm::AttributeList::get(CGM.getLLVMContext(), 2374 llvm::AttributeList::FunctionIndex, 2375 llvm::Attribute::NoUnwind)); 2376 } 2377 2378 static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM, 2379 llvm::PointerType *GuardPtrTy) { 2380 // void __cxa_guard_release(__guard *guard_object); 2381 llvm::FunctionType *FTy = 2382 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false); 2383 return CGM.CreateRuntimeFunction( 2384 FTy, "__cxa_guard_release", 2385 llvm::AttributeList::get(CGM.getLLVMContext(), 2386 llvm::AttributeList::FunctionIndex, 2387 llvm::Attribute::NoUnwind)); 2388 } 2389 2390 static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM, 2391 llvm::PointerType *GuardPtrTy) { 2392 // void __cxa_guard_abort(__guard *guard_object); 2393 llvm::FunctionType *FTy = 2394 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false); 2395 return CGM.CreateRuntimeFunction( 2396 FTy, "__cxa_guard_abort", 2397 llvm::AttributeList::get(CGM.getLLVMContext(), 2398 llvm::AttributeList::FunctionIndex, 2399 llvm::Attribute::NoUnwind)); 2400 } 2401 2402 namespace { 2403 struct CallGuardAbort final : EHScopeStack::Cleanup { 2404 llvm::GlobalVariable *Guard; 2405 CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {} 2406 2407 void Emit(CodeGenFunction &CGF, Flags flags) override { 2408 CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()), 2409 Guard); 2410 } 2411 }; 2412 } 2413 2414 /// The ARM code here follows the Itanium code closely enough that we 2415 /// just special-case it at particular places. 2416 void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF, 2417 const VarDecl &D, 2418 llvm::GlobalVariable *var, 2419 bool shouldPerformInit) { 2420 CGBuilderTy &Builder = CGF.Builder; 2421 2422 // Inline variables that weren't instantiated from variable templates have 2423 // partially-ordered initialization within their translation unit. 2424 bool NonTemplateInline = 2425 D.isInline() && 2426 !isTemplateInstantiation(D.getTemplateSpecializationKind()); 2427 2428 // We only need to use thread-safe statics for local non-TLS variables and 2429 // inline variables; other global initialization is always single-threaded 2430 // or (through lazy dynamic loading in multiple threads) unsequenced. 2431 bool threadsafe = getContext().getLangOpts().ThreadsafeStatics && 2432 (D.isLocalVarDecl() || NonTemplateInline) && 2433 !D.getTLSKind(); 2434 2435 // If we have a global variable with internal linkage and thread-safe statics 2436 // are disabled, we can just let the guard variable be of type i8. 2437 bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage(); 2438 2439 llvm::IntegerType *guardTy; 2440 CharUnits guardAlignment; 2441 if (useInt8GuardVariable) { 2442 guardTy = CGF.Int8Ty; 2443 guardAlignment = CharUnits::One(); 2444 } else { 2445 // Guard variables are 64 bits in the generic ABI and size width on ARM 2446 // (i.e. 32-bit on AArch32, 64-bit on AArch64). 2447 if (UseARMGuardVarABI) { 2448 guardTy = CGF.SizeTy; 2449 guardAlignment = CGF.getSizeAlign(); 2450 } else { 2451 guardTy = CGF.Int64Ty; 2452 guardAlignment = 2453 CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlign(guardTy)); 2454 } 2455 } 2456 llvm::PointerType *guardPtrTy = llvm::PointerType::get( 2457 CGF.CGM.getLLVMContext(), 2458 CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace()); 2459 2460 // Create the guard variable if we don't already have it (as we 2461 // might if we're double-emitting this function body). 2462 llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D); 2463 if (!guard) { 2464 // Mangle the name for the guard. 2465 SmallString<256> guardName; 2466 { 2467 llvm::raw_svector_ostream out(guardName); 2468 getMangleContext().mangleStaticGuardVariable(&D, out); 2469 } 2470 2471 // Create the guard variable with a zero-initializer. 2472 // Just absorb linkage, visibility and dll storage class from the guarded 2473 // variable. 2474 guard = new llvm::GlobalVariable(CGM.getModule(), guardTy, 2475 false, var->getLinkage(), 2476 llvm::ConstantInt::get(guardTy, 0), 2477 guardName.str()); 2478 guard->setDSOLocal(var->isDSOLocal()); 2479 guard->setVisibility(var->getVisibility()); 2480 guard->setDLLStorageClass(var->getDLLStorageClass()); 2481 // If the variable is thread-local, so is its guard variable. 2482 guard->setThreadLocalMode(var->getThreadLocalMode()); 2483 guard->setAlignment(guardAlignment.getAsAlign()); 2484 2485 // The ABI says: "It is suggested that it be emitted in the same COMDAT 2486 // group as the associated data object." In practice, this doesn't work for 2487 // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm. 2488 llvm::Comdat *C = var->getComdat(); 2489 if (!D.isLocalVarDecl() && C && 2490 (CGM.getTarget().getTriple().isOSBinFormatELF() || 2491 CGM.getTarget().getTriple().isOSBinFormatWasm())) { 2492 guard->setComdat(C); 2493 } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) { 2494 guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName())); 2495 } 2496 2497 CGM.setStaticLocalDeclGuardAddress(&D, guard); 2498 } 2499 2500 Address guardAddr = Address(guard, guard->getValueType(), guardAlignment); 2501 2502 // Test whether the variable has completed initialization. 2503 // 2504 // Itanium C++ ABI 3.3.2: 2505 // The following is pseudo-code showing how these functions can be used: 2506 // if (obj_guard.first_byte == 0) { 2507 // if ( __cxa_guard_acquire (&obj_guard) ) { 2508 // try { 2509 // ... initialize the object ...; 2510 // } catch (...) { 2511 // __cxa_guard_abort (&obj_guard); 2512 // throw; 2513 // } 2514 // ... queue object destructor with __cxa_atexit() ...; 2515 // __cxa_guard_release (&obj_guard); 2516 // } 2517 // } 2518 // 2519 // If threadsafe statics are enabled, but we don't have inline atomics, just 2520 // call __cxa_guard_acquire unconditionally. The "inline" check isn't 2521 // actually inline, and the user might not expect calls to __atomic libcalls. 2522 2523 unsigned MaxInlineWidthInBits = CGF.getTarget().getMaxAtomicInlineWidth(); 2524 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end"); 2525 if (!threadsafe || MaxInlineWidthInBits) { 2526 // Load the first byte of the guard variable. 2527 llvm::LoadInst *LI = 2528 Builder.CreateLoad(guardAddr.withElementType(CGM.Int8Ty)); 2529 2530 // Itanium ABI: 2531 // An implementation supporting thread-safety on multiprocessor 2532 // systems must also guarantee that references to the initialized 2533 // object do not occur before the load of the initialization flag. 2534 // 2535 // In LLVM, we do this by marking the load Acquire. 2536 if (threadsafe) 2537 LI->setAtomic(llvm::AtomicOrdering::Acquire); 2538 2539 // For ARM, we should only check the first bit, rather than the entire byte: 2540 // 2541 // ARM C++ ABI 3.2.3.1: 2542 // To support the potential use of initialization guard variables 2543 // as semaphores that are the target of ARM SWP and LDREX/STREX 2544 // synchronizing instructions we define a static initialization 2545 // guard variable to be a 4-byte aligned, 4-byte word with the 2546 // following inline access protocol. 2547 // #define INITIALIZED 1 2548 // if ((obj_guard & INITIALIZED) != INITIALIZED) { 2549 // if (__cxa_guard_acquire(&obj_guard)) 2550 // ... 2551 // } 2552 // 2553 // and similarly for ARM64: 2554 // 2555 // ARM64 C++ ABI 3.2.2: 2556 // This ABI instead only specifies the value bit 0 of the static guard 2557 // variable; all other bits are platform defined. Bit 0 shall be 0 when the 2558 // variable is not initialized and 1 when it is. 2559 llvm::Value *V = 2560 (UseARMGuardVarABI && !useInt8GuardVariable) 2561 ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1)) 2562 : LI; 2563 llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized"); 2564 2565 llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check"); 2566 2567 // Check if the first byte of the guard variable is zero. 2568 CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock, 2569 CodeGenFunction::GuardKind::VariableGuard, &D); 2570 2571 CGF.EmitBlock(InitCheckBlock); 2572 } 2573 2574 // The semantics of dynamic initialization of variables with static or thread 2575 // storage duration depends on whether they are declared at block-scope. The 2576 // initialization of such variables at block-scope can be aborted with an 2577 // exception and later retried (per C++20 [stmt.dcl]p4), and recursive entry 2578 // to their initialization has undefined behavior (also per C++20 2579 // [stmt.dcl]p4). For such variables declared at non-block scope, exceptions 2580 // lead to termination (per C++20 [except.terminate]p1), and recursive 2581 // references to the variables are governed only by the lifetime rules (per 2582 // C++20 [class.cdtor]p2), which means such references are perfectly fine as 2583 // long as they avoid touching memory. As a result, block-scope variables must 2584 // not be marked as initialized until after initialization completes (unless 2585 // the mark is reverted following an exception), but non-block-scope variables 2586 // must be marked prior to initialization so that recursive accesses during 2587 // initialization do not restart initialization. 2588 2589 // Variables used when coping with thread-safe statics and exceptions. 2590 if (threadsafe) { 2591 // Call __cxa_guard_acquire. 2592 llvm::Value *V 2593 = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard); 2594 2595 llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init"); 2596 2597 Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"), 2598 InitBlock, EndBlock); 2599 2600 // Call __cxa_guard_abort along the exceptional edge. 2601 CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard); 2602 2603 CGF.EmitBlock(InitBlock); 2604 } else if (!D.isLocalVarDecl()) { 2605 // For non-local variables, store 1 into the first byte of the guard 2606 // variable before the object initialization begins so that references 2607 // to the variable during initialization don't restart initialization. 2608 Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1), 2609 guardAddr.withElementType(CGM.Int8Ty)); 2610 } 2611 2612 // Emit the initializer and add a global destructor if appropriate. 2613 CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit); 2614 2615 if (threadsafe) { 2616 // Pop the guard-abort cleanup if we pushed one. 2617 CGF.PopCleanupBlock(); 2618 2619 // Call __cxa_guard_release. This cannot throw. 2620 CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy), 2621 guardAddr.getPointer()); 2622 } else if (D.isLocalVarDecl()) { 2623 // For local variables, store 1 into the first byte of the guard variable 2624 // after the object initialization completes so that initialization is 2625 // retried if initialization is interrupted by an exception. 2626 Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1), 2627 guardAddr.withElementType(CGM.Int8Ty)); 2628 } 2629 2630 CGF.EmitBlock(EndBlock); 2631 } 2632 2633 /// Register a global destructor using __cxa_atexit. 2634 static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF, 2635 llvm::FunctionCallee dtor, 2636 llvm::Constant *addr, bool TLS) { 2637 assert(!CGF.getTarget().getTriple().isOSAIX() && 2638 "unexpected call to emitGlobalDtorWithCXAAtExit"); 2639 assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) && 2640 "__cxa_atexit is disabled"); 2641 const char *Name = "__cxa_atexit"; 2642 if (TLS) { 2643 const llvm::Triple &T = CGF.getTarget().getTriple(); 2644 Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit"; 2645 } 2646 2647 // We're assuming that the destructor function is something we can 2648 // reasonably call with the default CC. 2649 llvm::Type *dtorTy = CGF.UnqualPtrTy; 2650 2651 // Preserve address space of addr. 2652 auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0; 2653 auto AddrPtrTy = AddrAS ? llvm::PointerType::get(CGF.getLLVMContext(), AddrAS) 2654 : CGF.Int8PtrTy; 2655 2656 // Create a variable that binds the atexit to this shared object. 2657 llvm::Constant *handle = 2658 CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle"); 2659 auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts()); 2660 GV->setVisibility(llvm::GlobalValue::HiddenVisibility); 2661 2662 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d); 2663 llvm::Type *paramTys[] = {dtorTy, AddrPtrTy, handle->getType()}; 2664 llvm::FunctionType *atexitTy = 2665 llvm::FunctionType::get(CGF.IntTy, paramTys, false); 2666 2667 // Fetch the actual function. 2668 llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name); 2669 if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee())) 2670 fn->setDoesNotThrow(); 2671 2672 if (!addr) 2673 // addr is null when we are trying to register a dtor annotated with 2674 // __attribute__((destructor)) in a constructor function. Using null here is 2675 // okay because this argument is just passed back to the destructor 2676 // function. 2677 addr = llvm::Constant::getNullValue(CGF.Int8PtrTy); 2678 2679 llvm::Value *args[] = {dtor.getCallee(), addr, handle}; 2680 CGF.EmitNounwindRuntimeCall(atexit, args); 2681 } 2682 2683 static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM, 2684 StringRef FnName) { 2685 // Create a function that registers/unregisters destructors that have the same 2686 // priority. 2687 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false); 2688 llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction( 2689 FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation()); 2690 2691 return GlobalInitOrCleanupFn; 2692 } 2693 2694 void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() { 2695 for (const auto &I : DtorsUsingAtExit) { 2696 int Priority = I.first; 2697 std::string GlobalCleanupFnName = 2698 std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority); 2699 2700 llvm::Function *GlobalCleanupFn = 2701 createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName); 2702 2703 CodeGenFunction CGF(*this); 2704 CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn, 2705 getTypes().arrangeNullaryFunction(), FunctionArgList(), 2706 SourceLocation(), SourceLocation()); 2707 auto AL = ApplyDebugLocation::CreateArtificial(CGF); 2708 2709 // Get the destructor function type, void(*)(void). 2710 llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false); 2711 2712 // Destructor functions are run/unregistered in non-ascending 2713 // order of their priorities. 2714 const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second; 2715 auto itv = Dtors.rbegin(); 2716 while (itv != Dtors.rend()) { 2717 llvm::Function *Dtor = *itv; 2718 2719 // We're assuming that the destructor function is something we can 2720 // reasonably call with the correct CC. 2721 llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(Dtor); 2722 llvm::Value *NeedsDestruct = 2723 CGF.Builder.CreateIsNull(V, "needs_destruct"); 2724 2725 llvm::BasicBlock *DestructCallBlock = 2726 CGF.createBasicBlock("destruct.call"); 2727 llvm::BasicBlock *EndBlock = CGF.createBasicBlock( 2728 (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end"); 2729 // Check if unatexit returns a value of 0. If it does, jump to 2730 // DestructCallBlock, otherwise jump to EndBlock directly. 2731 CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock); 2732 2733 CGF.EmitBlock(DestructCallBlock); 2734 2735 // Emit the call to casted Dtor. 2736 llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, Dtor); 2737 // Make sure the call and the callee agree on calling convention. 2738 CI->setCallingConv(Dtor->getCallingConv()); 2739 2740 CGF.EmitBlock(EndBlock); 2741 2742 itv++; 2743 } 2744 2745 CGF.FinishFunction(); 2746 AddGlobalDtor(GlobalCleanupFn, Priority); 2747 } 2748 } 2749 2750 void CodeGenModule::registerGlobalDtorsWithAtExit() { 2751 for (const auto &I : DtorsUsingAtExit) { 2752 int Priority = I.first; 2753 std::string GlobalInitFnName = 2754 std::string("__GLOBAL_init_") + llvm::to_string(Priority); 2755 llvm::Function *GlobalInitFn = 2756 createGlobalInitOrCleanupFn(*this, GlobalInitFnName); 2757 2758 CodeGenFunction CGF(*this); 2759 CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn, 2760 getTypes().arrangeNullaryFunction(), FunctionArgList(), 2761 SourceLocation(), SourceLocation()); 2762 auto AL = ApplyDebugLocation::CreateArtificial(CGF); 2763 2764 // Since constructor functions are run in non-descending order of their 2765 // priorities, destructors are registered in non-descending order of their 2766 // priorities, and since destructor functions are run in the reverse order 2767 // of their registration, destructor functions are run in non-ascending 2768 // order of their priorities. 2769 const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second; 2770 for (auto *Dtor : Dtors) { 2771 // Register the destructor function calling __cxa_atexit if it is 2772 // available. Otherwise fall back on calling atexit. 2773 if (getCodeGenOpts().CXAAtExit) { 2774 emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false); 2775 } else { 2776 // We're assuming that the destructor function is something we can 2777 // reasonably call with the correct CC. 2778 CGF.registerGlobalDtorWithAtExit(Dtor); 2779 } 2780 } 2781 2782 CGF.FinishFunction(); 2783 AddGlobalCtor(GlobalInitFn, Priority); 2784 } 2785 2786 if (getCXXABI().useSinitAndSterm()) 2787 unregisterGlobalDtorsWithUnAtExit(); 2788 } 2789 2790 /// Register a global destructor as best as we know how. 2791 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, 2792 llvm::FunctionCallee dtor, 2793 llvm::Constant *addr) { 2794 if (D.isNoDestroy(CGM.getContext())) 2795 return; 2796 2797 // OpenMP offloading supports C++ constructors and destructors but we do not 2798 // always have 'atexit' available. Instead lower these to use the LLVM global 2799 // destructors which we can handle directly in the runtime. Note that this is 2800 // not strictly 1-to-1 with using `atexit` because we no longer tear down 2801 // globals in reverse order of when they were constructed. 2802 if (!CGM.getLangOpts().hasAtExit() && !D.isStaticLocal()) 2803 return CGF.registerGlobalDtorWithLLVM(D, dtor, addr); 2804 2805 // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit 2806 // or __cxa_atexit depending on whether this VarDecl is a thread-local storage 2807 // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled. 2808 // We can always use __cxa_thread_atexit. 2809 if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind()) 2810 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind()); 2811 2812 // In Apple kexts, we want to add a global destructor entry. 2813 // FIXME: shouldn't this be guarded by some variable? 2814 if (CGM.getLangOpts().AppleKext) { 2815 // Generate a global destructor entry. 2816 return CGM.AddCXXDtorEntry(dtor, addr); 2817 } 2818 2819 CGF.registerGlobalDtorWithAtExit(D, dtor, addr); 2820 } 2821 2822 static bool isThreadWrapperReplaceable(const VarDecl *VD, 2823 CodeGen::CodeGenModule &CGM) { 2824 assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!"); 2825 // Darwin prefers to have references to thread local variables to go through 2826 // the thread wrapper instead of directly referencing the backing variable. 2827 return VD->getTLSKind() == VarDecl::TLS_Dynamic && 2828 CGM.getTarget().getTriple().isOSDarwin(); 2829 } 2830 2831 /// Get the appropriate linkage for the wrapper function. This is essentially 2832 /// the weak form of the variable's linkage; every translation unit which needs 2833 /// the wrapper emits a copy, and we want the linker to merge them. 2834 static llvm::GlobalValue::LinkageTypes 2835 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) { 2836 llvm::GlobalValue::LinkageTypes VarLinkage = 2837 CGM.getLLVMLinkageVarDefinition(VD); 2838 2839 // For internal linkage variables, we don't need an external or weak wrapper. 2840 if (llvm::GlobalValue::isLocalLinkage(VarLinkage)) 2841 return VarLinkage; 2842 2843 // If the thread wrapper is replaceable, give it appropriate linkage. 2844 if (isThreadWrapperReplaceable(VD, CGM)) 2845 if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) && 2846 !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage)) 2847 return VarLinkage; 2848 return llvm::GlobalValue::WeakODRLinkage; 2849 } 2850 2851 llvm::Function * 2852 ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD, 2853 llvm::Value *Val) { 2854 // Mangle the name for the thread_local wrapper function. 2855 SmallString<256> WrapperName; 2856 { 2857 llvm::raw_svector_ostream Out(WrapperName); 2858 getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out); 2859 } 2860 2861 // FIXME: If VD is a definition, we should regenerate the function attributes 2862 // before returning. 2863 if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName)) 2864 return cast<llvm::Function>(V); 2865 2866 QualType RetQT = VD->getType(); 2867 if (RetQT->isReferenceType()) 2868 RetQT = RetQT.getNonReferenceType(); 2869 2870 const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration( 2871 getContext().getPointerType(RetQT), FunctionArgList()); 2872 2873 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI); 2874 llvm::Function *Wrapper = 2875 llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM), 2876 WrapperName.str(), &CGM.getModule()); 2877 2878 if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker()) 2879 Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName())); 2880 2881 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false); 2882 2883 // Always resolve references to the wrapper at link time. 2884 if (!Wrapper->hasLocalLinkage()) 2885 if (!isThreadWrapperReplaceable(VD, CGM) || 2886 llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) || 2887 llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) || 2888 VD->getVisibility() == HiddenVisibility) 2889 Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility); 2890 2891 if (isThreadWrapperReplaceable(VD, CGM)) { 2892 Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS); 2893 Wrapper->addFnAttr(llvm::Attribute::NoUnwind); 2894 } 2895 2896 ThreadWrappers.push_back({VD, Wrapper}); 2897 return Wrapper; 2898 } 2899 2900 void ItaniumCXXABI::EmitThreadLocalInitFuncs( 2901 CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals, 2902 ArrayRef<llvm::Function *> CXXThreadLocalInits, 2903 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) { 2904 llvm::Function *InitFunc = nullptr; 2905 2906 // Separate initializers into those with ordered (or partially-ordered) 2907 // initialization and those with unordered initialization. 2908 llvm::SmallVector<llvm::Function *, 8> OrderedInits; 2909 llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits; 2910 for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) { 2911 if (isTemplateInstantiation( 2912 CXXThreadLocalInitVars[I]->getTemplateSpecializationKind())) 2913 UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] = 2914 CXXThreadLocalInits[I]; 2915 else 2916 OrderedInits.push_back(CXXThreadLocalInits[I]); 2917 } 2918 2919 if (!OrderedInits.empty()) { 2920 // Generate a guarded initialization function. 2921 llvm::FunctionType *FTy = 2922 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); 2923 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction(); 2924 InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI, 2925 SourceLocation(), 2926 /*TLS=*/true); 2927 llvm::GlobalVariable *Guard = new llvm::GlobalVariable( 2928 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false, 2929 llvm::GlobalVariable::InternalLinkage, 2930 llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard"); 2931 Guard->setThreadLocal(true); 2932 Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel()); 2933 2934 CharUnits GuardAlign = CharUnits::One(); 2935 Guard->setAlignment(GuardAlign.getAsAlign()); 2936 2937 CodeGenFunction(CGM).GenerateCXXGlobalInitFunc( 2938 InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign)); 2939 // On Darwin platforms, use CXX_FAST_TLS calling convention. 2940 if (CGM.getTarget().getTriple().isOSDarwin()) { 2941 InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS); 2942 InitFunc->addFnAttr(llvm::Attribute::NoUnwind); 2943 } 2944 } 2945 2946 // Create declarations for thread wrappers for all thread-local variables 2947 // with non-discardable definitions in this translation unit. 2948 for (const VarDecl *VD : CXXThreadLocals) { 2949 if (VD->hasDefinition() && 2950 !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) { 2951 llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD)); 2952 getOrCreateThreadLocalWrapper(VD, GV); 2953 } 2954 } 2955 2956 // Emit all referenced thread wrappers. 2957 for (auto VDAndWrapper : ThreadWrappers) { 2958 const VarDecl *VD = VDAndWrapper.first; 2959 llvm::GlobalVariable *Var = 2960 cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD))); 2961 llvm::Function *Wrapper = VDAndWrapper.second; 2962 2963 // Some targets require that all access to thread local variables go through 2964 // the thread wrapper. This means that we cannot attempt to create a thread 2965 // wrapper or a thread helper. 2966 if (!VD->hasDefinition()) { 2967 if (isThreadWrapperReplaceable(VD, CGM)) { 2968 Wrapper->setLinkage(llvm::Function::ExternalLinkage); 2969 continue; 2970 } 2971 2972 // If this isn't a TU in which this variable is defined, the thread 2973 // wrapper is discardable. 2974 if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage) 2975 Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage); 2976 } 2977 2978 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper); 2979 2980 // Mangle the name for the thread_local initialization function. 2981 SmallString<256> InitFnName; 2982 { 2983 llvm::raw_svector_ostream Out(InitFnName); 2984 getMangleContext().mangleItaniumThreadLocalInit(VD, Out); 2985 } 2986 2987 llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false); 2988 2989 // If we have a definition for the variable, emit the initialization 2990 // function as an alias to the global Init function (if any). Otherwise, 2991 // produce a declaration of the initialization function. 2992 llvm::GlobalValue *Init = nullptr; 2993 bool InitIsInitFunc = false; 2994 bool HasConstantInitialization = false; 2995 if (!usesThreadWrapperFunction(VD)) { 2996 HasConstantInitialization = true; 2997 } else if (VD->hasDefinition()) { 2998 InitIsInitFunc = true; 2999 llvm::Function *InitFuncToUse = InitFunc; 3000 if (isTemplateInstantiation(VD->getTemplateSpecializationKind())) 3001 InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl()); 3002 if (InitFuncToUse) 3003 Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(), 3004 InitFuncToUse); 3005 } else { 3006 // Emit a weak global function referring to the initialization function. 3007 // This function will not exist if the TU defining the thread_local 3008 // variable in question does not need any dynamic initialization for 3009 // its thread_local variables. 3010 Init = llvm::Function::Create(InitFnTy, 3011 llvm::GlobalVariable::ExternalWeakLinkage, 3012 InitFnName.str(), &CGM.getModule()); 3013 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction(); 3014 CGM.SetLLVMFunctionAttributes( 3015 GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false); 3016 } 3017 3018 if (Init) { 3019 Init->setVisibility(Var->getVisibility()); 3020 // Don't mark an extern_weak function DSO local on windows. 3021 if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage()) 3022 Init->setDSOLocal(Var->isDSOLocal()); 3023 } 3024 3025 llvm::LLVMContext &Context = CGM.getModule().getContext(); 3026 3027 // The linker on AIX is not happy with missing weak symbols. However, 3028 // other TUs will not know whether the initialization routine exists 3029 // so create an empty, init function to satisfy the linker. 3030 // This is needed whenever a thread wrapper function is not used, and 3031 // also when the symbol is weak. 3032 if (CGM.getTriple().isOSAIX() && VD->hasDefinition() && 3033 isEmittedWithConstantInitializer(VD, true) && 3034 !mayNeedDestruction(VD)) { 3035 // Init should be null. If it were non-null, then the logic above would 3036 // either be defining the function to be an alias or declaring the 3037 // function with the expectation that the definition of the variable 3038 // is elsewhere. 3039 assert(Init == nullptr && "Expected Init to be null."); 3040 3041 llvm::Function *Func = llvm::Function::Create( 3042 InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule()); 3043 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction(); 3044 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, 3045 cast<llvm::Function>(Func), 3046 /*IsThunk=*/false); 3047 // Create a function body that just returns 3048 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func); 3049 CGBuilderTy Builder(CGM, Entry); 3050 Builder.CreateRetVoid(); 3051 } 3052 3053 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper); 3054 CGBuilderTy Builder(CGM, Entry); 3055 if (HasConstantInitialization) { 3056 // No dynamic initialization to invoke. 3057 } else if (InitIsInitFunc) { 3058 if (Init) { 3059 llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init); 3060 if (isThreadWrapperReplaceable(VD, CGM)) { 3061 CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS); 3062 llvm::Function *Fn = 3063 cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee()); 3064 Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS); 3065 } 3066 } 3067 } else if (CGM.getTriple().isOSAIX()) { 3068 // On AIX, except if constinit and also neither of class type or of 3069 // (possibly multi-dimensional) array of class type, thread_local vars 3070 // will have init routines regardless of whether they are 3071 // const-initialized. Since the routine is guaranteed to exist, we can 3072 // unconditionally call it without testing for its existance. This 3073 // avoids potentially unresolved weak symbols which the AIX linker 3074 // isn't happy with. 3075 Builder.CreateCall(InitFnTy, Init); 3076 } else { 3077 // Don't know whether we have an init function. Call it if it exists. 3078 llvm::Value *Have = Builder.CreateIsNotNull(Init); 3079 llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper); 3080 llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper); 3081 Builder.CreateCondBr(Have, InitBB, ExitBB); 3082 3083 Builder.SetInsertPoint(InitBB); 3084 Builder.CreateCall(InitFnTy, Init); 3085 Builder.CreateBr(ExitBB); 3086 3087 Builder.SetInsertPoint(ExitBB); 3088 } 3089 3090 // For a reference, the result of the wrapper function is a pointer to 3091 // the referenced object. 3092 llvm::Value *Val = Builder.CreateThreadLocalAddress(Var); 3093 3094 if (VD->getType()->isReferenceType()) { 3095 CharUnits Align = CGM.getContext().getDeclAlign(VD); 3096 Val = Builder.CreateAlignedLoad(Var->getValueType(), Val, Align); 3097 } 3098 3099 Builder.CreateRet(Val); 3100 } 3101 } 3102 3103 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, 3104 const VarDecl *VD, 3105 QualType LValType) { 3106 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD); 3107 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val); 3108 3109 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper); 3110 CallVal->setCallingConv(Wrapper->getCallingConv()); 3111 3112 LValue LV; 3113 if (VD->getType()->isReferenceType()) 3114 LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType); 3115 else 3116 LV = CGF.MakeAddrLValue(CallVal, LValType, 3117 CGF.getContext().getDeclAlign(VD)); 3118 // FIXME: need setObjCGCLValueClass? 3119 return LV; 3120 } 3121 3122 /// Return whether the given global decl needs a VTT parameter, which it does 3123 /// if it's a base constructor or destructor with virtual bases. 3124 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) { 3125 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 3126 3127 // We don't have any virtual bases, just return early. 3128 if (!MD->getParent()->getNumVBases()) 3129 return false; 3130 3131 // Check if we have a base constructor. 3132 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base) 3133 return true; 3134 3135 // Check if we have a base destructor. 3136 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base) 3137 return true; 3138 3139 return false; 3140 } 3141 3142 namespace { 3143 class ItaniumRTTIBuilder { 3144 CodeGenModule &CGM; // Per-module state. 3145 llvm::LLVMContext &VMContext; 3146 const ItaniumCXXABI &CXXABI; // Per-module state. 3147 3148 /// Fields - The fields of the RTTI descriptor currently being built. 3149 SmallVector<llvm::Constant *, 16> Fields; 3150 3151 /// GetAddrOfTypeName - Returns the mangled type name of the given type. 3152 llvm::GlobalVariable * 3153 GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage); 3154 3155 /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI 3156 /// descriptor of the given type. 3157 llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty); 3158 3159 /// BuildVTablePointer - Build the vtable pointer for the given type. 3160 void BuildVTablePointer(const Type *Ty); 3161 3162 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single 3163 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b. 3164 void BuildSIClassTypeInfo(const CXXRecordDecl *RD); 3165 3166 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for 3167 /// classes with bases that do not satisfy the abi::__si_class_type_info 3168 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c. 3169 void BuildVMIClassTypeInfo(const CXXRecordDecl *RD); 3170 3171 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used 3172 /// for pointer types. 3173 void BuildPointerTypeInfo(QualType PointeeTy); 3174 3175 /// BuildObjCObjectTypeInfo - Build the appropriate kind of 3176 /// type_info for an object type. 3177 void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty); 3178 3179 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info 3180 /// struct, used for member pointer types. 3181 void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty); 3182 3183 public: 3184 ItaniumRTTIBuilder(const ItaniumCXXABI &ABI) 3185 : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {} 3186 3187 // Pointer type info flags. 3188 enum { 3189 /// PTI_Const - Type has const qualifier. 3190 PTI_Const = 0x1, 3191 3192 /// PTI_Volatile - Type has volatile qualifier. 3193 PTI_Volatile = 0x2, 3194 3195 /// PTI_Restrict - Type has restrict qualifier. 3196 PTI_Restrict = 0x4, 3197 3198 /// PTI_Incomplete - Type is incomplete. 3199 PTI_Incomplete = 0x8, 3200 3201 /// PTI_ContainingClassIncomplete - Containing class is incomplete. 3202 /// (in pointer to member). 3203 PTI_ContainingClassIncomplete = 0x10, 3204 3205 /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS). 3206 //PTI_TransactionSafe = 0x20, 3207 3208 /// PTI_Noexcept - Pointee is noexcept function (C++1z). 3209 PTI_Noexcept = 0x40, 3210 }; 3211 3212 // VMI type info flags. 3213 enum { 3214 /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance. 3215 VMI_NonDiamondRepeat = 0x1, 3216 3217 /// VMI_DiamondShaped - Class is diamond shaped. 3218 VMI_DiamondShaped = 0x2 3219 }; 3220 3221 // Base class type info flags. 3222 enum { 3223 /// BCTI_Virtual - Base class is virtual. 3224 BCTI_Virtual = 0x1, 3225 3226 /// BCTI_Public - Base class is public. 3227 BCTI_Public = 0x2 3228 }; 3229 3230 /// BuildTypeInfo - Build the RTTI type info struct for the given type, or 3231 /// link to an existing RTTI descriptor if one already exists. 3232 llvm::Constant *BuildTypeInfo(QualType Ty); 3233 3234 /// BuildTypeInfo - Build the RTTI type info struct for the given type. 3235 llvm::Constant *BuildTypeInfo( 3236 QualType Ty, 3237 llvm::GlobalVariable::LinkageTypes Linkage, 3238 llvm::GlobalValue::VisibilityTypes Visibility, 3239 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass); 3240 }; 3241 } 3242 3243 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName( 3244 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) { 3245 SmallString<256> Name; 3246 llvm::raw_svector_ostream Out(Name); 3247 CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out); 3248 3249 // We know that the mangled name of the type starts at index 4 of the 3250 // mangled name of the typename, so we can just index into it in order to 3251 // get the mangled name of the type. 3252 llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext, 3253 Name.substr(4)); 3254 auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy); 3255 3256 llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable( 3257 Name, Init->getType(), Linkage, Align.getAsAlign()); 3258 3259 GV->setInitializer(Init); 3260 3261 return GV; 3262 } 3263 3264 llvm::Constant * 3265 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) { 3266 // Mangle the RTTI name. 3267 SmallString<256> Name; 3268 llvm::raw_svector_ostream Out(Name); 3269 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out); 3270 3271 // Look for an existing global. 3272 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name); 3273 3274 if (!GV) { 3275 // Create a new global variable. 3276 // Note for the future: If we would ever like to do deferred emission of 3277 // RTTI, check if emitting vtables opportunistically need any adjustment. 3278 3279 GV = new llvm::GlobalVariable( 3280 CGM.getModule(), CGM.GlobalsInt8PtrTy, 3281 /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr, Name); 3282 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 3283 CGM.setGVProperties(GV, RD); 3284 // Import the typeinfo symbol when all non-inline virtual methods are 3285 // imported. 3286 if (CGM.getTarget().hasPS4DLLImportExport()) { 3287 if (RD && CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD)) { 3288 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass); 3289 CGM.setDSOLocal(GV); 3290 } 3291 } 3292 } 3293 3294 return GV; 3295 } 3296 3297 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type 3298 /// info for that type is defined in the standard library. 3299 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) { 3300 // Itanium C++ ABI 2.9.2: 3301 // Basic type information (e.g. for "int", "bool", etc.) will be kept in 3302 // the run-time support library. Specifically, the run-time support 3303 // library should contain type_info objects for the types X, X* and 3304 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char, 3305 // unsigned char, signed char, short, unsigned short, int, unsigned int, 3306 // long, unsigned long, long long, unsigned long long, float, double, 3307 // long double, char16_t, char32_t, and the IEEE 754r decimal and 3308 // half-precision floating point types. 3309 // 3310 // GCC also emits RTTI for __int128. 3311 // FIXME: We do not emit RTTI information for decimal types here. 3312 3313 // Types added here must also be added to EmitFundamentalRTTIDescriptors. 3314 switch (Ty->getKind()) { 3315 case BuiltinType::Void: 3316 case BuiltinType::NullPtr: 3317 case BuiltinType::Bool: 3318 case BuiltinType::WChar_S: 3319 case BuiltinType::WChar_U: 3320 case BuiltinType::Char_U: 3321 case BuiltinType::Char_S: 3322 case BuiltinType::UChar: 3323 case BuiltinType::SChar: 3324 case BuiltinType::Short: 3325 case BuiltinType::UShort: 3326 case BuiltinType::Int: 3327 case BuiltinType::UInt: 3328 case BuiltinType::Long: 3329 case BuiltinType::ULong: 3330 case BuiltinType::LongLong: 3331 case BuiltinType::ULongLong: 3332 case BuiltinType::Half: 3333 case BuiltinType::Float: 3334 case BuiltinType::Double: 3335 case BuiltinType::LongDouble: 3336 case BuiltinType::Float16: 3337 case BuiltinType::Float128: 3338 case BuiltinType::Ibm128: 3339 case BuiltinType::Char8: 3340 case BuiltinType::Char16: 3341 case BuiltinType::Char32: 3342 case BuiltinType::Int128: 3343 case BuiltinType::UInt128: 3344 return true; 3345 3346 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 3347 case BuiltinType::Id: 3348 #include "clang/Basic/OpenCLImageTypes.def" 3349 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 3350 case BuiltinType::Id: 3351 #include "clang/Basic/OpenCLExtensionTypes.def" 3352 case BuiltinType::OCLSampler: 3353 case BuiltinType::OCLEvent: 3354 case BuiltinType::OCLClkEvent: 3355 case BuiltinType::OCLQueue: 3356 case BuiltinType::OCLReserveID: 3357 #define SVE_TYPE(Name, Id, SingletonId) \ 3358 case BuiltinType::Id: 3359 #include "clang/Basic/AArch64SVEACLETypes.def" 3360 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 3361 case BuiltinType::Id: 3362 #include "clang/Basic/PPCTypes.def" 3363 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 3364 #include "clang/Basic/RISCVVTypes.def" 3365 #define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 3366 #include "clang/Basic/WebAssemblyReferenceTypes.def" 3367 case BuiltinType::ShortAccum: 3368 case BuiltinType::Accum: 3369 case BuiltinType::LongAccum: 3370 case BuiltinType::UShortAccum: 3371 case BuiltinType::UAccum: 3372 case BuiltinType::ULongAccum: 3373 case BuiltinType::ShortFract: 3374 case BuiltinType::Fract: 3375 case BuiltinType::LongFract: 3376 case BuiltinType::UShortFract: 3377 case BuiltinType::UFract: 3378 case BuiltinType::ULongFract: 3379 case BuiltinType::SatShortAccum: 3380 case BuiltinType::SatAccum: 3381 case BuiltinType::SatLongAccum: 3382 case BuiltinType::SatUShortAccum: 3383 case BuiltinType::SatUAccum: 3384 case BuiltinType::SatULongAccum: 3385 case BuiltinType::SatShortFract: 3386 case BuiltinType::SatFract: 3387 case BuiltinType::SatLongFract: 3388 case BuiltinType::SatUShortFract: 3389 case BuiltinType::SatUFract: 3390 case BuiltinType::SatULongFract: 3391 case BuiltinType::BFloat16: 3392 return false; 3393 3394 case BuiltinType::Dependent: 3395 #define BUILTIN_TYPE(Id, SingletonId) 3396 #define PLACEHOLDER_TYPE(Id, SingletonId) \ 3397 case BuiltinType::Id: 3398 #include "clang/AST/BuiltinTypes.def" 3399 llvm_unreachable("asking for RRTI for a placeholder type!"); 3400 3401 case BuiltinType::ObjCId: 3402 case BuiltinType::ObjCClass: 3403 case BuiltinType::ObjCSel: 3404 llvm_unreachable("FIXME: Objective-C types are unsupported!"); 3405 } 3406 3407 llvm_unreachable("Invalid BuiltinType Kind!"); 3408 } 3409 3410 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) { 3411 QualType PointeeTy = PointerTy->getPointeeType(); 3412 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy); 3413 if (!BuiltinTy) 3414 return false; 3415 3416 // Check the qualifiers. 3417 Qualifiers Quals = PointeeTy.getQualifiers(); 3418 Quals.removeConst(); 3419 3420 if (!Quals.empty()) 3421 return false; 3422 3423 return TypeInfoIsInStandardLibrary(BuiltinTy); 3424 } 3425 3426 /// IsStandardLibraryRTTIDescriptor - Returns whether the type 3427 /// information for the given type exists in the standard library. 3428 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) { 3429 // Type info for builtin types is defined in the standard library. 3430 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty)) 3431 return TypeInfoIsInStandardLibrary(BuiltinTy); 3432 3433 // Type info for some pointer types to builtin types is defined in the 3434 // standard library. 3435 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty)) 3436 return TypeInfoIsInStandardLibrary(PointerTy); 3437 3438 return false; 3439 } 3440 3441 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for 3442 /// the given type exists somewhere else, and that we should not emit the type 3443 /// information in this translation unit. Assumes that it is not a 3444 /// standard-library type. 3445 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM, 3446 QualType Ty) { 3447 ASTContext &Context = CGM.getContext(); 3448 3449 // If RTTI is disabled, assume it might be disabled in the 3450 // translation unit that defines any potential key function, too. 3451 if (!Context.getLangOpts().RTTI) return false; 3452 3453 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) { 3454 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl()); 3455 if (!RD->hasDefinition()) 3456 return false; 3457 3458 if (!RD->isDynamicClass()) 3459 return false; 3460 3461 // FIXME: this may need to be reconsidered if the key function 3462 // changes. 3463 // N.B. We must always emit the RTTI data ourselves if there exists a key 3464 // function. 3465 bool IsDLLImport = RD->hasAttr<DLLImportAttr>(); 3466 3467 // Don't import the RTTI but emit it locally. 3468 if (CGM.getTriple().isWindowsGNUEnvironment()) 3469 return false; 3470 3471 if (CGM.getVTables().isVTableExternal(RD)) { 3472 if (CGM.getTarget().hasPS4DLLImportExport()) 3473 return true; 3474 3475 return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment() 3476 ? false 3477 : true; 3478 } 3479 if (IsDLLImport) 3480 return true; 3481 } 3482 3483 return false; 3484 } 3485 3486 /// IsIncompleteClassType - Returns whether the given record type is incomplete. 3487 static bool IsIncompleteClassType(const RecordType *RecordTy) { 3488 return !RecordTy->getDecl()->isCompleteDefinition(); 3489 } 3490 3491 /// ContainsIncompleteClassType - Returns whether the given type contains an 3492 /// incomplete class type. This is true if 3493 /// 3494 /// * The given type is an incomplete class type. 3495 /// * The given type is a pointer type whose pointee type contains an 3496 /// incomplete class type. 3497 /// * The given type is a member pointer type whose class is an incomplete 3498 /// class type. 3499 /// * The given type is a member pointer type whoise pointee type contains an 3500 /// incomplete class type. 3501 /// is an indirect or direct pointer to an incomplete class type. 3502 static bool ContainsIncompleteClassType(QualType Ty) { 3503 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) { 3504 if (IsIncompleteClassType(RecordTy)) 3505 return true; 3506 } 3507 3508 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty)) 3509 return ContainsIncompleteClassType(PointerTy->getPointeeType()); 3510 3511 if (const MemberPointerType *MemberPointerTy = 3512 dyn_cast<MemberPointerType>(Ty)) { 3513 // Check if the class type is incomplete. 3514 const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass()); 3515 if (IsIncompleteClassType(ClassType)) 3516 return true; 3517 3518 return ContainsIncompleteClassType(MemberPointerTy->getPointeeType()); 3519 } 3520 3521 return false; 3522 } 3523 3524 // CanUseSingleInheritance - Return whether the given record decl has a "single, 3525 // public, non-virtual base at offset zero (i.e. the derived class is dynamic 3526 // iff the base is)", according to Itanium C++ ABI, 2.95p6b. 3527 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) { 3528 // Check the number of bases. 3529 if (RD->getNumBases() != 1) 3530 return false; 3531 3532 // Get the base. 3533 CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin(); 3534 3535 // Check that the base is not virtual. 3536 if (Base->isVirtual()) 3537 return false; 3538 3539 // Check that the base is public. 3540 if (Base->getAccessSpecifier() != AS_public) 3541 return false; 3542 3543 // Check that the class is dynamic iff the base is. 3544 auto *BaseDecl = 3545 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl()); 3546 if (!BaseDecl->isEmpty() && 3547 BaseDecl->isDynamicClass() != RD->isDynamicClass()) 3548 return false; 3549 3550 return true; 3551 } 3552 3553 void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) { 3554 // abi::__class_type_info. 3555 static const char * const ClassTypeInfo = 3556 "_ZTVN10__cxxabiv117__class_type_infoE"; 3557 // abi::__si_class_type_info. 3558 static const char * const SIClassTypeInfo = 3559 "_ZTVN10__cxxabiv120__si_class_type_infoE"; 3560 // abi::__vmi_class_type_info. 3561 static const char * const VMIClassTypeInfo = 3562 "_ZTVN10__cxxabiv121__vmi_class_type_infoE"; 3563 3564 const char *VTableName = nullptr; 3565 3566 switch (Ty->getTypeClass()) { 3567 #define TYPE(Class, Base) 3568 #define ABSTRACT_TYPE(Class, Base) 3569 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 3570 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 3571 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 3572 #include "clang/AST/TypeNodes.inc" 3573 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 3574 3575 case Type::LValueReference: 3576 case Type::RValueReference: 3577 llvm_unreachable("References shouldn't get here"); 3578 3579 case Type::Auto: 3580 case Type::DeducedTemplateSpecialization: 3581 llvm_unreachable("Undeduced type shouldn't get here"); 3582 3583 case Type::Pipe: 3584 llvm_unreachable("Pipe types shouldn't get here"); 3585 3586 case Type::Builtin: 3587 case Type::BitInt: 3588 // GCC treats vector and complex types as fundamental types. 3589 case Type::Vector: 3590 case Type::ExtVector: 3591 case Type::ConstantMatrix: 3592 case Type::Complex: 3593 case Type::Atomic: 3594 // FIXME: GCC treats block pointers as fundamental types?! 3595 case Type::BlockPointer: 3596 // abi::__fundamental_type_info. 3597 VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE"; 3598 break; 3599 3600 case Type::ConstantArray: 3601 case Type::IncompleteArray: 3602 case Type::VariableArray: 3603 // abi::__array_type_info. 3604 VTableName = "_ZTVN10__cxxabiv117__array_type_infoE"; 3605 break; 3606 3607 case Type::FunctionNoProto: 3608 case Type::FunctionProto: 3609 // abi::__function_type_info. 3610 VTableName = "_ZTVN10__cxxabiv120__function_type_infoE"; 3611 break; 3612 3613 case Type::Enum: 3614 // abi::__enum_type_info. 3615 VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE"; 3616 break; 3617 3618 case Type::Record: { 3619 const CXXRecordDecl *RD = 3620 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl()); 3621 3622 if (!RD->hasDefinition() || !RD->getNumBases()) { 3623 VTableName = ClassTypeInfo; 3624 } else if (CanUseSingleInheritance(RD)) { 3625 VTableName = SIClassTypeInfo; 3626 } else { 3627 VTableName = VMIClassTypeInfo; 3628 } 3629 3630 break; 3631 } 3632 3633 case Type::ObjCObject: 3634 // Ignore protocol qualifiers. 3635 Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr(); 3636 3637 // Handle id and Class. 3638 if (isa<BuiltinType>(Ty)) { 3639 VTableName = ClassTypeInfo; 3640 break; 3641 } 3642 3643 assert(isa<ObjCInterfaceType>(Ty)); 3644 [[fallthrough]]; 3645 3646 case Type::ObjCInterface: 3647 if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) { 3648 VTableName = SIClassTypeInfo; 3649 } else { 3650 VTableName = ClassTypeInfo; 3651 } 3652 break; 3653 3654 case Type::ObjCObjectPointer: 3655 case Type::Pointer: 3656 // abi::__pointer_type_info. 3657 VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE"; 3658 break; 3659 3660 case Type::MemberPointer: 3661 // abi::__pointer_to_member_type_info. 3662 VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE"; 3663 break; 3664 } 3665 3666 llvm::Constant *VTable = nullptr; 3667 3668 // Check if the alias exists. If it doesn't, then get or create the global. 3669 if (CGM.getItaniumVTableContext().isRelativeLayout()) 3670 VTable = CGM.getModule().getNamedAlias(VTableName); 3671 if (!VTable) { 3672 llvm::Type *Ty = llvm::ArrayType::get(CGM.GlobalsInt8PtrTy, 0); 3673 VTable = CGM.getModule().getOrInsertGlobal(VTableName, Ty); 3674 } 3675 3676 CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts())); 3677 3678 llvm::Type *PtrDiffTy = 3679 CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType()); 3680 3681 // The vtable address point is 2. 3682 if (CGM.getItaniumVTableContext().isRelativeLayout()) { 3683 // The vtable address point is 8 bytes after its start: 3684 // 4 for the offset to top + 4 for the relative offset to rtti. 3685 llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8); 3686 VTable = 3687 llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight); 3688 } else { 3689 llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2); 3690 VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.GlobalsInt8PtrTy, 3691 VTable, Two); 3692 } 3693 3694 Fields.push_back(VTable); 3695 } 3696 3697 /// Return the linkage that the type info and type info name constants 3698 /// should have for the given type. 3699 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM, 3700 QualType Ty) { 3701 // Itanium C++ ABI 2.9.5p7: 3702 // In addition, it and all of the intermediate abi::__pointer_type_info 3703 // structs in the chain down to the abi::__class_type_info for the 3704 // incomplete class type must be prevented from resolving to the 3705 // corresponding type_info structs for the complete class type, possibly 3706 // by making them local static objects. Finally, a dummy class RTTI is 3707 // generated for the incomplete type that will not resolve to the final 3708 // complete class RTTI (because the latter need not exist), possibly by 3709 // making it a local static object. 3710 if (ContainsIncompleteClassType(Ty)) 3711 return llvm::GlobalValue::InternalLinkage; 3712 3713 switch (Ty->getLinkage()) { 3714 case Linkage::Invalid: 3715 llvm_unreachable("Linkage hasn't been computed!"); 3716 3717 case Linkage::None: 3718 case Linkage::Internal: 3719 case Linkage::UniqueExternal: 3720 return llvm::GlobalValue::InternalLinkage; 3721 3722 case Linkage::VisibleNone: 3723 case Linkage::Module: 3724 case Linkage::External: 3725 // RTTI is not enabled, which means that this type info struct is going 3726 // to be used for exception handling. Give it linkonce_odr linkage. 3727 if (!CGM.getLangOpts().RTTI) 3728 return llvm::GlobalValue::LinkOnceODRLinkage; 3729 3730 if (const RecordType *Record = dyn_cast<RecordType>(Ty)) { 3731 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl()); 3732 if (RD->hasAttr<WeakAttr>()) 3733 return llvm::GlobalValue::WeakODRLinkage; 3734 if (CGM.getTriple().isWindowsItaniumEnvironment()) 3735 if (RD->hasAttr<DLLImportAttr>() && 3736 ShouldUseExternalRTTIDescriptor(CGM, Ty)) 3737 return llvm::GlobalValue::ExternalLinkage; 3738 // MinGW always uses LinkOnceODRLinkage for type info. 3739 if (RD->isDynamicClass() && 3740 !CGM.getContext() 3741 .getTargetInfo() 3742 .getTriple() 3743 .isWindowsGNUEnvironment()) 3744 return CGM.getVTableLinkage(RD); 3745 } 3746 3747 return llvm::GlobalValue::LinkOnceODRLinkage; 3748 } 3749 3750 llvm_unreachable("Invalid linkage!"); 3751 } 3752 3753 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) { 3754 // We want to operate on the canonical type. 3755 Ty = Ty.getCanonicalType(); 3756 3757 // Check if we've already emitted an RTTI descriptor for this type. 3758 SmallString<256> Name; 3759 llvm::raw_svector_ostream Out(Name); 3760 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out); 3761 3762 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name); 3763 if (OldGV && !OldGV->isDeclaration()) { 3764 assert(!OldGV->hasAvailableExternallyLinkage() && 3765 "available_externally typeinfos not yet implemented"); 3766 3767 return OldGV; 3768 } 3769 3770 // Check if there is already an external RTTI descriptor for this type. 3771 if (IsStandardLibraryRTTIDescriptor(Ty) || 3772 ShouldUseExternalRTTIDescriptor(CGM, Ty)) 3773 return GetAddrOfExternalRTTIDescriptor(Ty); 3774 3775 // Emit the standard library with external linkage. 3776 llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty); 3777 3778 // Give the type_info object and name the formal visibility of the 3779 // type itself. 3780 llvm::GlobalValue::VisibilityTypes llvmVisibility; 3781 if (llvm::GlobalValue::isLocalLinkage(Linkage)) 3782 // If the linkage is local, only default visibility makes sense. 3783 llvmVisibility = llvm::GlobalValue::DefaultVisibility; 3784 else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) == 3785 ItaniumCXXABI::RUK_NonUniqueHidden) 3786 llvmVisibility = llvm::GlobalValue::HiddenVisibility; 3787 else 3788 llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility()); 3789 3790 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass = 3791 llvm::GlobalValue::DefaultStorageClass; 3792 if (auto RD = Ty->getAsCXXRecordDecl()) { 3793 if ((CGM.getTriple().isWindowsItaniumEnvironment() && 3794 RD->hasAttr<DLLExportAttr>()) || 3795 (CGM.shouldMapVisibilityToDLLExport(RD) && 3796 !llvm::GlobalValue::isLocalLinkage(Linkage) && 3797 llvmVisibility == llvm::GlobalValue::DefaultVisibility)) 3798 DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass; 3799 } 3800 return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass); 3801 } 3802 3803 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo( 3804 QualType Ty, 3805 llvm::GlobalVariable::LinkageTypes Linkage, 3806 llvm::GlobalValue::VisibilityTypes Visibility, 3807 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) { 3808 // Add the vtable pointer. 3809 BuildVTablePointer(cast<Type>(Ty)); 3810 3811 // And the name. 3812 llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage); 3813 llvm::Constant *TypeNameField; 3814 3815 // If we're supposed to demote the visibility, be sure to set a flag 3816 // to use a string comparison for type_info comparisons. 3817 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness = 3818 CXXABI.classifyRTTIUniqueness(Ty, Linkage); 3819 if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) { 3820 // The flag is the sign bit, which on ARM64 is defined to be clear 3821 // for global pointers. This is very ARM64-specific. 3822 TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty); 3823 llvm::Constant *flag = 3824 llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63); 3825 TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag); 3826 TypeNameField = 3827 llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.GlobalsInt8PtrTy); 3828 } else { 3829 TypeNameField = TypeName; 3830 } 3831 Fields.push_back(TypeNameField); 3832 3833 switch (Ty->getTypeClass()) { 3834 #define TYPE(Class, Base) 3835 #define ABSTRACT_TYPE(Class, Base) 3836 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 3837 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 3838 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 3839 #include "clang/AST/TypeNodes.inc" 3840 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 3841 3842 // GCC treats vector types as fundamental types. 3843 case Type::Builtin: 3844 case Type::Vector: 3845 case Type::ExtVector: 3846 case Type::ConstantMatrix: 3847 case Type::Complex: 3848 case Type::BlockPointer: 3849 // Itanium C++ ABI 2.9.5p4: 3850 // abi::__fundamental_type_info adds no data members to std::type_info. 3851 break; 3852 3853 case Type::LValueReference: 3854 case Type::RValueReference: 3855 llvm_unreachable("References shouldn't get here"); 3856 3857 case Type::Auto: 3858 case Type::DeducedTemplateSpecialization: 3859 llvm_unreachable("Undeduced type shouldn't get here"); 3860 3861 case Type::Pipe: 3862 break; 3863 3864 case Type::BitInt: 3865 break; 3866 3867 case Type::ConstantArray: 3868 case Type::IncompleteArray: 3869 case Type::VariableArray: 3870 // Itanium C++ ABI 2.9.5p5: 3871 // abi::__array_type_info adds no data members to std::type_info. 3872 break; 3873 3874 case Type::FunctionNoProto: 3875 case Type::FunctionProto: 3876 // Itanium C++ ABI 2.9.5p5: 3877 // abi::__function_type_info adds no data members to std::type_info. 3878 break; 3879 3880 case Type::Enum: 3881 // Itanium C++ ABI 2.9.5p5: 3882 // abi::__enum_type_info adds no data members to std::type_info. 3883 break; 3884 3885 case Type::Record: { 3886 const CXXRecordDecl *RD = 3887 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl()); 3888 if (!RD->hasDefinition() || !RD->getNumBases()) { 3889 // We don't need to emit any fields. 3890 break; 3891 } 3892 3893 if (CanUseSingleInheritance(RD)) 3894 BuildSIClassTypeInfo(RD); 3895 else 3896 BuildVMIClassTypeInfo(RD); 3897 3898 break; 3899 } 3900 3901 case Type::ObjCObject: 3902 case Type::ObjCInterface: 3903 BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty)); 3904 break; 3905 3906 case Type::ObjCObjectPointer: 3907 BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType()); 3908 break; 3909 3910 case Type::Pointer: 3911 BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType()); 3912 break; 3913 3914 case Type::MemberPointer: 3915 BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty)); 3916 break; 3917 3918 case Type::Atomic: 3919 // No fields, at least for the moment. 3920 break; 3921 } 3922 3923 llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields); 3924 3925 SmallString<256> Name; 3926 llvm::raw_svector_ostream Out(Name); 3927 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out); 3928 llvm::Module &M = CGM.getModule(); 3929 llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name); 3930 llvm::GlobalVariable *GV = 3931 new llvm::GlobalVariable(M, Init->getType(), 3932 /*isConstant=*/true, Linkage, Init, Name); 3933 3934 // Export the typeinfo in the same circumstances as the vtable is exported. 3935 auto GVDLLStorageClass = DLLStorageClass; 3936 if (CGM.getTarget().hasPS4DLLImportExport()) { 3937 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) { 3938 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl()); 3939 if (RD->hasAttr<DLLExportAttr>() || 3940 CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD)) { 3941 GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass; 3942 } 3943 } 3944 } 3945 3946 // If there's already an old global variable, replace it with the new one. 3947 if (OldGV) { 3948 GV->takeName(OldGV); 3949 OldGV->replaceAllUsesWith(GV); 3950 OldGV->eraseFromParent(); 3951 } 3952 3953 if (CGM.supportsCOMDAT() && GV->isWeakForLinker()) 3954 GV->setComdat(M.getOrInsertComdat(GV->getName())); 3955 3956 CharUnits Align = CGM.getContext().toCharUnitsFromBits( 3957 CGM.getTarget().getPointerAlign(CGM.GetGlobalVarAddressSpace(nullptr))); 3958 GV->setAlignment(Align.getAsAlign()); 3959 3960 // The Itanium ABI specifies that type_info objects must be globally 3961 // unique, with one exception: if the type is an incomplete class 3962 // type or a (possibly indirect) pointer to one. That exception 3963 // affects the general case of comparing type_info objects produced 3964 // by the typeid operator, which is why the comparison operators on 3965 // std::type_info generally use the type_info name pointers instead 3966 // of the object addresses. However, the language's built-in uses 3967 // of RTTI generally require class types to be complete, even when 3968 // manipulating pointers to those class types. This allows the 3969 // implementation of dynamic_cast to rely on address equality tests, 3970 // which is much faster. 3971 3972 // All of this is to say that it's important that both the type_info 3973 // object and the type_info name be uniqued when weakly emitted. 3974 3975 TypeName->setVisibility(Visibility); 3976 CGM.setDSOLocal(TypeName); 3977 3978 GV->setVisibility(Visibility); 3979 CGM.setDSOLocal(GV); 3980 3981 TypeName->setDLLStorageClass(DLLStorageClass); 3982 GV->setDLLStorageClass(CGM.getTarget().hasPS4DLLImportExport() 3983 ? GVDLLStorageClass 3984 : DLLStorageClass); 3985 3986 TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition); 3987 GV->setPartition(CGM.getCodeGenOpts().SymbolPartition); 3988 3989 return GV; 3990 } 3991 3992 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info 3993 /// for the given Objective-C object type. 3994 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) { 3995 // Drop qualifiers. 3996 const Type *T = OT->getBaseType().getTypePtr(); 3997 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T)); 3998 3999 // The builtin types are abi::__class_type_infos and don't require 4000 // extra fields. 4001 if (isa<BuiltinType>(T)) return; 4002 4003 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl(); 4004 ObjCInterfaceDecl *Super = Class->getSuperClass(); 4005 4006 // Root classes are also __class_type_info. 4007 if (!Super) return; 4008 4009 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super); 4010 4011 // Everything else is single inheritance. 4012 llvm::Constant *BaseTypeInfo = 4013 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy); 4014 Fields.push_back(BaseTypeInfo); 4015 } 4016 4017 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single 4018 /// inheritance, according to the Itanium C++ ABI, 2.95p6b. 4019 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) { 4020 // Itanium C++ ABI 2.9.5p6b: 4021 // It adds to abi::__class_type_info a single member pointing to the 4022 // type_info structure for the base type, 4023 llvm::Constant *BaseTypeInfo = 4024 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType()); 4025 Fields.push_back(BaseTypeInfo); 4026 } 4027 4028 namespace { 4029 /// SeenBases - Contains virtual and non-virtual bases seen when traversing 4030 /// a class hierarchy. 4031 struct SeenBases { 4032 llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases; 4033 llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases; 4034 }; 4035 } 4036 4037 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in 4038 /// abi::__vmi_class_type_info. 4039 /// 4040 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base, 4041 SeenBases &Bases) { 4042 4043 unsigned Flags = 0; 4044 4045 auto *BaseDecl = 4046 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl()); 4047 4048 if (Base->isVirtual()) { 4049 // Mark the virtual base as seen. 4050 if (!Bases.VirtualBases.insert(BaseDecl).second) { 4051 // If this virtual base has been seen before, then the class is diamond 4052 // shaped. 4053 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped; 4054 } else { 4055 if (Bases.NonVirtualBases.count(BaseDecl)) 4056 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat; 4057 } 4058 } else { 4059 // Mark the non-virtual base as seen. 4060 if (!Bases.NonVirtualBases.insert(BaseDecl).second) { 4061 // If this non-virtual base has been seen before, then the class has non- 4062 // diamond shaped repeated inheritance. 4063 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat; 4064 } else { 4065 if (Bases.VirtualBases.count(BaseDecl)) 4066 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat; 4067 } 4068 } 4069 4070 // Walk all bases. 4071 for (const auto &I : BaseDecl->bases()) 4072 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases); 4073 4074 return Flags; 4075 } 4076 4077 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) { 4078 unsigned Flags = 0; 4079 SeenBases Bases; 4080 4081 // Walk all bases. 4082 for (const auto &I : RD->bases()) 4083 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases); 4084 4085 return Flags; 4086 } 4087 4088 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for 4089 /// classes with bases that do not satisfy the abi::__si_class_type_info 4090 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c. 4091 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) { 4092 llvm::Type *UnsignedIntLTy = 4093 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy); 4094 4095 // Itanium C++ ABI 2.9.5p6c: 4096 // __flags is a word with flags describing details about the class 4097 // structure, which may be referenced by using the __flags_masks 4098 // enumeration. These flags refer to both direct and indirect bases. 4099 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD); 4100 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags)); 4101 4102 // Itanium C++ ABI 2.9.5p6c: 4103 // __base_count is a word with the number of direct proper base class 4104 // descriptions that follow. 4105 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases())); 4106 4107 if (!RD->getNumBases()) 4108 return; 4109 4110 // Now add the base class descriptions. 4111 4112 // Itanium C++ ABI 2.9.5p6c: 4113 // __base_info[] is an array of base class descriptions -- one for every 4114 // direct proper base. Each description is of the type: 4115 // 4116 // struct abi::__base_class_type_info { 4117 // public: 4118 // const __class_type_info *__base_type; 4119 // long __offset_flags; 4120 // 4121 // enum __offset_flags_masks { 4122 // __virtual_mask = 0x1, 4123 // __public_mask = 0x2, 4124 // __offset_shift = 8 4125 // }; 4126 // }; 4127 4128 // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long 4129 // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on 4130 // LLP64 platforms. 4131 // FIXME: Consider updating libc++abi to match, and extend this logic to all 4132 // LLP64 platforms. 4133 QualType OffsetFlagsTy = CGM.getContext().LongTy; 4134 const TargetInfo &TI = CGM.getContext().getTargetInfo(); 4135 if (TI.getTriple().isOSCygMing() && 4136 TI.getPointerWidth(LangAS::Default) > TI.getLongWidth()) 4137 OffsetFlagsTy = CGM.getContext().LongLongTy; 4138 llvm::Type *OffsetFlagsLTy = 4139 CGM.getTypes().ConvertType(OffsetFlagsTy); 4140 4141 for (const auto &Base : RD->bases()) { 4142 // The __base_type member points to the RTTI for the base type. 4143 Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType())); 4144 4145 auto *BaseDecl = 4146 cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl()); 4147 4148 int64_t OffsetFlags = 0; 4149 4150 // All but the lower 8 bits of __offset_flags are a signed offset. 4151 // For a non-virtual base, this is the offset in the object of the base 4152 // subobject. For a virtual base, this is the offset in the virtual table of 4153 // the virtual base offset for the virtual base referenced (negative). 4154 CharUnits Offset; 4155 if (Base.isVirtual()) 4156 Offset = 4157 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl); 4158 else { 4159 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); 4160 Offset = Layout.getBaseClassOffset(BaseDecl); 4161 }; 4162 4163 OffsetFlags = uint64_t(Offset.getQuantity()) << 8; 4164 4165 // The low-order byte of __offset_flags contains flags, as given by the 4166 // masks from the enumeration __offset_flags_masks. 4167 if (Base.isVirtual()) 4168 OffsetFlags |= BCTI_Virtual; 4169 if (Base.getAccessSpecifier() == AS_public) 4170 OffsetFlags |= BCTI_Public; 4171 4172 Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags)); 4173 } 4174 } 4175 4176 /// Compute the flags for a __pbase_type_info, and remove the corresponding 4177 /// pieces from \p Type. 4178 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) { 4179 unsigned Flags = 0; 4180 4181 if (Type.isConstQualified()) 4182 Flags |= ItaniumRTTIBuilder::PTI_Const; 4183 if (Type.isVolatileQualified()) 4184 Flags |= ItaniumRTTIBuilder::PTI_Volatile; 4185 if (Type.isRestrictQualified()) 4186 Flags |= ItaniumRTTIBuilder::PTI_Restrict; 4187 Type = Type.getUnqualifiedType(); 4188 4189 // Itanium C++ ABI 2.9.5p7: 4190 // When the abi::__pbase_type_info is for a direct or indirect pointer to an 4191 // incomplete class type, the incomplete target type flag is set. 4192 if (ContainsIncompleteClassType(Type)) 4193 Flags |= ItaniumRTTIBuilder::PTI_Incomplete; 4194 4195 if (auto *Proto = Type->getAs<FunctionProtoType>()) { 4196 if (Proto->isNothrow()) { 4197 Flags |= ItaniumRTTIBuilder::PTI_Noexcept; 4198 Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None); 4199 } 4200 } 4201 4202 return Flags; 4203 } 4204 4205 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, 4206 /// used for pointer types. 4207 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) { 4208 // Itanium C++ ABI 2.9.5p7: 4209 // __flags is a flag word describing the cv-qualification and other 4210 // attributes of the type pointed to 4211 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy); 4212 4213 llvm::Type *UnsignedIntLTy = 4214 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy); 4215 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags)); 4216 4217 // Itanium C++ ABI 2.9.5p7: 4218 // __pointee is a pointer to the std::type_info derivation for the 4219 // unqualified type being pointed to. 4220 llvm::Constant *PointeeTypeInfo = 4221 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy); 4222 Fields.push_back(PointeeTypeInfo); 4223 } 4224 4225 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info 4226 /// struct, used for member pointer types. 4227 void 4228 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) { 4229 QualType PointeeTy = Ty->getPointeeType(); 4230 4231 // Itanium C++ ABI 2.9.5p7: 4232 // __flags is a flag word describing the cv-qualification and other 4233 // attributes of the type pointed to. 4234 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy); 4235 4236 const RecordType *ClassType = cast<RecordType>(Ty->getClass()); 4237 if (IsIncompleteClassType(ClassType)) 4238 Flags |= PTI_ContainingClassIncomplete; 4239 4240 llvm::Type *UnsignedIntLTy = 4241 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy); 4242 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags)); 4243 4244 // Itanium C++ ABI 2.9.5p7: 4245 // __pointee is a pointer to the std::type_info derivation for the 4246 // unqualified type being pointed to. 4247 llvm::Constant *PointeeTypeInfo = 4248 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy); 4249 Fields.push_back(PointeeTypeInfo); 4250 4251 // Itanium C++ ABI 2.9.5p9: 4252 // __context is a pointer to an abi::__class_type_info corresponding to the 4253 // class type containing the member pointed to 4254 // (e.g., the "A" in "int A::*"). 4255 Fields.push_back( 4256 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0))); 4257 } 4258 4259 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) { 4260 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty); 4261 } 4262 4263 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) { 4264 // Types added here must also be added to TypeInfoIsInStandardLibrary. 4265 QualType FundamentalTypes[] = { 4266 getContext().VoidTy, getContext().NullPtrTy, 4267 getContext().BoolTy, getContext().WCharTy, 4268 getContext().CharTy, getContext().UnsignedCharTy, 4269 getContext().SignedCharTy, getContext().ShortTy, 4270 getContext().UnsignedShortTy, getContext().IntTy, 4271 getContext().UnsignedIntTy, getContext().LongTy, 4272 getContext().UnsignedLongTy, getContext().LongLongTy, 4273 getContext().UnsignedLongLongTy, getContext().Int128Ty, 4274 getContext().UnsignedInt128Ty, getContext().HalfTy, 4275 getContext().FloatTy, getContext().DoubleTy, 4276 getContext().LongDoubleTy, getContext().Float128Ty, 4277 getContext().Char8Ty, getContext().Char16Ty, 4278 getContext().Char32Ty 4279 }; 4280 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass = 4281 RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(RD) 4282 ? llvm::GlobalValue::DLLExportStorageClass 4283 : llvm::GlobalValue::DefaultStorageClass; 4284 llvm::GlobalValue::VisibilityTypes Visibility = 4285 CodeGenModule::GetLLVMVisibility(RD->getVisibility()); 4286 for (const QualType &FundamentalType : FundamentalTypes) { 4287 QualType PointerType = getContext().getPointerType(FundamentalType); 4288 QualType PointerTypeConst = getContext().getPointerType( 4289 FundamentalType.withConst()); 4290 for (QualType Type : {FundamentalType, PointerType, PointerTypeConst}) 4291 ItaniumRTTIBuilder(*this).BuildTypeInfo( 4292 Type, llvm::GlobalValue::ExternalLinkage, 4293 Visibility, DLLStorageClass); 4294 } 4295 } 4296 4297 /// What sort of uniqueness rules should we use for the RTTI for the 4298 /// given type? 4299 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness( 4300 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const { 4301 if (shouldRTTIBeUnique()) 4302 return RUK_Unique; 4303 4304 // It's only necessary for linkonce_odr or weak_odr linkage. 4305 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage && 4306 Linkage != llvm::GlobalValue::WeakODRLinkage) 4307 return RUK_Unique; 4308 4309 // It's only necessary with default visibility. 4310 if (CanTy->getVisibility() != DefaultVisibility) 4311 return RUK_Unique; 4312 4313 // If we're not required to publish this symbol, hide it. 4314 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage) 4315 return RUK_NonUniqueHidden; 4316 4317 // If we're required to publish this symbol, as we might be under an 4318 // explicit instantiation, leave it with default visibility but 4319 // enable string-comparisons. 4320 assert(Linkage == llvm::GlobalValue::WeakODRLinkage); 4321 return RUK_NonUniqueVisible; 4322 } 4323 4324 // Find out how to codegen the complete destructor and constructor 4325 namespace { 4326 enum class StructorCodegen { Emit, RAUW, Alias, COMDAT }; 4327 } 4328 static StructorCodegen getCodegenToUse(CodeGenModule &CGM, 4329 const CXXMethodDecl *MD) { 4330 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases) 4331 return StructorCodegen::Emit; 4332 4333 // The complete and base structors are not equivalent if there are any virtual 4334 // bases, so emit separate functions. 4335 if (MD->getParent()->getNumVBases()) 4336 return StructorCodegen::Emit; 4337 4338 GlobalDecl AliasDecl; 4339 if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) { 4340 AliasDecl = GlobalDecl(DD, Dtor_Complete); 4341 } else { 4342 const auto *CD = cast<CXXConstructorDecl>(MD); 4343 AliasDecl = GlobalDecl(CD, Ctor_Complete); 4344 } 4345 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl); 4346 4347 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage)) 4348 return StructorCodegen::RAUW; 4349 4350 // FIXME: Should we allow available_externally aliases? 4351 if (!llvm::GlobalAlias::isValidLinkage(Linkage)) 4352 return StructorCodegen::RAUW; 4353 4354 if (llvm::GlobalValue::isWeakForLinker(Linkage)) { 4355 // Only ELF and wasm support COMDATs with arbitrary names (C5/D5). 4356 if (CGM.getTarget().getTriple().isOSBinFormatELF() || 4357 CGM.getTarget().getTriple().isOSBinFormatWasm()) 4358 return StructorCodegen::COMDAT; 4359 return StructorCodegen::Emit; 4360 } 4361 4362 return StructorCodegen::Alias; 4363 } 4364 4365 static void emitConstructorDestructorAlias(CodeGenModule &CGM, 4366 GlobalDecl AliasDecl, 4367 GlobalDecl TargetDecl) { 4368 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl); 4369 4370 StringRef MangledName = CGM.getMangledName(AliasDecl); 4371 llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName); 4372 if (Entry && !Entry->isDeclaration()) 4373 return; 4374 4375 auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl)); 4376 4377 // Create the alias with no name. 4378 auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee); 4379 4380 // Constructors and destructors are always unnamed_addr. 4381 Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); 4382 4383 // Switch any previous uses to the alias. 4384 if (Entry) { 4385 assert(Entry->getType() == Aliasee->getType() && 4386 "declaration exists with different type"); 4387 Alias->takeName(Entry); 4388 Entry->replaceAllUsesWith(Alias); 4389 Entry->eraseFromParent(); 4390 } else { 4391 Alias->setName(MangledName); 4392 } 4393 4394 // Finally, set up the alias with its proper name and attributes. 4395 CGM.SetCommonAttributes(AliasDecl, Alias); 4396 } 4397 4398 void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) { 4399 auto *MD = cast<CXXMethodDecl>(GD.getDecl()); 4400 auto *CD = dyn_cast<CXXConstructorDecl>(MD); 4401 const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD); 4402 4403 StructorCodegen CGType = getCodegenToUse(CGM, MD); 4404 4405 if (CD ? GD.getCtorType() == Ctor_Complete 4406 : GD.getDtorType() == Dtor_Complete) { 4407 GlobalDecl BaseDecl; 4408 if (CD) 4409 BaseDecl = GD.getWithCtorType(Ctor_Base); 4410 else 4411 BaseDecl = GD.getWithDtorType(Dtor_Base); 4412 4413 if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) { 4414 emitConstructorDestructorAlias(CGM, GD, BaseDecl); 4415 return; 4416 } 4417 4418 if (CGType == StructorCodegen::RAUW) { 4419 StringRef MangledName = CGM.getMangledName(GD); 4420 auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl); 4421 CGM.addReplacement(MangledName, Aliasee); 4422 return; 4423 } 4424 } 4425 4426 // The base destructor is equivalent to the base destructor of its 4427 // base class if there is exactly one non-virtual base class with a 4428 // non-trivial destructor, there are no fields with a non-trivial 4429 // destructor, and the body of the destructor is trivial. 4430 if (DD && GD.getDtorType() == Dtor_Base && 4431 CGType != StructorCodegen::COMDAT && 4432 !CGM.TryEmitBaseDestructorAsAlias(DD)) 4433 return; 4434 4435 // FIXME: The deleting destructor is equivalent to the selected operator 4436 // delete if: 4437 // * either the delete is a destroying operator delete or the destructor 4438 // would be trivial if it weren't virtual, 4439 // * the conversion from the 'this' parameter to the first parameter of the 4440 // destructor is equivalent to a bitcast, 4441 // * the destructor does not have an implicit "this" return, and 4442 // * the operator delete has the same calling convention and IR function type 4443 // as the destructor. 4444 // In such cases we should try to emit the deleting dtor as an alias to the 4445 // selected 'operator delete'. 4446 4447 llvm::Function *Fn = CGM.codegenCXXStructor(GD); 4448 4449 if (CGType == StructorCodegen::COMDAT) { 4450 SmallString<256> Buffer; 4451 llvm::raw_svector_ostream Out(Buffer); 4452 if (DD) 4453 getMangleContext().mangleCXXDtorComdat(DD, Out); 4454 else 4455 getMangleContext().mangleCXXCtorComdat(CD, Out); 4456 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str()); 4457 Fn->setComdat(C); 4458 } else { 4459 CGM.maybeSetTrivialComdat(*MD, *Fn); 4460 } 4461 } 4462 4463 static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) { 4464 // void *__cxa_begin_catch(void*); 4465 llvm::FunctionType *FTy = llvm::FunctionType::get( 4466 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false); 4467 4468 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch"); 4469 } 4470 4471 static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) { 4472 // void __cxa_end_catch(); 4473 llvm::FunctionType *FTy = 4474 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); 4475 4476 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch"); 4477 } 4478 4479 static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) { 4480 // void *__cxa_get_exception_ptr(void*); 4481 llvm::FunctionType *FTy = llvm::FunctionType::get( 4482 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false); 4483 4484 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr"); 4485 } 4486 4487 namespace { 4488 /// A cleanup to call __cxa_end_catch. In many cases, the caught 4489 /// exception type lets us state definitively that the thrown exception 4490 /// type does not have a destructor. In particular: 4491 /// - Catch-alls tell us nothing, so we have to conservatively 4492 /// assume that the thrown exception might have a destructor. 4493 /// - Catches by reference behave according to their base types. 4494 /// - Catches of non-record types will only trigger for exceptions 4495 /// of non-record types, which never have destructors. 4496 /// - Catches of record types can trigger for arbitrary subclasses 4497 /// of the caught type, so we have to assume the actual thrown 4498 /// exception type might have a throwing destructor, even if the 4499 /// caught type's destructor is trivial or nothrow. 4500 struct CallEndCatch final : EHScopeStack::Cleanup { 4501 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {} 4502 bool MightThrow; 4503 4504 void Emit(CodeGenFunction &CGF, Flags flags) override { 4505 if (!MightThrow) { 4506 CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM)); 4507 return; 4508 } 4509 4510 CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM)); 4511 } 4512 }; 4513 } 4514 4515 /// Emits a call to __cxa_begin_catch and enters a cleanup to call 4516 /// __cxa_end_catch. If -fassume-nothrow-exception-dtor is specified, we assume 4517 /// that the exception object's dtor is nothrow, therefore the __cxa_end_catch 4518 /// call can be marked as nounwind even if EndMightThrow is true. 4519 /// 4520 /// \param EndMightThrow - true if __cxa_end_catch might throw 4521 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF, 4522 llvm::Value *Exn, 4523 bool EndMightThrow) { 4524 llvm::CallInst *call = 4525 CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn); 4526 4527 CGF.EHStack.pushCleanup<CallEndCatch>( 4528 NormalAndEHCleanup, 4529 EndMightThrow && !CGF.CGM.getLangOpts().AssumeNothrowExceptionDtor); 4530 4531 return call; 4532 } 4533 4534 /// A "special initializer" callback for initializing a catch 4535 /// parameter during catch initialization. 4536 static void InitCatchParam(CodeGenFunction &CGF, 4537 const VarDecl &CatchParam, 4538 Address ParamAddr, 4539 SourceLocation Loc) { 4540 // Load the exception from where the landing pad saved it. 4541 llvm::Value *Exn = CGF.getExceptionFromSlot(); 4542 4543 CanQualType CatchType = 4544 CGF.CGM.getContext().getCanonicalType(CatchParam.getType()); 4545 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType); 4546 4547 // If we're catching by reference, we can just cast the object 4548 // pointer to the appropriate pointer. 4549 if (isa<ReferenceType>(CatchType)) { 4550 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType(); 4551 bool EndCatchMightThrow = CaughtType->isRecordType(); 4552 4553 // __cxa_begin_catch returns the adjusted object pointer. 4554 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow); 4555 4556 // We have no way to tell the personality function that we're 4557 // catching by reference, so if we're catching a pointer, 4558 // __cxa_begin_catch will actually return that pointer by value. 4559 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) { 4560 QualType PointeeType = PT->getPointeeType(); 4561 4562 // When catching by reference, generally we should just ignore 4563 // this by-value pointer and use the exception object instead. 4564 if (!PointeeType->isRecordType()) { 4565 4566 // Exn points to the struct _Unwind_Exception header, which 4567 // we have to skip past in order to reach the exception data. 4568 unsigned HeaderSize = 4569 CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException(); 4570 AdjustedExn = 4571 CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize); 4572 4573 // However, if we're catching a pointer-to-record type that won't 4574 // work, because the personality function might have adjusted 4575 // the pointer. There's actually no way for us to fully satisfy 4576 // the language/ABI contract here: we can't use Exn because it 4577 // might have the wrong adjustment, but we can't use the by-value 4578 // pointer because it's off by a level of abstraction. 4579 // 4580 // The current solution is to dump the adjusted pointer into an 4581 // alloca, which breaks language semantics (because changing the 4582 // pointer doesn't change the exception) but at least works. 4583 // The better solution would be to filter out non-exact matches 4584 // and rethrow them, but this is tricky because the rethrow 4585 // really needs to be catchable by other sites at this landing 4586 // pad. The best solution is to fix the personality function. 4587 } else { 4588 // Pull the pointer for the reference type off. 4589 llvm::Type *PtrTy = CGF.ConvertTypeForMem(CaughtType); 4590 4591 // Create the temporary and write the adjusted pointer into it. 4592 Address ExnPtrTmp = 4593 CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp"); 4594 llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy); 4595 CGF.Builder.CreateStore(Casted, ExnPtrTmp); 4596 4597 // Bind the reference to the temporary. 4598 AdjustedExn = ExnPtrTmp.getPointer(); 4599 } 4600 } 4601 4602 llvm::Value *ExnCast = 4603 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref"); 4604 CGF.Builder.CreateStore(ExnCast, ParamAddr); 4605 return; 4606 } 4607 4608 // Scalars and complexes. 4609 TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType); 4610 if (TEK != TEK_Aggregate) { 4611 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false); 4612 4613 // If the catch type is a pointer type, __cxa_begin_catch returns 4614 // the pointer by value. 4615 if (CatchType->hasPointerRepresentation()) { 4616 llvm::Value *CastExn = 4617 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted"); 4618 4619 switch (CatchType.getQualifiers().getObjCLifetime()) { 4620 case Qualifiers::OCL_Strong: 4621 CastExn = CGF.EmitARCRetainNonBlock(CastExn); 4622 [[fallthrough]]; 4623 4624 case Qualifiers::OCL_None: 4625 case Qualifiers::OCL_ExplicitNone: 4626 case Qualifiers::OCL_Autoreleasing: 4627 CGF.Builder.CreateStore(CastExn, ParamAddr); 4628 return; 4629 4630 case Qualifiers::OCL_Weak: 4631 CGF.EmitARCInitWeak(ParamAddr, CastExn); 4632 return; 4633 } 4634 llvm_unreachable("bad ownership qualifier!"); 4635 } 4636 4637 // Otherwise, it returns a pointer into the exception object. 4638 4639 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(AdjustedExn, CatchType); 4640 LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType); 4641 switch (TEK) { 4642 case TEK_Complex: 4643 CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV, 4644 /*init*/ true); 4645 return; 4646 case TEK_Scalar: { 4647 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc); 4648 CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true); 4649 return; 4650 } 4651 case TEK_Aggregate: 4652 llvm_unreachable("evaluation kind filtered out!"); 4653 } 4654 llvm_unreachable("bad evaluation kind"); 4655 } 4656 4657 assert(isa<RecordType>(CatchType) && "unexpected catch type!"); 4658 auto catchRD = CatchType->getAsCXXRecordDecl(); 4659 CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD); 4660 4661 llvm::Type *PtrTy = CGF.UnqualPtrTy; // addrspace 0 ok 4662 4663 // Check for a copy expression. If we don't have a copy expression, 4664 // that means a trivial copy is okay. 4665 const Expr *copyExpr = CatchParam.getInit(); 4666 if (!copyExpr) { 4667 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true); 4668 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy), 4669 LLVMCatchTy, caughtExnAlignment); 4670 LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType); 4671 LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType); 4672 CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap); 4673 return; 4674 } 4675 4676 // We have to call __cxa_get_exception_ptr to get the adjusted 4677 // pointer before copying. 4678 llvm::CallInst *rawAdjustedExn = 4679 CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn); 4680 4681 // Cast that to the appropriate type. 4682 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy), 4683 LLVMCatchTy, caughtExnAlignment); 4684 4685 // The copy expression is defined in terms of an OpaqueValueExpr. 4686 // Find it and map it to the adjusted expression. 4687 CodeGenFunction::OpaqueValueMapping 4688 opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr), 4689 CGF.MakeAddrLValue(adjustedExn, CatchParam.getType())); 4690 4691 // Call the copy ctor in a terminate scope. 4692 CGF.EHStack.pushTerminate(); 4693 4694 // Perform the copy construction. 4695 CGF.EmitAggExpr(copyExpr, 4696 AggValueSlot::forAddr(ParamAddr, Qualifiers(), 4697 AggValueSlot::IsNotDestructed, 4698 AggValueSlot::DoesNotNeedGCBarriers, 4699 AggValueSlot::IsNotAliased, 4700 AggValueSlot::DoesNotOverlap)); 4701 4702 // Leave the terminate scope. 4703 CGF.EHStack.popTerminate(); 4704 4705 // Undo the opaque value mapping. 4706 opaque.pop(); 4707 4708 // Finally we can call __cxa_begin_catch. 4709 CallBeginCatch(CGF, Exn, true); 4710 } 4711 4712 /// Begins a catch statement by initializing the catch variable and 4713 /// calling __cxa_begin_catch. 4714 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF, 4715 const CXXCatchStmt *S) { 4716 // We have to be very careful with the ordering of cleanups here: 4717 // C++ [except.throw]p4: 4718 // The destruction [of the exception temporary] occurs 4719 // immediately after the destruction of the object declared in 4720 // the exception-declaration in the handler. 4721 // 4722 // So the precise ordering is: 4723 // 1. Construct catch variable. 4724 // 2. __cxa_begin_catch 4725 // 3. Enter __cxa_end_catch cleanup 4726 // 4. Enter dtor cleanup 4727 // 4728 // We do this by using a slightly abnormal initialization process. 4729 // Delegation sequence: 4730 // - ExitCXXTryStmt opens a RunCleanupsScope 4731 // - EmitAutoVarAlloca creates the variable and debug info 4732 // - InitCatchParam initializes the variable from the exception 4733 // - CallBeginCatch calls __cxa_begin_catch 4734 // - CallBeginCatch enters the __cxa_end_catch cleanup 4735 // - EmitAutoVarCleanups enters the variable destructor cleanup 4736 // - EmitCXXTryStmt emits the code for the catch body 4737 // - EmitCXXTryStmt close the RunCleanupsScope 4738 4739 VarDecl *CatchParam = S->getExceptionDecl(); 4740 if (!CatchParam) { 4741 llvm::Value *Exn = CGF.getExceptionFromSlot(); 4742 CallBeginCatch(CGF, Exn, true); 4743 return; 4744 } 4745 4746 // Emit the local. 4747 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam); 4748 InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc()); 4749 CGF.EmitAutoVarCleanups(var); 4750 } 4751 4752 /// Get or define the following function: 4753 /// void @__clang_call_terminate(i8* %exn) nounwind noreturn 4754 /// This code is used only in C++. 4755 static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) { 4756 ASTContext &C = CGM.getContext(); 4757 const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration( 4758 C.VoidTy, {C.getPointerType(C.CharTy)}); 4759 llvm::FunctionType *fnTy = CGM.getTypes().GetFunctionType(FI); 4760 llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction( 4761 fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true); 4762 llvm::Function *fn = 4763 cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts()); 4764 if (fn->empty()) { 4765 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, fn, /*IsThunk=*/false); 4766 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, fn); 4767 fn->setDoesNotThrow(); 4768 fn->setDoesNotReturn(); 4769 4770 // What we really want is to massively penalize inlining without 4771 // forbidding it completely. The difference between that and 4772 // 'noinline' is negligible. 4773 fn->addFnAttr(llvm::Attribute::NoInline); 4774 4775 // Allow this function to be shared across translation units, but 4776 // we don't want it to turn into an exported symbol. 4777 fn->setLinkage(llvm::Function::LinkOnceODRLinkage); 4778 fn->setVisibility(llvm::Function::HiddenVisibility); 4779 if (CGM.supportsCOMDAT()) 4780 fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName())); 4781 4782 // Set up the function. 4783 llvm::BasicBlock *entry = 4784 llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn); 4785 CGBuilderTy builder(CGM, entry); 4786 4787 // Pull the exception pointer out of the parameter list. 4788 llvm::Value *exn = &*fn->arg_begin(); 4789 4790 // Call __cxa_begin_catch(exn). 4791 llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn); 4792 catchCall->setDoesNotThrow(); 4793 catchCall->setCallingConv(CGM.getRuntimeCC()); 4794 4795 // Call std::terminate(). 4796 llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn()); 4797 termCall->setDoesNotThrow(); 4798 termCall->setDoesNotReturn(); 4799 termCall->setCallingConv(CGM.getRuntimeCC()); 4800 4801 // std::terminate cannot return. 4802 builder.CreateUnreachable(); 4803 } 4804 return fnRef; 4805 } 4806 4807 llvm::CallInst * 4808 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF, 4809 llvm::Value *Exn) { 4810 // In C++, we want to call __cxa_begin_catch() before terminating. 4811 if (Exn) { 4812 assert(CGF.CGM.getLangOpts().CPlusPlus); 4813 return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn); 4814 } 4815 return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn()); 4816 } 4817 4818 std::pair<llvm::Value *, const CXXRecordDecl *> 4819 ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This, 4820 const CXXRecordDecl *RD) { 4821 return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD}; 4822 } 4823 4824 void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF, 4825 const CXXCatchStmt *C) { 4826 if (CGF.getTarget().hasFeature("exception-handling")) 4827 CGF.EHStack.pushCleanup<CatchRetScope>( 4828 NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad)); 4829 ItaniumCXXABI::emitBeginCatch(CGF, C); 4830 } 4831 4832 llvm::CallInst * 4833 WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF, 4834 llvm::Value *Exn) { 4835 // Itanium ABI calls __clang_call_terminate(), which __cxa_begin_catch() on 4836 // the violating exception to mark it handled, but it is currently hard to do 4837 // with wasm EH instruction structure with catch/catch_all, we just call 4838 // std::terminate and ignore the violating exception as in CGCXXABI. 4839 // TODO Consider code transformation that makes calling __clang_call_terminate 4840 // possible. 4841 return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn); 4842 } 4843 4844 /// Register a global destructor as best as we know how. 4845 void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, 4846 llvm::FunctionCallee Dtor, 4847 llvm::Constant *Addr) { 4848 if (D.getTLSKind() != VarDecl::TLS_None) { 4849 llvm::PointerType *PtrTy = CGF.UnqualPtrTy; 4850 4851 // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...); 4852 llvm::FunctionType *AtExitTy = 4853 llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, PtrTy}, true); 4854 4855 // Fetch the actual function. 4856 llvm::FunctionCallee AtExit = 4857 CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np"); 4858 4859 // Create __dtor function for the var decl. 4860 llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit); 4861 4862 // Register above __dtor with atexit(). 4863 // First param is flags and must be 0, second param is function ptr 4864 llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy); 4865 CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub}); 4866 4867 // Cannot unregister TLS __dtor so done 4868 return; 4869 } 4870 4871 // Create __dtor function for the var decl. 4872 llvm::Function *DtorStub = CGF.createAtExitStub(D, Dtor, Addr); 4873 4874 // Register above __dtor with atexit(). 4875 CGF.registerGlobalDtorWithAtExit(DtorStub); 4876 4877 // Emit __finalize function to unregister __dtor and (as appropriate) call 4878 // __dtor. 4879 emitCXXStermFinalizer(D, DtorStub, Addr); 4880 } 4881 4882 void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub, 4883 llvm::Constant *addr) { 4884 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false); 4885 SmallString<256> FnName; 4886 { 4887 llvm::raw_svector_ostream Out(FnName); 4888 getMangleContext().mangleDynamicStermFinalizer(&D, Out); 4889 } 4890 4891 // Create the finalization action associated with a variable. 4892 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction(); 4893 llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction( 4894 FTy, FnName.str(), FI, D.getLocation()); 4895 4896 CodeGenFunction CGF(CGM); 4897 4898 CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI, 4899 FunctionArgList(), D.getLocation(), 4900 D.getInit()->getExprLoc()); 4901 4902 // The unatexit subroutine unregisters __dtor functions that were previously 4903 // registered by the atexit subroutine. If the referenced function is found, 4904 // the unatexit returns a value of 0, meaning that the cleanup is still 4905 // pending (and we should call the __dtor function). 4906 llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub); 4907 4908 llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct"); 4909 4910 llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call"); 4911 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end"); 4912 4913 // Check if unatexit returns a value of 0. If it does, jump to 4914 // DestructCallBlock, otherwise jump to EndBlock directly. 4915 CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock); 4916 4917 CGF.EmitBlock(DestructCallBlock); 4918 4919 // Emit the call to dtorStub. 4920 llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub); 4921 4922 // Make sure the call and the callee agree on calling convention. 4923 CI->setCallingConv(dtorStub->getCallingConv()); 4924 4925 CGF.EmitBlock(EndBlock); 4926 4927 CGF.FinishFunction(); 4928 4929 if (auto *IPA = D.getAttr<InitPriorityAttr>()) { 4930 CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer, 4931 IPA->getPriority()); 4932 } else if (isTemplateInstantiation(D.getTemplateSpecializationKind()) || 4933 getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) { 4934 // According to C++ [basic.start.init]p2, class template static data 4935 // members (i.e., implicitly or explicitly instantiated specializations) 4936 // have unordered initialization. As a consequence, we can put them into 4937 // their own llvm.global_dtors entry. 4938 CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535); 4939 } else { 4940 CGM.AddCXXStermFinalizerEntry(StermFinalizer); 4941 } 4942 } 4943