//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides C++ code generation targeting the Itanium C++ ABI.  The class
// in this file generates structures that follow the Itanium C++ ABI, which is
// documented at:
//  https://itanium-cxx-abi.github.io/cxx-abi/abi.html
//  https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
//
// It also supports the closely-related ARM ABI, documented at:
// https://developer.arm.com/documentation/ihi0041/g/
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CGVTables.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/Type.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ScopedPrinter.h"

using namespace clang;
using namespace CodeGen;

namespace {
class ItaniumCXXABI : public CodeGen::CGCXXABI {
  /// VTables - All the vtables which have been defined.
  llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;

  /// All the thread wrapper functions that have been used.
  llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
      ThreadWrappers;

protected:
  bool UseARMMethodPtrABI;
  bool UseARMGuardVarABI;
  bool Use32BitVTableOffsetABI;

  ItaniumMangleContext &getMangleContext() {
    return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
  }

public:
  ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
                bool UseARMMethodPtrABI = false,
                bool UseARMGuardVarABI = false) :
    CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
    UseARMGuardVarABI(UseARMGuardVarABI),
    Use32BitVTableOffsetABI(false) { }

  bool classifyReturnType(CGFunctionInfo &FI) const override;

  RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
    // If C++ prohibits us from making a copy, pass by address.
    if (!RD->canPassInRegisters())
      return RAA_Indirect;
    return RAA_Default;
  }

  bool isThisCompleteObject(GlobalDecl GD) const override {
    // The Itanium ABI has separate complete-object vs. base-object
    // variants of both constructors and destructors.
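    // (In the Itanium mangling these are distinct symbols: C1/C2 for the
    // complete-object and base-object constructor variants, and D0/D1/D2
    // for the deleting, complete-object, and base-object destructors.)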
80 if (isa<CXXDestructorDecl>(GD.getDecl())) { 81 switch (GD.getDtorType()) { 82 case Dtor_Complete: 83 case Dtor_Deleting: 84 return true; 85 86 case Dtor_Base: 87 return false; 88 89 case Dtor_Comdat: 90 llvm_unreachable("emitting dtor comdat as function?"); 91 } 92 llvm_unreachable("bad dtor kind"); 93 } 94 if (isa<CXXConstructorDecl>(GD.getDecl())) { 95 switch (GD.getCtorType()) { 96 case Ctor_Complete: 97 return true; 98 99 case Ctor_Base: 100 return false; 101 102 case Ctor_CopyingClosure: 103 case Ctor_DefaultClosure: 104 llvm_unreachable("closure ctors in Itanium ABI?"); 105 106 case Ctor_Comdat: 107 llvm_unreachable("emitting ctor comdat as function?"); 108 } 109 llvm_unreachable("bad dtor kind"); 110 } 111 112 // No other kinds. 113 return false; 114 } 115 116 bool isZeroInitializable(const MemberPointerType *MPT) override; 117 118 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override; 119 120 CGCallee 121 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, 122 const Expr *E, 123 Address This, 124 llvm::Value *&ThisPtrForCall, 125 llvm::Value *MemFnPtr, 126 const MemberPointerType *MPT) override; 127 128 llvm::Value * 129 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E, 130 Address Base, 131 llvm::Value *MemPtr, 132 const MemberPointerType *MPT) override; 133 134 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF, 135 const CastExpr *E, 136 llvm::Value *Src) override; 137 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E, 138 llvm::Constant *Src) override; 139 140 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override; 141 142 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override; 143 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT, 144 CharUnits offset) override; 145 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override; 146 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD, 147 CharUnits ThisAdjustment); 148 149 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF, 150 llvm::Value *L, llvm::Value *R, 151 const MemberPointerType *MPT, 152 bool Inequality) override; 153 154 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF, 155 llvm::Value *Addr, 156 const MemberPointerType *MPT) override; 157 158 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE, 159 Address Ptr, QualType ElementType, 160 const CXXDestructorDecl *Dtor) override; 161 162 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override; 163 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override; 164 165 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override; 166 167 llvm::CallInst * 168 emitTerminateForUnexpectedException(CodeGenFunction &CGF, 169 llvm::Value *Exn) override; 170 171 void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD); 172 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override; 173 CatchTypeInfo 174 getAddrOfCXXCatchHandlerType(QualType Ty, 175 QualType CatchHandlerType) override { 176 return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0}; 177 } 178 179 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override; 180 void EmitBadTypeidCall(CodeGenFunction &CGF) override; 181 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy, 182 Address ThisPtr, 183 llvm::Type *StdTypeInfoPtrTy) override; 184 185 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr, 186 QualType SrcRecordTy) override; 187 188 llvm::Value 
  llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
                                   QualType SrcRecordTy, QualType DestTy,
                                   QualType DestRecordTy,
                                   llvm::BasicBlock *CastEnd) override;

  llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
                                     QualType SrcRecordTy,
                                     QualType DestTy) override;

  bool EmitBadCastCall(CodeGenFunction &CGF) override;

  llvm::Value *
  GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
                            const CXXRecordDecl *ClassDecl,
                            const CXXRecordDecl *BaseClassDecl) override;

  void EmitCXXConstructors(const CXXConstructorDecl *D) override;

  AddedStructorArgCounts
  buildStructorSignature(GlobalDecl GD,
                         SmallVectorImpl<CanQualType> &ArgTys) override;

  bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
                              CXXDtorType DT) const override {
    // Itanium does not emit any destructor variant as an inline thunk.
    // Delegating may occur as an optimization, but all variants are either
    // emitted with external linkage or as linkonce if they are inline and
    // used.
    return false;
  }

  void EmitCXXDestructors(const CXXDestructorDecl *D) override;

  void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
                                 FunctionArgList &Params) override;

  void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;

  AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
                                               const CXXConstructorDecl *D,
                                               CXXCtorType Type,
                                               bool ForVirtualBase,
                                               bool Delegating) override;

  llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
                                             const CXXDestructorDecl *DD,
                                             CXXDtorType Type,
                                             bool ForVirtualBase,
                                             bool Delegating) override;

  void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
                          CXXDtorType Type, bool ForVirtualBase,
                          bool Delegating, Address This,
                          QualType ThisTy) override;

  void emitVTableDefinitions(CodeGenVTables &CGVT,
                             const CXXRecordDecl *RD) override;

  bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
                                           CodeGenFunction::VPtr Vptr) override;

  bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
    return true;
  }

  llvm::Constant *
  getVTableAddressPoint(BaseSubobject Base,
                        const CXXRecordDecl *VTableClass) override;

  llvm::Value *getVTableAddressPointInStructor(
      CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
      BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;

  llvm::Value *getVTableAddressPointInStructorWithVTT(
      CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
      BaseSubobject Base, const CXXRecordDecl *NearestVBase);

  llvm::Constant *
  getVTableAddressPointForConstExpr(BaseSubobject Base,
                                    const CXXRecordDecl *VTableClass) override;

  llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
                                        CharUnits VPtrOffset) override;

  CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
                                     Address This, llvm::Type *Ty,
                                     SourceLocation Loc) override;

  llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
                                         const CXXDestructorDecl *Dtor,
                                         CXXDtorType DtorType, Address This,
                                         DeleteOrMemberCallExpr E) override;

  void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;

  bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
  bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;

  void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
                       bool ReturnAdjustment) override {
    // Allow inlining of thunks by emitting them with available_externally
    // linkage together with vtables when needed.
    if (ForVTable && !Thunk->hasLocalLinkage())
      Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
    CGM.setGVProperties(Thunk, GD);
  }

  bool exportThunk() override { return true; }

  llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
                                     const ThisAdjustment &TA) override;

  llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const ReturnAdjustment &RA) override;

  size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
                              FunctionArgList &Args) const override {
    assert(!Args.empty() && "expected the arglist to not be empty!");
    return Args.size() - 1;
  }

  StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
  StringRef GetDeletedVirtualCallName() override
    { return "__cxa_deleted_virtual"; }

  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
  Address InitializeArrayCookie(CodeGenFunction &CGF,
                                Address NewPtr,
                                llvm::Value *NumElements,
                                const CXXNewExpr *expr,
                                QualType ElementType) override;
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
                                   Address allocPtr,
                                   CharUnits cookieSize) override;

  void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
                       llvm::GlobalVariable *DeclPtr,
                       bool PerformInit) override;
  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                          llvm::FunctionCallee dtor,
                          llvm::Constant *addr) override;

  llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                                llvm::Value *Val);
  void EmitThreadLocalInitFuncs(
      CodeGenModule &CGM,
      ArrayRef<const VarDecl *> CXXThreadLocals,
      ArrayRef<llvm::Function *> CXXThreadLocalInits,
      ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;

  bool usesThreadWrapperFunction(const VarDecl *VD) const override {
    return !isEmittedWithConstantInitializer(VD) ||
           mayNeedDestruction(VD);
  }
  LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
                                      QualType LValType) override;

  bool NeedsVTTParameter(GlobalDecl GD) override;

  /**************************** RTTI Uniqueness ******************************/

protected:
  /// Returns true if the ABI requires RTTI type_info objects to be unique
  /// across a program.
  virtual bool shouldRTTIBeUnique() const { return true; }

public:
  /// What sort of unique-RTTI behavior should we use?
  enum RTTIUniquenessKind {
    /// We are guaranteeing, or need to guarantee, that the RTTI string
    /// is unique.
    RUK_Unique,

    /// We are not guaranteeing uniqueness for the RTTI string, so we
    /// can demote to hidden visibility but must use string comparisons.
    RUK_NonUniqueHidden,

    /// We are not guaranteeing uniqueness for the RTTI string, so we
    /// have to use string comparisons, but we also have to emit it with
    /// non-hidden visibility.
    RUK_NonUniqueVisible
  };

  /// Return the required visibility status for the given type and linkage in
  /// the current ABI.
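  /// (For instance, the type_info object for a template instantiation is
  /// typically emitted linkonce_odr in every TU that needs it, so the
  /// linkage rather than the type alone decides which uniqueness kind
  /// applies.)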
  RTTIUniquenessKind
  classifyRTTIUniqueness(QualType CanTy,
                         llvm::GlobalValue::LinkageTypes Linkage) const;
  friend class ItaniumRTTIBuilder;

  void emitCXXStructor(GlobalDecl GD) override;

  std::pair<llvm::Value *, const CXXRecordDecl *>
  LoadVTablePtr(CodeGenFunction &CGF, Address This,
                const CXXRecordDecl *RD) override;

private:
  bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
    const auto &VtableLayout =
        CGM.getItaniumVTableContext().getVTableLayout(RD);

    for (const auto &VtableComponent : VtableLayout.vtable_components()) {
      // Skip empty slot.
      if (!VtableComponent.isUsedFunctionPointerKind())
        continue;

      const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
      if (!Method->getCanonicalDecl()->isInlined())
        continue;

      StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
      auto *Entry = CGM.GetGlobalValue(Name);
      // This checks whether the virtual inline function has already been
      // emitted. Note that it is possible that this inline function would
      // be emitted after trying to emit the vtable speculatively. Because
      // of this we do an extra pass after emitting all deferred vtables to
      // find and emit these vtables opportunistically.
      if (!Entry || Entry->isDeclaration())
        return true;
    }
    return false;
  }

  bool isVTableHidden(const CXXRecordDecl *RD) const {
    const auto &VtableLayout =
        CGM.getItaniumVTableContext().getVTableLayout(RD);

    for (const auto &VtableComponent : VtableLayout.vtable_components()) {
      if (VtableComponent.isRTTIKind()) {
        const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
        if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
          return true;
      } else if (VtableComponent.isUsedFunctionPointerKind()) {
        const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
        if (Method->getVisibility() == Visibility::HiddenVisibility &&
            !Method->isDefined())
          return true;
      }
    }
    return false;
  }
};

class ARMCXXABI : public ItaniumCXXABI {
public:
  ARMCXXABI(CodeGen::CodeGenModule &CGM) :
    ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                  /*UseARMGuardVarABI=*/true) {}

  bool HasThisReturn(GlobalDecl GD) const override {
    return (isa<CXXConstructorDecl>(GD.getDecl()) || (
              isa<CXXDestructorDecl>(GD.getDecl()) &&
              GD.getDtorType() != Dtor_Deleting));
  }

  void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
                           QualType ResTy) override;

  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
  Address InitializeArrayCookie(CodeGenFunction &CGF,
                                Address NewPtr,
                                llvm::Value *NumElements,
                                const CXXNewExpr *expr,
                                QualType ElementType) override;
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
                                   CharUnits cookieSize) override;
};

class AppleARM64CXXABI : public ARMCXXABI {
public:
  AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
    Use32BitVTableOffsetABI = true;
  }

  // ARM64 libraries are prepared for non-unique RTTI.
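  // (Non-unique RTTI means two type_info objects for the same type may have
  // different addresses, so the runtime falls back to comparing the type
  // name strings for equality.)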
  bool shouldRTTIBeUnique() const override { return false; }
};

class FuchsiaCXXABI final : public ItaniumCXXABI {
public:
  explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM) {}

private:
  bool HasThisReturn(GlobalDecl GD) const override {
    return isa<CXXConstructorDecl>(GD.getDecl()) ||
           (isa<CXXDestructorDecl>(GD.getDecl()) &&
            GD.getDtorType() != Dtor_Deleting);
  }
};

class WebAssemblyCXXABI final : public ItaniumCXXABI {
public:
  explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                      /*UseARMGuardVarABI=*/true) {}
  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
  llvm::CallInst *
  emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                      llvm::Value *Exn) override;

private:
  bool HasThisReturn(GlobalDecl GD) const override {
    return isa<CXXConstructorDecl>(GD.getDecl()) ||
           (isa<CXXDestructorDecl>(GD.getDecl()) &&
            GD.getDtorType() != Dtor_Deleting);
  }
  bool canCallMismatchedFunctionType() const override { return false; }
};

class XLCXXABI final : public ItaniumCXXABI {
public:
  explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM) {}

  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                          llvm::FunctionCallee dtor,
                          llvm::Constant *addr) override;

  bool useSinitAndSterm() const override { return true; }

private:
  void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                             llvm::Constant *addr);
};
}

CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
  switch (CGM.getContext().getCXXABIKind()) {
  // For IR-generation purposes, there's no significant difference
  // between the ARM and iOS ABIs.
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
    return new ARMCXXABI(CGM);

  case TargetCXXABI::AppleARM64:
    return new AppleARM64CXXABI(CGM);

  case TargetCXXABI::Fuchsia:
    return new FuchsiaCXXABI(CGM);

  // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
  // include the other 32-bit ARM oddities: constructor/destructor return
  // values and array cookies.
  case TargetCXXABI::GenericAArch64:
    return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                             /*UseARMGuardVarABI=*/true);

  case TargetCXXABI::GenericMIPS:
    return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);

  case TargetCXXABI::WebAssembly:
    return new WebAssemblyCXXABI(CGM);

  case TargetCXXABI::XL:
    return new XLCXXABI(CGM);

  case TargetCXXABI::GenericItanium:
    if (CGM.getContext().getTargetInfo().getTriple().getArch()
        == llvm::Triple::le32) {
      // For PNaCl, use ARM-style method pointers so that PNaCl code
      // does not assume anything about the alignment of function
      // pointers.
      return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
    }
    return new ItaniumCXXABI(CGM);

  case TargetCXXABI::Microsoft:
    llvm_unreachable("Microsoft ABI is not Itanium-based");
  }
  llvm_unreachable("bad ABI kind");
}

llvm::Type *
ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
  if (MPT->isMemberDataPointer())
    return CGM.PtrDiffTy;
  return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
}

/// In the Itanium and ARM ABIs, method pointers have the form:
///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
///
/// In the Itanium ABI:
///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
///  - the this-adjustment is (memptr.adj)
///  - the virtual offset is (memptr.ptr - 1)
///
/// In the ARM ABI:
///  - method pointers are virtual if (memptr.adj & 1) is nonzero
///  - the this-adjustment is (memptr.adj >> 1)
///  - the virtual offset is (memptr.ptr)
/// ARM uses 'adj' for the virtual flag because Thumb functions
/// may be only single-byte aligned.
///
/// If the member is virtual, the adjusted 'this' pointer points
/// to a vtable pointer from which the virtual offset is applied.
///
/// If the member is non-virtual, memptr.ptr is the address of
/// the function to call.
CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
    CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
    llvm::Value *&ThisPtrForCall,
    llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  const FunctionProtoType *FPT =
      MPT->getPointeeType()->getAs<FunctionProtoType>();
  auto *RD =
      cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());

  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
      CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));

  llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);

  llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
  llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
  llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");

  // Extract memptr.adj, which is in the second field.
  llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");

  // Compute the true adjustment.
  llvm::Value *Adj = RawAdj;
  if (UseARMMethodPtrABI)
    Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");

  // Apply the adjustment and cast back to the original struct type
  // for consistency.
  llvm::Value *This = ThisAddr.getPointer();
  llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
  Ptr = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), Ptr, Adj);
  This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
  ThisPtrForCall = This;

  // Load the function pointer.
  llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");

  // If the LSB in the function pointer is 1, the function pointer points to
  // a virtual function.
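  // Illustrative example (not from the ABI text): on a 64-bit Itanium
  // target, &X::f for a virtual f in vtable slot 2 is encoded as
  // { ptr = 2*8 + 1, adj = 0 }, so the low bit of memptr.ptr is set; the
  // ARM encoding of the same pointer is { ptr = 16, adj = 1 }, with the
  // flag carried in adj instead.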
  llvm::Value *IsVirtual;
  if (UseARMMethodPtrABI)
    IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
  else
    IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
  IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
  Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);

  // In the virtual path, the adjustment left 'This' pointing to the
  // vtable of the correct base subobject.  The "function pointer" is an
  // offset within the vtable (+1 for the virtual flag on non-ARM).
  CGF.EmitBlock(FnVirtual);

  // Cast the adjusted this to a pointer to vtable pointer and load.
  llvm::Type *VTableTy = Builder.getInt8PtrTy();
  CharUnits VTablePtrAlign =
      CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
                                        CGF.getPointerAlign());
  llvm::Value *VTable = CGF.GetVTablePtr(
      Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD);

  // Apply the offset.
  // On ARM64, to reserve extra space in virtual member function pointers,
  // we only pay attention to the low 32 bits of the offset.
  llvm::Value *VTableOffset = FnAsInt;
  if (!UseARMMethodPtrABI)
    VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
  if (Use32BitVTableOffsetABI) {
    VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
    VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
  }

  // Check the address of the function pointer if CFI on member function
  // pointers is enabled.
  llvm::Constant *CheckSourceLocation;
  llvm::Constant *CheckTypeDesc;
  bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
                            CGM.HasHiddenLTOVisibility(RD);
  bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
                           CGM.HasHiddenLTOVisibility(RD);
  bool ShouldEmitWPDInfo =
      CGM.getCodeGenOpts().WholeProgramVTables &&
      // Don't insert type tests if we are forcing public visibility.
      !CGM.AlwaysHasLTOVisibilityPublic(RD);
  llvm::Value *VirtualFn = nullptr;

  {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    llvm::Value *TypeId = nullptr;
    llvm::Value *CheckResult = nullptr;

    if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
      // If doing CFI, VFE or WPD, we will need the metadata node to check
      // against.
      llvm::Metadata *MD =
          CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
      TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
    }

    if (ShouldEmitVFEInfo) {
      llvm::Value *VFPAddr =
          Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);

      // If doing VFE, load from the vtable with a type.checked.load intrinsic
      // call. Note that we use the GEP to calculate the address to load from
      // and pass 0 as the offset to the intrinsic. This is because every
      // vtable slot of the correct type is marked with matching metadata, and
      // we know that the load must be from one of these slots.
      llvm::Value *CheckedLoad = Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
          {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
      CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
      VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
      VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(),
                                        "memptr.virtualfn");
    } else {
      // When not doing VFE, emit a normal load, as it allows more
      // optimisations than type.checked.load.
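      // A rough sketch of the IR this path produces when a type test is
      // wanted (value names are illustrative only):
      //   %vfpaddr = getelementptr i8, i8* %vtable, i64 %vtable.offset
      //   %ok = call i1 @llvm.type.test(i8* %vfpaddr, metadata !"<type-id>")
      //   %memptr.virtualfn = load ... from %vfpaddr ...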
      if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
        llvm::Value *VFPAddr =
            Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
        llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
                                      ? llvm::Intrinsic::type_test
                                      : llvm::Intrinsic::public_type_test;

        CheckResult = Builder.CreateCall(
            CGM.getIntrinsic(IID),
            {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
      }

      if (CGM.getItaniumVTableContext().isRelativeLayout()) {
        VirtualFn = CGF.Builder.CreateCall(
            CGM.getIntrinsic(llvm::Intrinsic::load_relative,
                             {VTableOffset->getType()}),
            {VTable, VTableOffset});
        VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
      } else {
        llvm::Value *VFPAddr =
            CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
        VFPAddr = CGF.Builder.CreateBitCast(
            VFPAddr, FTy->getPointerTo()->getPointerTo());
        VirtualFn = CGF.Builder.CreateAlignedLoad(
            FTy->getPointerTo(), VFPAddr, CGF.getPointerAlign(),
            "memptr.virtualfn");
      }
    }
    assert(VirtualFn && "Virtual function pointer not created!");
    assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
            CheckResult) &&
           "Check result required but not created!");

    if (ShouldEmitCFICheck) {
      // If doing CFI, emit the check.
      CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
      CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
      llvm::Constant *StaticData[] = {
          llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
          CheckSourceLocation,
          CheckTypeDesc,
      };

      if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
        CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
      } else {
        llvm::Value *AllVtables = llvm::MetadataAsValue::get(
            CGM.getLLVMContext(),
            llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
        llvm::Value *ValidVtable = Builder.CreateCall(
            CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
        CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
                      SanitizerHandler::CFICheckFail, StaticData,
                      {VTable, ValidVtable});
      }

      FnVirtual = Builder.GetInsertBlock();
    }
  } // End of sanitizer scope

  CGF.EmitBranch(FnEnd);

  // In the non-virtual path, the function pointer is actually a
  // function pointer.
  CGF.EmitBlock(FnNonVirtual);
  llvm::Value *NonVirtualFn =
      Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(),
                             "memptr.nonvirtualfn");

  // Check the function pointer if CFI on member function pointers is enabled.
  if (ShouldEmitCFICheck) {
    CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
    if (RD->hasDefinition()) {
      CodeGenFunction::SanitizerScope SanScope(&CGF);

      llvm::Constant *StaticData[] = {
          llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
          CheckSourceLocation,
          CheckTypeDesc,
      };

      llvm::Value *Bit = Builder.getFalse();
      llvm::Value *CastedNonVirtualFn =
          Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
      for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
        llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
            getContext().getMemberPointerType(
                MPT->getPointeeType(),
                getContext().getRecordType(Base).getTypePtr()));
        llvm::Value *TypeId =
            llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);

        llvm::Value *TypeTest =
            Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
                               {CastedNonVirtualFn, TypeId});
        Bit = Builder.CreateOr(Bit, TypeTest);
      }

      CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
                    SanitizerHandler::CFICheckFail, StaticData,
                    {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});

      FnNonVirtual = Builder.GetInsertBlock();
    }
  }

  // We're done.
  CGF.EmitBlock(FnEnd);
  llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
  CalleePtr->addIncoming(VirtualFn, FnVirtual);
  CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);

  CGCallee Callee(FPT, CalleePtr);
  return Callee;
}

/// Compute an l-value by applying the given pointer-to-member to a
/// base object.
llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
    CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
    const MemberPointerType *MPT) {
  assert(MemPtr->getType() == CGM.PtrDiffTy);

  CGBuilderTy &Builder = CGF.Builder;

  // Cast to char*.
  Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);

  // Apply the offset, which we assume is non-null.
  llvm::Value *Addr = Builder.CreateInBoundsGEP(
      Base.getElementType(), Base.getPointer(), MemPtr, "memptr.offset");

  // Cast the address to the appropriate pointer type, adopting the
  // address space of the base pointer.
  llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
                          ->getPointerTo(Base.getAddressSpace());
  return Builder.CreateBitCast(Addr, PType);
}

/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
/// conversion.
///
/// Bitcast conversions are always a no-op under Itanium.
///
/// Obligatory offset/adjustment diagram:
///         <-- offset -->          <-- adjustment -->
///   |--------------------------|----------------------|--------------------|
///   ^Derived address point     ^Base address point    ^Member address point
///
/// So when converting a base member pointer to a derived member pointer,
/// we add the offset to the adjustment because the address point has
/// decreased; and conversely, when converting a derived MP to a base MP
/// we subtract the offset from the adjustment because the address point
/// has increased.
///
/// The standard forbids (at compile time) conversion to and from
/// virtual bases, which is why we don't have to consider them here.
///
/// The standard forbids (at run time) casting a derived MP to a base
/// MP when the derived MP does not point to a member of the base.
/// This is why -1 is a reasonable choice for null data member
/// pointers.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // Use constant emission if we can.
  if (isa<llvm::Constant>(src))
    return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));

  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  CGBuilderTy &Builder = CGF.Builder;
  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
      E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    llvm::Value *dst;
    if (isDerivedToBase)
      dst = Builder.CreateNSWSub(src, adj, "adj");
    else
      dst = Builder.CreateNSWAdd(src, adj, "adj");

    // Null check.
    llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
    llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
    return Builder.CreateSelect(isNull, src, dst);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
  llvm::Value *dstAdj;
  if (isDerivedToBase)
    dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
  else
    dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");

  return Builder.CreateInsertValue(src, dstAdj, 1);
}

llvm::Constant *
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
                                           llvm::Constant *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // If the adjustment is trivial, we don't need to do anything.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
      E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    // null maps to null.
    if (src->isAllOnesValue()) return src;

    if (isDerivedToBase)
      return llvm::ConstantExpr::getNSWSub(src, adj);
    else
      return llvm::ConstantExpr::getNSWAdd(src, adj);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
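  // (Concretely: converting through a non-virtual base at offset 8 stores
  // adj = 16 rather than 8 here, keeping bit 0 of adj free for the virtual
  // flag, per ARM C++ ABI 3.2.1.)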
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  llvm::Constant *srcAdj = src->getAggregateElement(1);
  llvm::Constant *dstAdj;
  if (isDerivedToBase)
    dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
  else
    dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);

  llvm::Constant *res = ConstantFoldInsertValueInstruction(src, dstAdj, 1);
  assert(res != nullptr && "Folding must succeed");
  return res;
}

llvm::Constant *
ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
  // Itanium C++ ABI 2.3:
  //   A NULL pointer is represented as -1.
  if (MPT->isMemberDataPointer())
    return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);

  llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
  llvm::Constant *Values[2] = { Zero, Zero };
  return llvm::ConstantStruct::getAnon(Values);
}

llvm::Constant *
ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
                                     CharUnits offset) {
  // Itanium C++ ABI 2.3:
  //   A pointer to data member is an offset from the base address of
  //   the class object containing it, represented as a ptrdiff_t
  return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
}

llvm::Constant *
ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
  return BuildMemberPointer(MD, CharUnits::Zero());
}

llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");

  CodeGenTypes &Types = CGM.getTypes();

  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
    uint64_t VTableOffset;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Multiply by 4-byte relative offsets.
      VTableOffset = Index * 4;
    } else {
      const ASTContext &Context = getContext();
      CharUnits PointerWidth = Context.toCharUnitsFromBits(
          Context.getTargetInfo().getPointerWidth(0));
      VTableOffset = Index * PointerWidth.getQuantity();
    }

    if (UseARMMethodPtrABI) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         2 * ThisAdjustment.getQuantity() + 1);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
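    // (The signature is not computable when it involves a type that is
    // still incomplete at this point, e.g. a method declared to take a
    // forward-declared class by value.)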
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);

    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                       (UseARMMethodPtrABI ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}

llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
                                                 QualType MPType) {
  const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  if (!MPD)
    return EmitNullMemberPointer(MPT);

  CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);

  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
    return BuildMemberPointer(MD, ThisAdjustment);

  CharUnits FieldOffset =
      getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
  return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
}

/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr.  This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null.  ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj.  If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}

llvm::Value *
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *MemPtr,
                                          const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  /// For member data pointers, this is just a check against -1.
  if (MPT->isMemberDataPointer()) {
    assert(MemPtr->getType() == CGM.PtrDiffTy);
    llvm::Value *NegativeOne =
        llvm::Constant::getAllOnesValue(MemPtr->getType());
    return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
  }

  // In Itanium, a member function pointer is not null if 'ptr' is not null.
  llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");

  llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
  llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");

  // On ARM, a member function pointer is also non-null if the low bit of
  // 'adj' (the virtual bit) is set.
  if (UseARMMethodPtrABI) {
    llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
    llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
    llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
    llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
                                                  "memptr.isvirtual");
    Result = Builder.CreateOr(Result, IsVirtual);
  }

  return Result;
}

bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
  const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
  if (!RD)
    return false;

  // If C++ prohibits us from making a copy, return by address.
  if (!RD->canPassInRegisters()) {
    auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
    FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
    return true;
  }
  return false;
}

/// The Itanium ABI requires non-zero initialization only for data
/// member pointers, for which '0' is a valid offset.
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
  return MPT->isMemberFunctionPointer();
}

/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
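/// (In the standard 64-bit layout the two words immediately before a
/// vtable's address point are the RTTI pointer at entry -1 and this
/// offset-to-top at entry -2.)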
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            Address Ptr,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
    auto *ClassDecl =
        cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
    llvm::Value *VTable =
        CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);

    // Track back to entry -2 and pull out the offset there.
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
    llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(
        CGF.IntPtrTy, OffsetPtr, CGF.getPointerAlign());

    // Apply the offset.
    llvm::Value *CompletePtr =
        CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
    CompletePtr =
        CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws.
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
                                    ElementType);
  }

  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);

  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
}

void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
  // void __cxa_rethrow();

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);

  llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");

  if (isNoReturn)
    CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
  else
    CGF.EmitRuntimeCallOrInvoke(Fn);
}

static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
  // void *__cxa_allocate_exception(size_t thrown_size);

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
}

static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
  // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
  //                  void (*dest) (void *));

  llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
}

void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
  QualType ThrowType = E->getSubExpr()->getType();
  // Now allocate the exception object.
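  // For a hypothetical `throw E();` with a nontrivial ~E(), the emitted
  // sequence is roughly:
  //   %exn = call i8* @__cxa_allocate_exception(i64 sizeof(E))
  //   ... construct the E object into %exn ...
  //   call void @__cxa_throw(i8* %exn, i8* @_ZTI1E, i8* @_ZN1ED1Ev)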
  llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
  uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();

  llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
      AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");

  CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
  CGF.EmitAnyExprToExn(
      E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));

  // Now throw the exception.
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
                                                         /*ForEH=*/true);

  // The address of the destructor.  If the exception type has a
  // trivial destructor (or isn't a record), we just pass null.
  llvm::Constant *Dtor = nullptr;
  if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
    CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!Record->hasTrivialDestructor()) {
      CXXDestructorDecl *DtorD = Record->getDestructor();
      Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
      Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
    }
  }
  if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);

  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
  CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
}

static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *PtrDiffTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);

  // Mark the function as nounwind readonly.
  llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
                                            llvm::Attribute::ReadOnly };
  llvm::AttributeList Attrs = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
}

static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

/// Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7]
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation below and
  // return that Src is not a public base of Dst.  Record all inheritance
  // paths.
  if (!Dst->isDerivedFrom(Src, Paths))
    return CharUnits::fromQuantity(-2ULL);

  unsigned NumPublicPaths = 0;
  CharUnits Offset;

  // Now walk all possible inheritance paths.
  for (const CXXBasePath &Path : Paths) {
    if (Path.Access != AS_public) // Ignore non-public inheritance.
      continue;

    ++NumPublicPaths;

    for (const CXXBasePathElement &PathElement : Path) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: no hint.
      if (PathElement.Base->isVirtual())
        return CharUnits::fromQuantity(-1ULL);

      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
        continue;

      // Accumulate the base class offsets.
      const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
      Offset += L.getBaseClassOffset(
          PathElement.Base->getType()->getAsCXXRecordDecl());
    }
  }

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(-2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(-3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
  return Offset;
}

static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
                                              QualType SrcRecordTy) {
  return IsDeref;
}

void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
  llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
  Call->setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       Address ThisPtr,
                                       llvm::Type *StdTypeInfoPtrTy) {
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *Value =
      CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);

  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Load the type info.
    Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
    Value = CGF.Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
        {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});

    // Setup to dereference again since this is a proxy we accessed.
    Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
  } else {
    // Load the type info.
    Value =
        CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
  }
  return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
                                       CGF.getPointerAlign());
}

bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                                       QualType SrcRecordTy) {
  return SrcIsPtr;
}

llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint.
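  // For a hypothetical `dynamic_cast<Derived *>(basePtr)` this boils down to
  // roughly:
  //   %r = call i8* @__dynamic_cast(i8* %basePtr, i8* @_ZTI4Base,
  //                                 i8* @_ZTI7Derived, i64 <hint>)
  // where <hint> is the static Base-within-Derived offset, or one of the
  // negative sentinels computed by computeOffsetHint above.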
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint = llvm::ConstantInt::get(
      PtrDiffLTy,
      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);

  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
        CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}

llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  Address ThisAddr,
                                                  QualType SrcRecordTy,
                                                  QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *OffsetToTop;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);

    // Get the offset-to-top from the vtable.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
  } else {
    llvm::Type *PtrDiffLTy =
        CGF.ConvertType(CGF.getContext().getPointerDiffType());

    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);

    // Get the offset-to-top from the vtable.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
  }
  // Finally, add the offset to the pointer.
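  // e.g. for a hypothetical `struct D : A, B` with the B subobject at
  // offset 16, casting a B* to void* loads offset-to-top == -16 from the
  // B-in-D vtable and lands on the start of the complete D object.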
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, Value, OffsetToTop);
  return CGF.Builder.CreateBitCast(Value, DestLTy);
}

bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::FunctionCallee Fn = getBadCastFn(CGF);
  llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
  Call->setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
  return true;
}

llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         Address This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
  CharUnits VBaseOffsetOffset =
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
                                                               BaseClassDecl);
  llvm::Value *VBaseOffsetPtr =
      CGF.Builder.CreateConstGEP1_64(
          CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
          "vbase.offset.ptr");

  llvm::Value *VBaseOffset;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    VBaseOffsetPtr =
        CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
        "vbase.offset");
  } else {
    VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
                                               CGM.PtrDiffTy->getPointerTo());
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
  }
  return VBaseOffset;
}

void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
  // Just make sure we're in sync with TargetCXXABI.
  assert(CGM.getTarget().getCXXABI().hasConstructorVariants());

  // The constructor used for constructing this as a base class;
  // ignores virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));

  // The constructor used for constructing this as a complete class;
  // constructs the virtual bases, then calls the base constructor.
  if (!D->getParent()->isAbstract()) {
    // We don't need to emit the complete ctor if the class is abstract.
    CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
  }
}

CGCXXABI::AddedStructorArgCounts
ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
                                      SmallVectorImpl<CanQualType> &ArgTys) {
  ASTContext &Context = getContext();

  // All parameters are already in place except VTT, which goes after 'this'.
  // These are Clang types, so we don't need to worry about sret yet.

  // Check if we need to add a VTT parameter (which has type void **).
  if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
                                             : GD.getDtorType() == Dtor_Base) &&
      cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
    ArgTys.insert(ArgTys.begin() + 1,
                  Context.getPointerType(Context.VoidPtrTy));
    return AddedStructorArgCounts::prefix(1);
  }
  return AddedStructorArgCounts{};
}

void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
  // The destructor used for destructing this as a base class; ignores
  // virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));

  // The destructor used for destructing this as a most-derived class;
  // calls the base destructor and then destructs any virtual bases.
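  // (For reference: in the Itanium mangling these variants are distinguished
  // by the <ctor-dtor-name> component. For a class X, _ZN1XD1Ev is the
  // complete-object destructor, _ZN1XD2Ev the base-object destructor, and
  // _ZN1XD0Ev the deleting destructor emitted below for virtual destructors.)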
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));

  // The destructor in a virtual table is always a 'deleting'
  // destructor, which calls the complete destructor and then uses the
  // appropriate operator delete.
  if (D->isVirtual())
    CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
}

void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
                                              QualType &ResTy,
                                              FunctionArgList &Params) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));

  // Check if we need a VTT parameter as well.
  if (NeedsVTTParameter(CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    QualType T = Context.getPointerType(Context.VoidPtrTy);
    auto *VTTDecl = ImplicitParamDecl::Create(
        Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
        T, ImplicitParamDecl::CXXVTT);
    Params.insert(Params.begin() + 1, VTTDecl);
    getStructorImplicitParamDecl(CGF) = VTTDecl;
  }
}

void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  // Naked functions have no prolog.
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
    return;

  /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
  /// adjustments are required, because they are all handled by thunks.
  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));

  /// Initialize the 'vtt' slot if needed.
  if (getStructorImplicitParamDecl(CGF)) {
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
  }

  /// If this is a function that the ABI specifies returns 'this', initialize
  /// the return slot to 'this' at the start of the function.
  ///
  /// Unlike the setting of return types, this is done within the ABI
  /// implementation instead of by clients of CGCXXABI because:
  /// 1) getThisValue is currently protected
  /// 2) in theory, an ABI could implement 'this' returns some other way;
  ///    HasThisReturn only specifies a contract, not the implementation
  if (HasThisReturn(CGF.CurGD))
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}

CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
    CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
    bool ForVirtualBase, bool Delegating) {
  if (!NeedsVTTParameter(GlobalDecl(D, Type)))
    return AddedStructorArgs{};

  // Insert the implicit 'vtt' argument as the second argument.
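  // Illustrative source pattern that reaches this point (a sketch):
  //   struct V { };
  //   struct A : virtual V { A(); };
  //   struct B : A { B(); };   // B's complete ctor passes a slice of its VTT
  // The base-object constructor A (C2) receives the VTT so it can install
  // construction-vtable pointers instead of A's final vtable pointers.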
  llvm::Value *VTT =
      CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
  return AddedStructorArgs::prefix({{VTT, VTTTy}});
}

llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
    CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
    bool ForVirtualBase, bool Delegating) {
  GlobalDecl GD(DD, Type);
  return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
}

void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
                                       const CXXDestructorDecl *DD,
                                       CXXDtorType Type, bool ForVirtualBase,
                                       bool Delegating, Address This,
                                       QualType ThisTy) {
  GlobalDecl GD(DD, Type);
  llvm::Value *VTT =
      getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);

  CGCallee Callee;
  if (getContext().getLangOpts().AppleKext &&
      Type != Dtor_Base && DD->isVirtual())
    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
  else
    Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);

  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
                            nullptr);
}

void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  ConstantInitBuilder builder(CGM);
  auto components = builder.beginStruct();
  CGVT.createVTableInitializer(components, VTLayout, RTTI,
                               llvm::GlobalValue::isLocalLinkage(Linkage));
  components.finishAndSetAsInitializer(VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  // Set the right visibility.
  CGM.setGVProperties(VTable, RD);

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors(RD);

  // Always emit type metadata on non-available_externally definitions, and on
  // available_externally definitions if we are performing whole program
  // devirtualization. For WPD we need the type metadata on all vtable
  // definitions to ensure we associate derived classes with base classes
  // defined in headers but with a strong definition only in a shared library.
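  // Illustrative IR shape for the attached metadata (a sketch; the exact
  // byte offset depends on the vtable's address point):
  //   @_ZTV1A = constant { ... } { ... }, !type !0
  //   !0 = !{i64 16, !"_ZTS1A"}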
  if (!VTable->isDeclarationForLinker() ||
      CGM.getCodeGenOpts().WholeProgramVTables) {
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
    // For available_externally definitions, add the vtable to
    // @llvm.compiler.used so that it isn't deleted before whole program
    // analysis.
    if (VTable->isDeclarationForLinker()) {
      assert(CGM.getCodeGenOpts().WholeProgramVTables);
      CGM.addCompilerUsedGlobal(VTable);
    }
  }

  if (VTContext.isRelativeLayout() && !VTable->isDSOLocal())
    CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
}

bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
    CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
  if (Vptr.NearestVBase == nullptr)
    return false;
  return NeedsVTTParameter(CGF.CurGD);
}

llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {

  if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
      NeedsVTTParameter(CGF.CurGD)) {
    return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
                                                  NearestVBase);
  }
  return getVTableAddressPoint(Base, VTableClass);
}

llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());

  // Find the appropriate vtable within the vtable group, and the address point
  // within that vtable.
  VTableLayout::AddressPointLocation AddressPoint =
      CGM.getItaniumVTableContext()
          .getVTableLayout(VTableClass)
          .getAddressPoint(Base);
  llvm::Value *Indices[] = {
      llvm::ConstantInt::get(CGM.Int32Ty, 0),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
  };

  return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
                                              Indices, /*InBounds=*/true,
                                              /*InRangeIndex=*/1);
}

// Check whether all the non-inline virtual methods for the class have the
// specified attribute.
template <typename T>
static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
  bool FoundNonInlineVirtualMethodWithAttr = false;
  for (const auto *D : RD->noload_decls()) {
    if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
      if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
          FD->doesThisDeclarationHaveABody())
        continue;
      if (!D->hasAttr<T>())
        return false;
      FoundNonInlineVirtualMethodWithAttr = true;
    }
  }

  // We didn't find any non-inline virtual methods missing the attribute. We
  // will return true when we found at least one non-inline virtual with the
  // attribute. (This lets our caller know that the attribute needs to be
  // propagated up to the vtable.)
  return FoundNonInlineVirtualMethodWithAttr;
}

llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {
  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");

  // Get the secondary vpointer index.
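  // Conceptually the VTT is a flat array of address points, and the
  // secondary vpointer index selects the entry for this base subobject,
  // roughly (illustrative): vptr = VTT[VirtualPointerIndex];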
  uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

  // Load the VTT.
  llvm::Value *VTT = CGF.LoadCXXVTT();
  if (VirtualPointerIndex)
    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.VoidPtrTy, VTT, VirtualPointerIndex);

  // And load the address point from the VTT.
  return CGF.Builder.CreateAlignedLoad(CGF.VoidPtrTy, VTT,
                                       CGF.getPointerAlign());
}

llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
    BaseSubobject Base, const CXXRecordDecl *VTableClass) {
  return getVTableAddressPoint(Base, VTableClass);
}

llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);

  // Use pointer alignment for the vtable. Otherwise we would align them based
  // on the size of the initializer which doesn't make sense as only single
  // values are read.
  unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
                        ? 32
                        : CGM.getTarget().getPointerAlign(0);

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, VTableType, llvm::GlobalValue::ExternalLinkage,
      getContext().toCharUnitsFromBits(PAlign).getQuantity());
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // In MS C++ if you have a class with virtual functions in which you are
  // using selective member import/export, then all virtual functions must be
  // exported unless they are inline, otherwise a link error will result. To
  // match this behavior, for such classes, we dllimport the vtable if it is
  // defined externally and all the non-inline virtual methods are marked
  // dllimport, and we dllexport the vtable if it is defined in this TU and
  // all the non-inline virtual methods are marked dllexport.
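  // Illustrative source pattern (hypothetical, selective member export):
  //   struct S {
  //     __declspec(dllexport) virtual void f();  // non-inline, exported
  //     virtual void g() { }                     // inline, exempt
  //   };
  // With every non-inline virtual method exported, the vtable itself is
  // given dllexport storage class below.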
  if (CGM.getTarget().hasPS4DLLImportExport()) {
    if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
      if (CGM.getVTables().isVTableExternal(RD)) {
        if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
          VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
      } else {
        if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
          VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
      }
    }
  }
  CGM.setGVProperties(VTable, RD);

  return VTable;
}

CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD,
                                                  Address This,
                                                  llvm::Type *Ty,
                                                  SourceLocation Loc) {
  llvm::Type *TyPtr = Ty->getPointerTo();
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(
      This, TyPtr->getPointerTo(), MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc;
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable, TyPtr,
        VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncLoad;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
      llvm::Value *Load = CGF.Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
          {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
      VFuncLoad = CGF.Builder.CreateBitCast(Load, TyPtr);
    } else {
      VTable =
          CGF.Builder.CreateBitCast(VTable, TyPtr->getPointerTo());
      llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
          TyPtr, VTable, VTableIndex, "vfn");
      VFuncLoad =
          CGF.Builder.CreateAlignedLoad(TyPtr, VTableSlotPtr,
                                        CGF.getPointerAlign());
    }

    // Add !invariant.load metadata to the virtual function load to indicate
    // that the function pointer stored in the vtable does not change.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help devirtualization: it only matters when there are two loads of the
    // same virtual function from the same vtable load, which won't happen
    // without the devirtualization enabled by -fstrict-vtable-pointers.
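    // Illustrative IR for the annotated load (a sketch):
    //   %vfn = load ptr, ptr %vfn.slot, !invariant.load !0
    //   ; where !0 = !{} is an empty metadata node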
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers) {
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
        VFuncLoadInstr->setMetadata(
            llvm::LLVMContext::MD_invariant_load,
            llvm::MDNode::get(CGM.getLLVMContext(),
                              llvm::ArrayRef<llvm::Metadata *>()));
      }
    }
    VFunc = VFuncLoad;
  }

  CGCallee Callee(GD, VFunc);
  return Callee;
}

llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
    CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
    Address This, DeleteOrMemberCallExpr E) {
  auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
  auto *D = E.dyn_cast<const CXXDeleteExpr *>();
  assert((CE != nullptr) ^ (D != nullptr));
  assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);

  GlobalDecl GD(Dtor, DtorType);
  const CGFunctionInfo *FInfo =
      &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
  llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
  CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);

  QualType ThisTy;
  if (CE) {
    ThisTy = CE->getObjectType();
  } else {
    ThisTy = D->getDestroyedType();
  }

  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
                            QualType(), nullptr);
  return nullptr;
}

void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
  CodeGenVTables &VTables = CGM.getVTables();
  llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
  VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
}

bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
    const CXXRecordDecl *RD) const {
  // We don't emit available_externally vtables if we are in -fapple-kext mode
  // because kext mode does not permit devirtualization.
  if (CGM.getLangOpts().AppleKext)
    return false;

  // If the vtable is hidden then it is not safe to emit an
  // available_externally copy of the vtable.
  if (isVTableHidden(RD))
    return false;

  if (CGM.getCodeGenOpts().ForceEmitVTables)
    return true;

  // If there is no inline virtual function that has not yet been emitted, it
  // is safe to emit an available_externally copy of the vtable.
  // FIXME: we can still emit a copy of the vtable if we can emit definitions
  // of the inline functions.
  if (hasAnyUnusedVirtualInlineFunction(RD))
    return false;

  // For a class with virtual bases, we must also be able to speculatively
  // emit the VTT, because CodeGen doesn't have separate notions of "can emit
  // the vtable" and "can emit the VTT". For a base subobject, this means we
  // need to be able to emit non-virtual base vtables.
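  // E.g. (illustrative): for struct A : virtual V, B { ... }, speculatively
  // emitting A's complete-object machinery also requires being able to emit
  // the vtable for the non-virtual base B's subobject, hence the recursion
  // over non-virtual dynamic bases below.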
  if (RD->getNumVBases()) {
    for (const auto &B : RD->bases()) {
      auto *BRD = B.getType()->getAsCXXRecordDecl();
      assert(BRD && "no class for base specifier");
      if (B.isVirtual() || !BRD->isDynamicClass())
        continue;
      if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
        return false;
    }
  }

  return true;
}

bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
  if (!canSpeculativelyEmitVTableAsBaseClass(RD))
    return false;

  // For a complete-object vtable (or more specifically, for the VTT), we need
  // to be able to speculatively emit the vtables of all dynamic virtual bases.
  for (const auto &B : RD->vbases()) {
    auto *BRD = B.getType()->getAsCXXRecordDecl();
    assert(BRD && "no class for base specifier");
    if (!BRD->isDynamicClass())
      continue;
    if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
      return false;
  }

  return true;
}

static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
                                          Address InitialPtr,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.getPointer();

  Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
    V = CGF.Builder.CreateConstInBoundsByteGEP(
        V, CharUnits::fromQuantity(NonVirtualAdjustment));
  }

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);

    llvm::Value *Offset;
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.Int8Ty, VTablePtr, VirtualAdjustment);
    if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Load the adjustment offset from the vtable as a 32-bit int.
      OffsetPtr =
          CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
      Offset =
          CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
                                        CharUnits::fromQuantity(4));
    } else {
      llvm::Type *PtrDiffTy =
          CGF.ConvertType(CGF.getContext().getPointerDiffType());

      OffsetPtr =
          CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());

      // Load the adjustment offset from the vtable.
      Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
                                             CGF.getPointerAlign());
    }
    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(
        V.getElementType(), V.getPointer(), Offset);
  } else {
    ResultPtr = V.getPointer();
  }

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied second.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
                                                       NonVirtualAdjustment);
  }

  // Cast back to the original type.
  return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
}

llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
                                                  Address This,
                                                  const ThisAdjustment &TA) {
  return performTypeAdjustment(CGF, This, TA.NonVirtual,
                               TA.Virtual.Itanium.VCallOffsetOffset,
                               /*IsReturnAdjustment=*/false);
}

llvm::Value *
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const ReturnAdjustment &RA) {
  return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
                               RA.Virtual.Itanium.VBaseOffsetOffset,
                               /*IsReturnAdjustment=*/true);
}

void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
                                    RValue RV, QualType ResultType) {
  if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
    return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);

  // Destructor thunks in the ARM ABI have indeterminate results.
  llvm::Type *T = CGF.ReturnValue.getElementType();
  RValue Undef = RValue::get(llvm::UndefValue::get(T));
  return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
}

/************************** Array allocation cookies **************************/

CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // The array cookie is a size_t; pad that up to the element alignment.
  // The cookie is actually right-justified in that space.
  return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
                  CGM.getContext().getPreferredTypeAlignInChars(elementType));
}

Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                             Address NewPtr,
                                             llvm::Value *NumElements,
                                             const CXXNewExpr *expr,
                                             QualType ElementType) {
  assert(requiresArrayCookie(expr));

  unsigned AS = NewPtr.getAddressSpace();

  ASTContext &Ctx = getContext();
  CharUnits SizeSize = CGF.getSizeSize();

  // The size of the cookie.
  CharUnits CookieSize =
      std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));

  // Compute an offset to the cookie.
  Address CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);

  // Write the number of elements into the appropriate slot.
  Address NumElementsPtr =
      CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
  llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);

  // Handle the array cookie specially in ASan.
  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
      (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
       CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
    // The store to the CookiePtr does not need to be instrumented.
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
    llvm::FunctionCallee F =
        CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
    CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
  }

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
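  // Resulting layout (an illustrative sketch of the generic Itanium cookie):
  //   | padding ... | size_t element count | element 0 | element 1 | ...
  //   ^ NewPtr      ^ CookiePtr            ^ returned pointer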
  return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
}

llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                Address allocPtr,
                                                CharUnits cookieSize) {
  // The number of elements is right-justified in the cookie.
  Address numElementsPtr = allocPtr;
  CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
  if (!numElementsOffset.isZero())
    numElementsPtr =
        CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr,
                                               numElementsOffset);

  unsigned AS = allocPtr.getAddressSpace();
  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
  if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
    return CGF.Builder.CreateLoad(numElementsPtr);
  // In ASan mode, emit a function call instead of a regular load and let the
  // run-time deal with it: if the shadow is properly poisoned, return the
  // cookie; otherwise return 0 to avoid an infinite loop calling destructors.
  // We can't simply ignore this load using nosanitize metadata because
  // the metadata may be lost.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
  llvm::FunctionCallee F =
      CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
  return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
}

CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // ARM says that the cookie is always:
  //   struct array_cookie {
  //     std::size_t element_size; // element_size != 0
  //     std::size_t element_count;
  //   };
  // But the base ABI doesn't give anything an alignment greater than
  // 8, so we can dismiss this as typical ABI-author blindness to
  // actual language complexity and round up to the element alignment.
  return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
                  CGM.getContext().getTypeAlignInChars(elementType));
}

Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                         Address newPtr,
                                         llvm::Value *numElements,
                                         const CXXNewExpr *expr,
                                         QualType elementType) {
  assert(requiresArrayCookie(expr));

  // The cookie is always at the start of the buffer.
  Address cookie = newPtr;

  // The first element is the element size.
  cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
  llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
      getContext().getTypeSizeInChars(elementType).getQuantity());
  CGF.Builder.CreateStore(elementSize, cookie);

  // The second element is the element count.
  cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
  CGF.Builder.CreateStore(numElements, cookie);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
  return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
}

llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                            Address allocPtr,
                                            CharUnits cookieSize) {
  // The number of elements is at offset sizeof(size_t) relative to
  // the allocated pointer.
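  // Roughly (illustrative): count = ((size_t *)allocPtr)[1]; since the ARM
  // cookie is { element_size, element_count } as described above.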
  Address numElementsPtr
    = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());

  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
  return CGF.Builder.CreateLoad(numElementsPtr);
}

/*********************** Static local initialization **************************/

static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
                                              llvm::PointerType *GuardPtrTy) {
  // int __cxa_guard_acquire(__guard *guard_object);
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
                              GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(
      FTy, "__cxa_guard_acquire",
      llvm::AttributeList::get(CGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}

static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
                                              llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_release(__guard *guard_object);
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(
      FTy, "__cxa_guard_release",
      llvm::AttributeList::get(CGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}

static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
                                            llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_abort(__guard *guard_object);
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(
      FTy, "__cxa_guard_abort",
      llvm::AttributeList::get(CGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}

namespace {
  struct CallGuardAbort final : EHScopeStack::Cleanup {
    llvm::GlobalVariable *Guard;
    CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
                                  Guard);
    }
  };
}

/// The ARM code here follows the Itanium code closely enough that we
/// just special-case it at particular places.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // Inline variables that weren't instantiated from variable templates have
  // partially-ordered initialization within their translation unit.
  bool NonTemplateInline =
      D.isInline() &&
      !isTemplateInstantiation(D.getTemplateSpecializationKind());

  // We only need to use thread-safe statics for local non-TLS variables and
  // inline variables; other global initialization is always single-threaded
  // or (through lazy dynamic loading in multiple threads) unsequenced.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    (D.isLocalVarDecl() || NonTemplateInline) &&
                    !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  CharUnits guardAlignment;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
    guardAlignment = CharUnits::One();
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    if (UseARMGuardVarABI) {
      guardTy = CGF.SizeTy;
      guardAlignment = CGF.getSizeAlign();
    } else {
      guardTy = CGF.Int64Ty;
      guardAlignment = CharUnits::fromQuantity(
          CGM.getDataLayout().getABITypeAlignment(guardTy));
    }
  }
  llvm::PointerType *guardPtrTy = guardTy->getPointerTo(
      CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleStaticGuardVariable(&D, out);
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage and visibility from the guarded variable.
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(guardTy, 0),
                                     guardName.str());
    guard->setDSOLocal(var->isDSOLocal());
    guard->setVisibility(var->getVisibility());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());
    guard->setAlignment(guardAlignment.getAsAlign());

    // The ABI says: "It is suggested that it be emitted in the same COMDAT
    // group as the associated data object." In practice, this doesn't work for
    // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
    llvm::Comdat *C = var->getComdat();
    if (!D.isLocalVarDecl() && C &&
        (CGM.getTarget().getTriple().isOSBinFormatELF() ||
         CGM.getTarget().getTriple().isOSBinFormatWasm())) {
      guard->setComdat(C);
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
      guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
    }

    CGM.setStaticLocalDeclGuardAddress(&D, guard);
  }

  Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //           __cxa_guard_abort (&obj_guard);
  //           throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }

  // Load the first byte of the guard variable.
  llvm::LoadInst *LI =
      Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));

  // Itanium ABI:
  //   An implementation supporting thread-safety on multiprocessor
  //   systems must also guarantee that references to the initialized
  //   object do not occur before the load of the initialization flag.
  //
  // In LLVM, we do this by marking the load Acquire.
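  // Illustrative IR for the thread-safe path (a sketch; _ZGV... is the
  // mangled guard variable):
  //   %guard.byte = load atomic i8, ptr @_ZGV... acquire, align 8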
  if (threadsafe)
    LI->setAtomic(llvm::AtomicOrdering::Acquire);

  // For ARM, we should only check the first bit, rather than the entire byte:
  //
  // ARM C++ ABI 3.2.3.1:
  //   To support the potential use of initialization guard variables
  //   as semaphores that are the target of ARM SWP and LDREX/STREX
  //   synchronizing instructions we define a static initialization
  //   guard variable to be a 4-byte aligned, 4-byte word with the
  //   following inline access protocol.
  //     #define INITIALIZED 1
  //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
  //       if (__cxa_guard_acquire(&obj_guard))
  //         ...
  //     }
  //
  // and similarly for ARM64:
  //
  // ARM64 C++ ABI 3.2.2:
  //   This ABI instead only specifies the value bit 0 of the static guard
  //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
  //   variable is not initialized and 1 when it is.
  llvm::Value *V =
      (UseARMGuardVarABI && !useInt8GuardVariable)
          ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
          : LI;
  llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");

  llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");

  // Check if the first byte of the guard variable is zero.
  CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
                               CodeGenFunction::GuardKind::VariableGuard, &D);

  CGF.EmitBlock(InitCheckBlock);

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.
    llvm::Value *V
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);

    CGF.EmitBlock(InitBlock);
  }

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release. This cannot throw.
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
                                guardAddr.getPointer());
  } else {
    // Store 1 into the first byte of the guard variable after initialization
    // is complete.
    Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
                        Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
  }

  CGF.EmitBlock(EndBlock);
}

/// Register a global destructor using __cxa_atexit.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::FunctionCallee dtor,
                                        llvm::Constant *addr, bool TLS) {
  assert(!CGF.getTarget().getTriple().isOSAIX() &&
         "unexpected call to emitGlobalDtorWithCXAAtExit");
  assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
         "__cxa_atexit is disabled");
  const char *Name = "__cxa_atexit";
  if (TLS) {
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isOSDarwin() ?
"_tlv_atexit" : "__cxa_thread_atexit"; 2538 } 2539 2540 // We're assuming that the destructor function is something we can 2541 // reasonably call with the default CC. Go ahead and cast it to the 2542 // right prototype. 2543 llvm::Type *dtorTy = 2544 llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo(); 2545 2546 // Preserve address space of addr. 2547 auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0; 2548 auto AddrInt8PtrTy = 2549 AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy; 2550 2551 // Create a variable that binds the atexit to this shared object. 2552 llvm::Constant *handle = 2553 CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle"); 2554 auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts()); 2555 GV->setVisibility(llvm::GlobalValue::HiddenVisibility); 2556 2557 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d); 2558 llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()}; 2559 llvm::FunctionType *atexitTy = 2560 llvm::FunctionType::get(CGF.IntTy, paramTys, false); 2561 2562 // Fetch the actual function. 2563 llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name); 2564 if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee())) 2565 fn->setDoesNotThrow(); 2566 2567 if (!addr) 2568 // addr is null when we are trying to register a dtor annotated with 2569 // __attribute__((destructor)) in a constructor function. Using null here is 2570 // okay because this argument is just passed back to the destructor 2571 // function. 2572 addr = llvm::Constant::getNullValue(CGF.Int8PtrTy); 2573 2574 llvm::Value *args[] = {llvm::ConstantExpr::getBitCast( 2575 cast<llvm::Constant>(dtor.getCallee()), dtorTy), 2576 llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy), 2577 handle}; 2578 CGF.EmitNounwindRuntimeCall(atexit, args); 2579 } 2580 2581 static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM, 2582 StringRef FnName) { 2583 // Create a function that registers/unregisters destructors that have the same 2584 // priority. 2585 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false); 2586 llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction( 2587 FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation()); 2588 2589 return GlobalInitOrCleanupFn; 2590 } 2591 2592 void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() { 2593 for (const auto &I : DtorsUsingAtExit) { 2594 int Priority = I.first; 2595 std::string GlobalCleanupFnName = 2596 std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority); 2597 2598 llvm::Function *GlobalCleanupFn = 2599 createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName); 2600 2601 CodeGenFunction CGF(*this); 2602 CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn, 2603 getTypes().arrangeNullaryFunction(), FunctionArgList(), 2604 SourceLocation(), SourceLocation()); 2605 auto AL = ApplyDebugLocation::CreateArtificial(CGF); 2606 2607 // Get the destructor function type, void(*)(void). 2608 llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false); 2609 llvm::Type *dtorTy = dtorFuncTy->getPointerTo(); 2610 2611 // Destructor functions are run/unregistered in non-ascending 2612 // order of their priorities. 
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    auto itv = Dtors.rbegin();
    while (itv != Dtors.rend()) {
      llvm::Function *Dtor = *itv;

      // We're assuming that the destructor function is something we can
      // reasonably call with the correct CC. Go ahead and cast it to the
      // right prototype.
      llvm::Constant *dtor = llvm::ConstantExpr::getBitCast(Dtor, dtorTy);
      llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtor);
      llvm::Value *NeedsDestruct =
          CGF.Builder.CreateIsNull(V, "needs_destruct");

      llvm::BasicBlock *DestructCallBlock =
          CGF.createBasicBlock("destruct.call");
      llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
          (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
      // Check if unatexit returns a value of 0. If it does, jump to
      // DestructCallBlock; otherwise jump to EndBlock directly.
      CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

      CGF.EmitBlock(DestructCallBlock);

      // Emit the call to the casted Dtor.
      llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, dtor);
      // Make sure the call and the callee agree on the calling convention.
      CI->setCallingConv(Dtor->getCallingConv());

      CGF.EmitBlock(EndBlock);

      itv++;
    }

    CGF.FinishFunction();
    AddGlobalDtor(GlobalCleanupFn, Priority);
  }
}

void CodeGenModule::registerGlobalDtorsWithAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalInitFnName =
        std::string("__GLOBAL_init_") + llvm::to_string(Priority);
    llvm::Function *GlobalInitFn =
        createGlobalInitOrCleanupFn(*this, GlobalInitFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
                      SourceLocation(), SourceLocation());
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);

    // Since constructor functions are run in non-descending order of their
    // priorities, destructors are registered in non-descending order of their
    // priorities, and since destructor functions are run in the reverse order
    // of their registration, destructor functions are run in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    for (auto *Dtor : Dtors) {
      // Register the destructor function calling __cxa_atexit if it is
      // available. Otherwise fall back on calling atexit.
      if (getCodeGenOpts().CXAAtExit) {
        emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
      } else {
        // Get the destructor function type, void(*)(void).
        llvm::Type *dtorTy =
            llvm::FunctionType::get(CGF.VoidTy, false)->getPointerTo();

        // We're assuming that the destructor function is something we can
        // reasonably call with the correct CC. Go ahead and cast it to the
        // right prototype.
        CGF.registerGlobalDtorWithAtExit(
            llvm::ConstantExpr::getBitCast(Dtor, dtorTy));
      }
    }

    CGF.FinishFunction();
    AddGlobalCtor(GlobalInitFn, Priority, nullptr);
  }

  if (getCXXABI().useSinitAndSterm())
    unregisterGlobalDtorsWithUnAtExit();
}

/// Register a global destructor as best as we know how.
void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                                       llvm::FunctionCallee dtor,
                                       llvm::Constant *addr) {
  if (D.isNoDestroy(CGM.getContext()))
    return;

  // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
  // or __cxa_atexit depending on whether this VarDecl has thread-local storage
  // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
  // We can always use __cxa_thread_atexit.
  if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
    return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());

  // In Apple kexts, we want to add a global destructor entry.
  // FIXME: shouldn't this be guarded by some variable?
  if (CGM.getLangOpts().AppleKext) {
    // Generate a global destructor entry.
    return CGM.AddCXXDtorEntry(dtor, addr);
  }

  CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
}

static bool isThreadWrapperReplaceable(const VarDecl *VD,
                                       CodeGen::CodeGenModule &CGM) {
  assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
  // Darwin prefers to have references to thread local variables to go through
  // the thread wrapper instead of directly referencing the backing variable.
  return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
         CGM.getTarget().getTriple().isOSDarwin();
}

/// Get the appropriate linkage for the wrapper function. This is essentially
/// the weak form of the variable's linkage; every translation unit which needs
/// the wrapper emits a copy, and we want the linker to merge them.
static llvm::GlobalValue::LinkageTypes
getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
  llvm::GlobalValue::LinkageTypes VarLinkage =
      CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);

  // For internal linkage variables, we don't need an external or weak wrapper.
  if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
    return VarLinkage;

  // If the thread wrapper is replaceable, give it appropriate linkage.
  if (isThreadWrapperReplaceable(VD, CGM))
    if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
        !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
      return VarLinkage;
  return llvm::GlobalValue::WeakODRLinkage;
}

llvm::Function *
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                             llvm::Value *Val) {
  // Mangle the name for the thread_local wrapper function.
  SmallString<256> WrapperName;
  {
    llvm::raw_svector_ostream Out(WrapperName);
    getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
  }

  // FIXME: If VD is a definition, we should regenerate the function attributes
  // before returning.
  if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
    return cast<llvm::Function>(V);

  QualType RetQT = VD->getType();
  if (RetQT->isReferenceType())
    RetQT = RetQT.getNonReferenceType();

  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      getContext().getPointerType(RetQT), FunctionArgList());

  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Wrapper =
      llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
                             WrapperName.str(), &CGM.getModule());

  if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
    Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false);

  // Always resolve references to the wrapper at link time.
  if (!Wrapper->hasLocalLinkage())
    if (!isThreadWrapperReplaceable(VD, CGM) ||
        llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
        llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
        VD->getVisibility() == HiddenVisibility)
      Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);

  if (isThreadWrapperReplaceable(VD, CGM)) {
    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
    Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
  }

  ThreadWrappers.push_back({VD, Wrapper});
  return Wrapper;
}

void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;

  // Separate initializers into those with ordered (or partially-ordered)
  // initialization and those with unordered initialization.
  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
    if (isTemplateInstantiation(
            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
          CXXThreadLocalInits[I];
    else
      OrderedInits.push_back(CXXThreadLocalInits[I]);
  }

  if (!OrderedInits.empty()) {
    // Generate a guarded initialization function.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
    InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
                                                     SourceLocation(),
                                                     /*TLS=*/true);
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
    Guard->setThreadLocal(true);
    Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getAsAlign());

    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
        InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
    }
  }

  // Create declarations for thread wrappers for all thread-local variables
  // with non-discardable definitions in this translation unit.
  for (const VarDecl *VD : CXXThreadLocals) {
    if (VD->hasDefinition() &&
        !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
      llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
      getOrCreateThreadLocalWrapper(VD, GV);
    }
  }

  // Emit all referenced thread wrappers.
  for (auto VDAndWrapper : ThreadWrappers) {
    const VarDecl *VD = VDAndWrapper.first;
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
    llvm::Function *Wrapper = VDAndWrapper.second;

    // Some targets require that all access to thread local variables go
    // through the thread wrapper. This means that we cannot attempt to create
    // a thread wrapper or a thread helper.
    if (!VD->hasDefinition()) {
      if (isThreadWrapperReplaceable(VD, CGM)) {
        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
        continue;
      }

      // If this isn't a TU in which this variable is defined, the thread
      // wrapper is discardable.
      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
    }

    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
    }

    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    bool HasConstantInitialization = false;
    if (!usesThreadWrapperFunction(VD)) {
      HasConstantInitialization = true;
    } else if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      llvm::Function *InitFuncToUse = InitFunc;
      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
      if (InitFuncToUse)
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
                                         InitFuncToUse);
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      Init = llvm::Function::Create(InitFnTy,
                                    llvm::GlobalVariable::ExternalWeakLinkage,
                                    InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(
          GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false);
    }

    if (Init) {
      Init->setVisibility(Var->getVisibility());
      // Don't mark an extern_weak function DSO local on Windows.
2920 if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
2921 Init->setDSOLocal(Var->isDSOLocal());
2922 }
2923
2924 llvm::LLVMContext &Context = CGM.getModule().getContext();
2925
2926 // The AIX linker is not happy with missing weak symbols. However, other
2927 // TUs will not know whether the initialization routine exists, so create
2928 // an empty init function to satisfy the linker.
2929 // This is needed whenever a thread wrapper function is not used, and
2930 // also when the symbol is weak.
2931 if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
2932 isEmittedWithConstantInitializer(VD, true) &&
2933 !mayNeedDestruction(VD)) {
2934 // Init should be null. If it were non-null, then the logic above would
2935 // either be defining the function to be an alias or declaring the
2936 // function with the expectation that the definition of the variable
2937 // is elsewhere.
2938 assert(Init == nullptr && "Expected Init to be null.");
2939
2940 llvm::Function *Func = llvm::Function::Create(
2941 InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule());
2942 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2943 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
2944 cast<llvm::Function>(Func),
2945 /*IsThunk=*/false);
2946 // Create a function body that just returns.
2947 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func);
2948 CGBuilderTy Builder(CGM, Entry);
2949 Builder.CreateRetVoid();
2950 }
2951
2952 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
2953 CGBuilderTy Builder(CGM, Entry);
2954 if (HasConstantInitialization) {
2955 // No dynamic initialization to invoke.
2956 } else if (InitIsInitFunc) {
2957 if (Init) {
2958 llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
2959 if (isThreadWrapperReplaceable(VD, CGM)) {
2960 CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2961 llvm::Function *Fn =
2962 cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
2963 Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2964 }
2965 }
2966 } else if (CGM.getTriple().isOSAIX()) {
2967 // On AIX, unless a thread_local variable is constinit and is neither of
2968 // class type nor of (possibly multi-dimensional) array-of-class type,
2969 // it will have an init routine regardless of whether it is
2970 // const-initialized. Since the routine is guaranteed to exist, we can
2971 // unconditionally call it without testing for its existence. This
2972 // avoids potentially unresolved weak symbols, which the AIX linker
2973 // isn't happy with.
2974 Builder.CreateCall(InitFnTy, Init);
2975 } else {
2976 // Don't know whether we have an init function. Call it if it exists.
2977 llvm::Value *Have = Builder.CreateIsNotNull(Init);
2978 llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2979 llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2980 Builder.CreateCondBr(Have, InitBB, ExitBB);
2981
2982 Builder.SetInsertPoint(InitBB);
2983 Builder.CreateCall(InitFnTy, Init);
2984 Builder.CreateBr(ExitBB);
2985
2986 Builder.SetInsertPoint(ExitBB);
2987 }
2988
2989 // For a reference, the result of the wrapper function is a pointer to
2990 // the referenced object.
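// As an illustrative sketch (not emitted verbatim): given a TU containing
//   thread_local int x = f();   // hypothetical example
//   thread_local int &r = x;
// the wrapper built here behaves roughly like
//   int *_ZTW1x() { _ZTH1x(); return &x; }
// (_ZTW*/_ZTH* being the Itanium wrapper/init manglings), and for the
// reference r it returns the pointer loaded from r's TLS slot, which is
// exactly the load emitted below.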
2991 llvm::Value *Val = Var;
2992 if (VD->getType()->isReferenceType()) {
2993 CharUnits Align = CGM.getContext().getDeclAlign(VD);
2994 Val = Builder.CreateAlignedLoad(Var->getValueType(), Var, Align);
2995 }
2996 if (Val->getType() != Wrapper->getReturnType())
2997 Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
2998 Val, Wrapper->getReturnType(), "");
2999 Builder.CreateRet(Val);
3000 }
3001 }
3002
3003 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
3004 const VarDecl *VD,
3005 QualType LValType) {
3006 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
3007 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3008
3009 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
3010 CallVal->setCallingConv(Wrapper->getCallingConv());
3011
3012 LValue LV;
3013 if (VD->getType()->isReferenceType())
3014 LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
3015 else
3016 LV = CGF.MakeAddrLValue(CallVal, LValType,
3017 CGF.getContext().getDeclAlign(VD));
3018 // FIXME: need setObjCGCLValueClass?
3019 return LV;
3020 }
3021
3022 /// Return whether the given global decl needs a VTT parameter, which it does
3023 /// if it's a base constructor or destructor with virtual bases.
3024 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3025 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
3026
3027 // If we don't have any virtual bases, just return early.
3028 if (!MD->getParent()->getNumVBases())
3029 return false;
3030
3031 // Check if we have a base constructor.
3032 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
3033 return true;
3034
3035 // Check if we have a base destructor.
3036 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
3037 return true;
3038
3039 return false;
3040 }
3041
3042 namespace {
3043 class ItaniumRTTIBuilder {
3044 CodeGenModule &CGM; // Per-module state.
3045 llvm::LLVMContext &VMContext;
3046 const ItaniumCXXABI &CXXABI; // Per-module state.
3047
3048 /// Fields - The fields of the RTTI descriptor currently being built.
3049 SmallVector<llvm::Constant *, 16> Fields;
3050
3051 /// GetAddrOfTypeName - Returns the mangled type name of the given type.
3052 llvm::GlobalVariable *
3053 GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
3054
3055 /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
3056 /// descriptor of the given type.
3057 llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
3058
3059 /// BuildVTablePointer - Build the vtable pointer for the given type.
3060 void BuildVTablePointer(const Type *Ty);
3061
3062 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3063 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3064 void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
3065
3066 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3067 /// classes with bases that do not satisfy the abi::__si_class_type_info
3068 /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
3069 void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
3070
3071 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
3072 /// for pointer types.
3073 void BuildPointerTypeInfo(QualType PointeeTy);
3074
3075 /// BuildObjCObjectTypeInfo - Build the appropriate kind of
3076 /// type_info for an object type.
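/// For example (sketch): for a hypothetical `@interface Derived : Base`,
/// this emits an abi::__si_class_type_info whose base member points at
/// Base's type_info; a root class with no superclass remains a plain
/// abi::__class_type_info with no extra fields.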
3077 void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty); 3078 3079 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info 3080 /// struct, used for member pointer types. 3081 void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty); 3082 3083 public: 3084 ItaniumRTTIBuilder(const ItaniumCXXABI &ABI) 3085 : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {} 3086 3087 // Pointer type info flags. 3088 enum { 3089 /// PTI_Const - Type has const qualifier. 3090 PTI_Const = 0x1, 3091 3092 /// PTI_Volatile - Type has volatile qualifier. 3093 PTI_Volatile = 0x2, 3094 3095 /// PTI_Restrict - Type has restrict qualifier. 3096 PTI_Restrict = 0x4, 3097 3098 /// PTI_Incomplete - Type is incomplete. 3099 PTI_Incomplete = 0x8, 3100 3101 /// PTI_ContainingClassIncomplete - Containing class is incomplete. 3102 /// (in pointer to member). 3103 PTI_ContainingClassIncomplete = 0x10, 3104 3105 /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS). 3106 //PTI_TransactionSafe = 0x20, 3107 3108 /// PTI_Noexcept - Pointee is noexcept function (C++1z). 3109 PTI_Noexcept = 0x40, 3110 }; 3111 3112 // VMI type info flags. 3113 enum { 3114 /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance. 3115 VMI_NonDiamondRepeat = 0x1, 3116 3117 /// VMI_DiamondShaped - Class is diamond shaped. 3118 VMI_DiamondShaped = 0x2 3119 }; 3120 3121 // Base class type info flags. 3122 enum { 3123 /// BCTI_Virtual - Base class is virtual. 3124 BCTI_Virtual = 0x1, 3125 3126 /// BCTI_Public - Base class is public. 3127 BCTI_Public = 0x2 3128 }; 3129 3130 /// BuildTypeInfo - Build the RTTI type info struct for the given type, or 3131 /// link to an existing RTTI descriptor if one already exists. 3132 llvm::Constant *BuildTypeInfo(QualType Ty); 3133 3134 /// BuildTypeInfo - Build the RTTI type info struct for the given type. 3135 llvm::Constant *BuildTypeInfo( 3136 QualType Ty, 3137 llvm::GlobalVariable::LinkageTypes Linkage, 3138 llvm::GlobalValue::VisibilityTypes Visibility, 3139 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass); 3140 }; 3141 } 3142 3143 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName( 3144 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) { 3145 SmallString<256> Name; 3146 llvm::raw_svector_ostream Out(Name); 3147 CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out); 3148 3149 // We know that the mangled name of the type starts at index 4 of the 3150 // mangled name of the typename, so we can just index into it in order to 3151 // get the mangled name of the type. 3152 llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext, 3153 Name.substr(4)); 3154 auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy); 3155 3156 llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable( 3157 Name, Init->getType(), Linkage, Align.getQuantity()); 3158 3159 GV->setInitializer(Init); 3160 3161 return GV; 3162 } 3163 3164 llvm::Constant * 3165 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) { 3166 // Mangle the RTTI name. 3167 SmallString<256> Name; 3168 llvm::raw_svector_ostream Out(Name); 3169 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out); 3170 3171 // Look for an existing global. 3172 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name); 3173 3174 if (!GV) { 3175 // Create a new global variable. 
3176 // Note for the future: If we ever want to do deferred emission of RTTI,
3177 // check whether emitting vtables opportunistically needs any adjustment.
3178
3179 GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
3180 /*isConstant=*/true,
3181 llvm::GlobalValue::ExternalLinkage, nullptr,
3182 Name);
3183 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3184 CGM.setGVProperties(GV, RD);
3185 // Import the typeinfo symbol when all non-inline virtual methods are
3186 // imported.
3187 if (CGM.getTarget().hasPS4DLLImportExport()) {
3188 if (RD && CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD)) {
3189 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
3190 CGM.setDSOLocal(GV);
3191 }
3192 }
3193 }
3194
3195 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3196 }
3197
3198 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
3199 /// info for that type is defined in the standard library.
3200 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
3201 // Itanium C++ ABI 2.9.2:
3202 // Basic type information (e.g. for "int", "bool", etc.) will be kept in
3203 // the run-time support library. Specifically, the run-time support
3204 // library should contain type_info objects for the types X, X* and
3205 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
3206 // unsigned char, signed char, short, unsigned short, int, unsigned int,
3207 // long, unsigned long, long long, unsigned long long, float, double,
3208 // long double, char16_t, char32_t, and the IEEE 754r decimal and
3209 // half-precision floating point types.
3210 //
3211 // GCC also emits RTTI for __int128.
3212 // FIXME: We do not emit RTTI information for decimal types here.
3213
3214 // Types added here must also be added to EmitFundamentalRTTIDescriptors.
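// For example, `typeid(int)` is expected to resolve to the runtime's
// `_ZTIi` object rather than to a locally emitted copy, and likewise
// `int*` (`_ZTIPi`) and `const int*` (`_ZTIPKi`).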
3215 switch (Ty->getKind()) {
3216 case BuiltinType::Void:
3217 case BuiltinType::NullPtr:
3218 case BuiltinType::Bool:
3219 case BuiltinType::WChar_S:
3220 case BuiltinType::WChar_U:
3221 case BuiltinType::Char_U:
3222 case BuiltinType::Char_S:
3223 case BuiltinType::UChar:
3224 case BuiltinType::SChar:
3225 case BuiltinType::Short:
3226 case BuiltinType::UShort:
3227 case BuiltinType::Int:
3228 case BuiltinType::UInt:
3229 case BuiltinType::Long:
3230 case BuiltinType::ULong:
3231 case BuiltinType::LongLong:
3232 case BuiltinType::ULongLong:
3233 case BuiltinType::Half:
3234 case BuiltinType::Float:
3235 case BuiltinType::Double:
3236 case BuiltinType::LongDouble:
3237 case BuiltinType::Float16:
3238 case BuiltinType::Float128:
3239 case BuiltinType::Ibm128:
3240 case BuiltinType::Char8:
3241 case BuiltinType::Char16:
3242 case BuiltinType::Char32:
3243 case BuiltinType::Int128:
3244 case BuiltinType::UInt128:
3245 return true;
3246
3247 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3248 case BuiltinType::Id:
3249 #include "clang/Basic/OpenCLImageTypes.def"
3250 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3251 case BuiltinType::Id:
3252 #include "clang/Basic/OpenCLExtensionTypes.def"
3253 case BuiltinType::OCLSampler:
3254 case BuiltinType::OCLEvent:
3255 case BuiltinType::OCLClkEvent:
3256 case BuiltinType::OCLQueue:
3257 case BuiltinType::OCLReserveID:
3258 #define SVE_TYPE(Name, Id, SingletonId) \
3259 case BuiltinType::Id:
3260 #include "clang/Basic/AArch64SVEACLETypes.def"
3261 #define PPC_VECTOR_TYPE(Name, Id, Size) \
3262 case BuiltinType::Id:
3263 #include "clang/Basic/PPCTypes.def"
3264 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3265 #include "clang/Basic/RISCVVTypes.def"
3266 case BuiltinType::ShortAccum:
3267 case BuiltinType::Accum:
3268 case BuiltinType::LongAccum:
3269 case BuiltinType::UShortAccum:
3270 case BuiltinType::UAccum:
3271 case BuiltinType::ULongAccum:
3272 case BuiltinType::ShortFract:
3273 case BuiltinType::Fract:
3274 case BuiltinType::LongFract:
3275 case BuiltinType::UShortFract:
3276 case BuiltinType::UFract:
3277 case BuiltinType::ULongFract:
3278 case BuiltinType::SatShortAccum:
3279 case BuiltinType::SatAccum:
3280 case BuiltinType::SatLongAccum:
3281 case BuiltinType::SatUShortAccum:
3282 case BuiltinType::SatUAccum:
3283 case BuiltinType::SatULongAccum:
3284 case BuiltinType::SatShortFract:
3285 case BuiltinType::SatFract:
3286 case BuiltinType::SatLongFract:
3287 case BuiltinType::SatUShortFract:
3288 case BuiltinType::SatUFract:
3289 case BuiltinType::SatULongFract:
3290 case BuiltinType::BFloat16:
3291 return false;
3292
3293 case BuiltinType::Dependent:
3294 #define BUILTIN_TYPE(Id, SingletonId)
3295 #define PLACEHOLDER_TYPE(Id, SingletonId) \
3296 case BuiltinType::Id:
3297 #include "clang/AST/BuiltinTypes.def"
3298 llvm_unreachable("asking for RTTI for a placeholder type!");
3299
3300 case BuiltinType::ObjCId:
3301 case BuiltinType::ObjCClass:
3302 case BuiltinType::ObjCSel:
3303 llvm_unreachable("FIXME: Objective-C types are unsupported!");
3304 }
3305
3306 llvm_unreachable("Invalid BuiltinType Kind!");
3307 }
3308
3309 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3310 QualType PointeeTy = PointerTy->getPointeeType();
3311 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3312 if (!BuiltinTy)
3313 return false;
3314
3315 // Check the qualifiers.
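// Only a top-level 'const' on the pointee may remain: the runtime provides
// type_info for X* and X const* only, so e.g. `const int *` can use the
// library's copy while `volatile int *` must be emitted locally.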
3316 Qualifiers Quals = PointeeTy.getQualifiers();
3317 Quals.removeConst();
3318
3319 if (!Quals.empty())
3320 return false;
3321
3322 return TypeInfoIsInStandardLibrary(BuiltinTy);
3323 }
3324
3325 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
3326 /// information for the given type exists in the standard library.
3327 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3328 // Type info for builtin types is defined in the standard library.
3329 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3330 return TypeInfoIsInStandardLibrary(BuiltinTy);
3331
3332 // Type info for some pointer types to builtin types is defined in the
3333 // standard library.
3334 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3335 return TypeInfoIsInStandardLibrary(PointerTy);
3336
3337 return false;
3338 }
3339
3340 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3341 /// the given type exists somewhere else, so we should not emit the type
3342 /// information in this translation unit. Assumes that the type is not a
3343 /// standard-library type.
3344 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3345 QualType Ty) {
3346 ASTContext &Context = CGM.getContext();
3347
3348 // If RTTI is disabled, assume it might be disabled in the
3349 // translation unit that defines any potential key function, too.
3350 if (!Context.getLangOpts().RTTI) return false;
3351
3352 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3353 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3354 if (!RD->hasDefinition())
3355 return false;
3356
3357 if (!RD->isDynamicClass())
3358 return false;
3359
3360 // FIXME: this may need to be reconsidered if the key function
3361 // changes.
3362 // N.B. We must always emit the RTTI data ourselves if there exists a key
3363 // function.
3364 bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3365
3366 // Don't import the RTTI but emit it locally.
3367 if (CGM.getTriple().isWindowsGNUEnvironment())
3368 return false;
3369
3370 if (CGM.getVTables().isVTableExternal(RD)) {
3371 if (CGM.getTarget().hasPS4DLLImportExport())
3372 return true;
3373
3374 // Use the external descriptor unless this is a dllimport'ed class
3375 // outside the Windows Itanium environment.
3376 return !IsDLLImport || CGM.getTriple().isWindowsItaniumEnvironment();
3377 }
3378 if (IsDLLImport)
3379 return true;
3380 }
3381
3382 return false;
3383 }
3384
3385 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
3386 static bool IsIncompleteClassType(const RecordType *RecordTy) {
3387 return !RecordTy->getDecl()->isCompleteDefinition();
3388 }
3389
3390 /// ContainsIncompleteClassType - Returns whether the given type contains an
3391 /// incomplete class type. This is true if
3392 ///
3393 /// * The given type is an incomplete class type.
3394 /// * The given type is a pointer type whose pointee type contains an
3395 /// incomplete class type.
3396 /// * The given type is a member pointer type whose class is an incomplete
3397 /// class type.
3398 /// * The given type is a member pointer type whose pointee type contains an
3399 /// incomplete class type.
3400 ///
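/// For example, given only the forward declaration `struct Incomplete;`,
/// the types `Incomplete`, `Incomplete**`, and `int Incomplete::*` all
/// contain an incomplete class type.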
3401 static bool ContainsIncompleteClassType(QualType Ty) {
3402 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3403 if (IsIncompleteClassType(RecordTy))
3404 return true;
3405 }
3406
3407 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3408 return ContainsIncompleteClassType(PointerTy->getPointeeType());
3409
3410 if (const MemberPointerType *MemberPointerTy =
3411 dyn_cast<MemberPointerType>(Ty)) {
3412 // Check if the class type is incomplete.
3413 const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3414 if (IsIncompleteClassType(ClassType))
3415 return true;
3416
3417 return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3418 }
3419
3420 return false;
3421 }
3422
3423 // CanUseSingleInheritance - Return whether the given record decl has a "single,
3424 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
3425 // iff the base is)", according to Itanium C++ ABI, 2.9.5p6b.
3426 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3427 // Check the number of bases.
3428 if (RD->getNumBases() != 1)
3429 return false;
3430
3431 // Get the base.
3432 CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3433
3434 // Check that the base is not virtual.
3435 if (Base->isVirtual())
3436 return false;
3437
3438 // Check that the base is public.
3439 if (Base->getAccessSpecifier() != AS_public)
3440 return false;
3441
3442 // Check that the class is dynamic iff the base is.
3443 auto *BaseDecl =
3444 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3445 if (!BaseDecl->isEmpty() &&
3446 BaseDecl->isDynamicClass() != RD->isDynamicClass())
3447 return false;
3448
3449 return true;
3450 }
3451
3452 void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
3453 // abi::__class_type_info.
3454 static const char * const ClassTypeInfo =
3455 "_ZTVN10__cxxabiv117__class_type_infoE";
3456 // abi::__si_class_type_info.
3457 static const char * const SIClassTypeInfo =
3458 "_ZTVN10__cxxabiv120__si_class_type_infoE";
3459 // abi::__vmi_class_type_info.
3460 static const char * const VMIClassTypeInfo =
3461 "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
3462
3463 const char *VTableName = nullptr;
3464
3465 switch (Ty->getTypeClass()) {
3466 #define TYPE(Class, Base)
3467 #define ABSTRACT_TYPE(Class, Base)
3468 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3469 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3470 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3471 #include "clang/AST/TypeNodes.inc"
3472 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3473
3474 case Type::LValueReference:
3475 case Type::RValueReference:
3476 llvm_unreachable("References shouldn't get here");
3477
3478 case Type::Auto:
3479 case Type::DeducedTemplateSpecialization:
3480 llvm_unreachable("Undeduced type shouldn't get here");
3481
3482 case Type::Pipe:
3483 llvm_unreachable("Pipe types shouldn't get here");
3484
3485 case Type::Builtin:
3486 case Type::BitInt:
3487 // GCC treats vector and complex types as fundamental types.
3488 case Type::Vector:
3489 case Type::ExtVector:
3490 case Type::ConstantMatrix:
3491 case Type::Complex:
3492 case Type::Atomic:
3493 // FIXME: GCC treats block pointers as fundamental types?!
3494 case Type::BlockPointer:
3495 // abi::__fundamental_type_info.
3496 VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE"; 3497 break; 3498 3499 case Type::ConstantArray: 3500 case Type::IncompleteArray: 3501 case Type::VariableArray: 3502 // abi::__array_type_info. 3503 VTableName = "_ZTVN10__cxxabiv117__array_type_infoE"; 3504 break; 3505 3506 case Type::FunctionNoProto: 3507 case Type::FunctionProto: 3508 // abi::__function_type_info. 3509 VTableName = "_ZTVN10__cxxabiv120__function_type_infoE"; 3510 break; 3511 3512 case Type::Enum: 3513 // abi::__enum_type_info. 3514 VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE"; 3515 break; 3516 3517 case Type::Record: { 3518 const CXXRecordDecl *RD = 3519 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl()); 3520 3521 if (!RD->hasDefinition() || !RD->getNumBases()) { 3522 VTableName = ClassTypeInfo; 3523 } else if (CanUseSingleInheritance(RD)) { 3524 VTableName = SIClassTypeInfo; 3525 } else { 3526 VTableName = VMIClassTypeInfo; 3527 } 3528 3529 break; 3530 } 3531 3532 case Type::ObjCObject: 3533 // Ignore protocol qualifiers. 3534 Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr(); 3535 3536 // Handle id and Class. 3537 if (isa<BuiltinType>(Ty)) { 3538 VTableName = ClassTypeInfo; 3539 break; 3540 } 3541 3542 assert(isa<ObjCInterfaceType>(Ty)); 3543 LLVM_FALLTHROUGH; 3544 3545 case Type::ObjCInterface: 3546 if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) { 3547 VTableName = SIClassTypeInfo; 3548 } else { 3549 VTableName = ClassTypeInfo; 3550 } 3551 break; 3552 3553 case Type::ObjCObjectPointer: 3554 case Type::Pointer: 3555 // abi::__pointer_type_info. 3556 VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE"; 3557 break; 3558 3559 case Type::MemberPointer: 3560 // abi::__pointer_to_member_type_info. 3561 VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE"; 3562 break; 3563 } 3564 3565 llvm::Constant *VTable = nullptr; 3566 3567 // Check if the alias exists. If it doesn't, then get or create the global. 3568 if (CGM.getItaniumVTableContext().isRelativeLayout()) 3569 VTable = CGM.getModule().getNamedAlias(VTableName); 3570 if (!VTable) 3571 VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy); 3572 3573 CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts())); 3574 3575 llvm::Type *PtrDiffTy = 3576 CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType()); 3577 3578 // The vtable address point is 2. 3579 if (CGM.getItaniumVTableContext().isRelativeLayout()) { 3580 // The vtable address point is 8 bytes after its start: 3581 // 4 for the offset to top + 4 for the relative offset to rtti. 3582 llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8); 3583 VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy); 3584 VTable = 3585 llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight); 3586 } else { 3587 llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2); 3588 VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, 3589 Two); 3590 } 3591 VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy); 3592 3593 Fields.push_back(VTable); 3594 } 3595 3596 /// Return the linkage that the type info and type info name constants 3597 /// should have for the given type. 
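/// For example (sketch): RTTI for a class in an anonymous namespace must
/// be internal, while RTTI for an ordinary external dynamic class
/// typically follows the linkage chosen for the class's vtable.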
3598 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3599 QualType Ty) {
3600 // Itanium C++ ABI 2.9.5p7:
3601 // In addition, it and all of the intermediate abi::__pointer_type_info
3602 // structs in the chain down to the abi::__class_type_info for the
3603 // incomplete class type must be prevented from resolving to the
3604 // corresponding type_info structs for the complete class type, possibly
3605 // by making them local static objects. Finally, a dummy class RTTI is
3606 // generated for the incomplete type that will not resolve to the final
3607 // complete class RTTI (because the latter need not exist), possibly by
3608 // making it a local static object.
3609 if (ContainsIncompleteClassType(Ty))
3610 return llvm::GlobalValue::InternalLinkage;
3611
3612 switch (Ty->getLinkage()) {
3613 case NoLinkage:
3614 case InternalLinkage:
3615 case UniqueExternalLinkage:
3616 return llvm::GlobalValue::InternalLinkage;
3617
3618 case VisibleNoLinkage:
3619 case ModuleInternalLinkage:
3620 case ModuleLinkage:
3621 case ExternalLinkage:
3622 // If RTTI is not enabled, this type info struct is only going to be
3623 // used for exception handling. Give it linkonce_odr linkage.
3624 if (!CGM.getLangOpts().RTTI)
3625 return llvm::GlobalValue::LinkOnceODRLinkage;
3626
3627 if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
3628 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
3629 if (RD->hasAttr<WeakAttr>())
3630 return llvm::GlobalValue::WeakODRLinkage;
3631 if (CGM.getTriple().isWindowsItaniumEnvironment())
3632 if (RD->hasAttr<DLLImportAttr>() &&
3633 ShouldUseExternalRTTIDescriptor(CGM, Ty))
3634 return llvm::GlobalValue::ExternalLinkage;
3635 // MinGW always uses LinkOnceODRLinkage for type info.
3636 if (RD->isDynamicClass() &&
3637 !CGM.getContext()
3638 .getTargetInfo()
3639 .getTriple()
3640 .isWindowsGNUEnvironment())
3641 return CGM.getVTableLinkage(RD);
3642 }
3643
3644 return llvm::GlobalValue::LinkOnceODRLinkage;
3645 }
3646
3647 llvm_unreachable("Invalid linkage!");
3648 }
3649
3650 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
3651 // We want to operate on the canonical type.
3652 Ty = Ty.getCanonicalType();
3653
3654 // Check if we've already emitted an RTTI descriptor for this type.
3655 SmallString<256> Name;
3656 llvm::raw_svector_ostream Out(Name);
3657 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3658
3659 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3660 if (OldGV && !OldGV->isDeclaration()) {
3661 assert(!OldGV->hasAvailableExternallyLinkage() &&
3662 "available_externally typeinfos not yet implemented");
3663
3664 return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
3665 }
3666
3667 // Check if there is already an external RTTI descriptor for this type.
3668 if (IsStandardLibraryRTTIDescriptor(Ty) ||
3669 ShouldUseExternalRTTIDescriptor(CGM, Ty))
3670 return GetAddrOfExternalRTTIDescriptor(Ty);
3671
3672 // Otherwise, compute the linkage to emit the RTTI descriptor with.
3673 llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
3674
3675 // Give the type_info object and name the formal visibility of the
3676 // type itself.
3677 llvm::GlobalValue::VisibilityTypes llvmVisibility;
3678 if (llvm::GlobalValue::isLocalLinkage(Linkage))
3679 // If the linkage is local, only default visibility makes sense.
3680 llvmVisibility = llvm::GlobalValue::DefaultVisibility; 3681 else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) == 3682 ItaniumCXXABI::RUK_NonUniqueHidden) 3683 llvmVisibility = llvm::GlobalValue::HiddenVisibility; 3684 else 3685 llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility()); 3686 3687 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass = 3688 llvm::GlobalValue::DefaultStorageClass; 3689 if (auto RD = Ty->getAsCXXRecordDecl()) { 3690 if ((CGM.getTriple().isWindowsItaniumEnvironment() && 3691 RD->hasAttr<DLLExportAttr>()) || 3692 (CGM.shouldMapVisibilityToDLLExport(RD) && 3693 !llvm::GlobalValue::isLocalLinkage(Linkage) && 3694 llvmVisibility == llvm::GlobalValue::DefaultVisibility)) 3695 DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass; 3696 } 3697 return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass); 3698 } 3699 3700 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo( 3701 QualType Ty, 3702 llvm::GlobalVariable::LinkageTypes Linkage, 3703 llvm::GlobalValue::VisibilityTypes Visibility, 3704 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) { 3705 // Add the vtable pointer. 3706 BuildVTablePointer(cast<Type>(Ty)); 3707 3708 // And the name. 3709 llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage); 3710 llvm::Constant *TypeNameField; 3711 3712 // If we're supposed to demote the visibility, be sure to set a flag 3713 // to use a string comparison for type_info comparisons. 3714 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness = 3715 CXXABI.classifyRTTIUniqueness(Ty, Linkage); 3716 if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) { 3717 // The flag is the sign bit, which on ARM64 is defined to be clear 3718 // for global pointers. This is very ARM64-specific. 3719 TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty); 3720 llvm::Constant *flag = 3721 llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63); 3722 TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag); 3723 TypeNameField = 3724 llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy); 3725 } else { 3726 TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy); 3727 } 3728 Fields.push_back(TypeNameField); 3729 3730 switch (Ty->getTypeClass()) { 3731 #define TYPE(Class, Base) 3732 #define ABSTRACT_TYPE(Class, Base) 3733 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 3734 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 3735 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 3736 #include "clang/AST/TypeNodes.inc" 3737 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 3738 3739 // GCC treats vector types as fundamental types. 3740 case Type::Builtin: 3741 case Type::Vector: 3742 case Type::ExtVector: 3743 case Type::ConstantMatrix: 3744 case Type::Complex: 3745 case Type::BlockPointer: 3746 // Itanium C++ ABI 2.9.5p4: 3747 // abi::__fundamental_type_info adds no data members to std::type_info. 3748 break; 3749 3750 case Type::LValueReference: 3751 case Type::RValueReference: 3752 llvm_unreachable("References shouldn't get here"); 3753 3754 case Type::Auto: 3755 case Type::DeducedTemplateSpecialization: 3756 llvm_unreachable("Undeduced type shouldn't get here"); 3757 3758 case Type::Pipe: 3759 break; 3760 3761 case Type::BitInt: 3762 break; 3763 3764 case Type::ConstantArray: 3765 case Type::IncompleteArray: 3766 case Type::VariableArray: 3767 // Itanium C++ ABI 2.9.5p5: 3768 // abi::__array_type_info adds no data members to std::type_info. 
3769 break; 3770 3771 case Type::FunctionNoProto: 3772 case Type::FunctionProto: 3773 // Itanium C++ ABI 2.9.5p5: 3774 // abi::__function_type_info adds no data members to std::type_info. 3775 break; 3776 3777 case Type::Enum: 3778 // Itanium C++ ABI 2.9.5p5: 3779 // abi::__enum_type_info adds no data members to std::type_info. 3780 break; 3781 3782 case Type::Record: { 3783 const CXXRecordDecl *RD = 3784 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl()); 3785 if (!RD->hasDefinition() || !RD->getNumBases()) { 3786 // We don't need to emit any fields. 3787 break; 3788 } 3789 3790 if (CanUseSingleInheritance(RD)) 3791 BuildSIClassTypeInfo(RD); 3792 else 3793 BuildVMIClassTypeInfo(RD); 3794 3795 break; 3796 } 3797 3798 case Type::ObjCObject: 3799 case Type::ObjCInterface: 3800 BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty)); 3801 break; 3802 3803 case Type::ObjCObjectPointer: 3804 BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType()); 3805 break; 3806 3807 case Type::Pointer: 3808 BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType()); 3809 break; 3810 3811 case Type::MemberPointer: 3812 BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty)); 3813 break; 3814 3815 case Type::Atomic: 3816 // No fields, at least for the moment. 3817 break; 3818 } 3819 3820 llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields); 3821 3822 SmallString<256> Name; 3823 llvm::raw_svector_ostream Out(Name); 3824 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out); 3825 llvm::Module &M = CGM.getModule(); 3826 llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name); 3827 llvm::GlobalVariable *GV = 3828 new llvm::GlobalVariable(M, Init->getType(), 3829 /*isConstant=*/true, Linkage, Init, Name); 3830 3831 // Export the typeinfo in the same circumstances as the vtable is exported. 3832 auto GVDLLStorageClass = DLLStorageClass; 3833 if (CGM.getTarget().hasPS4DLLImportExport()) { 3834 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) { 3835 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl()); 3836 if (RD->hasAttr<DLLExportAttr>() || 3837 CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD)) { 3838 GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass; 3839 } 3840 } 3841 } 3842 3843 // If there's already an old global variable, replace it with the new one. 3844 if (OldGV) { 3845 GV->takeName(OldGV); 3846 llvm::Constant *NewPtr = 3847 llvm::ConstantExpr::getBitCast(GV, OldGV->getType()); 3848 OldGV->replaceAllUsesWith(NewPtr); 3849 OldGV->eraseFromParent(); 3850 } 3851 3852 if (CGM.supportsCOMDAT() && GV->isWeakForLinker()) 3853 GV->setComdat(M.getOrInsertComdat(GV->getName())); 3854 3855 CharUnits Align = 3856 CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0)); 3857 GV->setAlignment(Align.getAsAlign()); 3858 3859 // The Itanium ABI specifies that type_info objects must be globally 3860 // unique, with one exception: if the type is an incomplete class 3861 // type or a (possibly indirect) pointer to one. That exception 3862 // affects the general case of comparing type_info objects produced 3863 // by the typeid operator, which is why the comparison operators on 3864 // std::type_info generally use the type_info name pointers instead 3865 // of the object addresses. However, the language's built-in uses 3866 // of RTTI generally require class types to be complete, even when 3867 // manipulating pointers to those class types. 
This allows the
3868 // implementation of dynamic_cast to rely on address equality tests,
3869 // which is much faster.
3870
3871 // All of this is to say that it's important that both the type_info
3872 // object and the type_info name be uniqued when weakly emitted.
3873
3874 TypeName->setVisibility(Visibility);
3875 CGM.setDSOLocal(TypeName);
3876
3877 GV->setVisibility(Visibility);
3878 CGM.setDSOLocal(GV);
3879
3880 TypeName->setDLLStorageClass(DLLStorageClass);
3881 GV->setDLLStorageClass(CGM.getTarget().hasPS4DLLImportExport()
3882 ? GVDLLStorageClass
3883 : DLLStorageClass);
3884
3885 TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3886 GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3887
3888 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3889 }
3890
3891 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3892 /// for the given Objective-C object type.
3893 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3894 // Drop qualifiers.
3895 const Type *T = OT->getBaseType().getTypePtr();
3896 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3897
3898 // The builtin types are abi::__class_type_infos and don't require
3899 // extra fields.
3900 if (isa<BuiltinType>(T)) return;
3901
3902 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3903 ObjCInterfaceDecl *Super = Class->getSuperClass();
3904
3905 // Root classes are also __class_type_info.
3906 if (!Super) return;
3907
3908 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3909
3910 // Everything else is single inheritance.
3911 llvm::Constant *BaseTypeInfo =
3912 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3913 Fields.push_back(BaseTypeInfo);
3914 }
3915
3916 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3917 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3918 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3919 // Itanium C++ ABI 2.9.5p6b:
3920 // It adds to abi::__class_type_info a single member pointing to the
3921 // type_info structure for the base type,
3922 llvm::Constant *BaseTypeInfo =
3923 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3924 Fields.push_back(BaseTypeInfo);
3925 }
3926
3927 namespace {
3928 /// SeenBases - Contains virtual and non-virtual bases seen when traversing
3929 /// a class hierarchy.
3930 struct SeenBases {
3931 llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
3932 llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
3933 };
3934 }
3935
3936 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3937 /// abi::__vmi_class_type_info.
3938 ///
3939 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3940 SeenBases &Bases) {
3941
3942 unsigned Flags = 0;
3943
3944 auto *BaseDecl =
3945 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3946
3947 if (Base->isVirtual()) {
3948 // Mark the virtual base as seen.
3949 if (!Bases.VirtualBases.insert(BaseDecl).second) {
3950 // If this virtual base has been seen before, then the class is diamond
3951 // shaped.
3952 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3953 } else {
3954 if (Bases.NonVirtualBases.count(BaseDecl))
3955 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3956 }
3957 } else {
3958 // Mark the non-virtual base as seen.
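// For example (illustrative): with `struct A {}; struct B : A {};
// struct C : A {}; struct D : B, C {};`, walking D's bases reaches A once
// through B and again through C, so the repeated non-virtual base sets
// VMI_NonDiamondRepeat below.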
3959 if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3960 // If this non-virtual base has been seen before, then the class has non-
3961 // diamond shaped repeated inheritance.
3962 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3963 } else {
3964 if (Bases.VirtualBases.count(BaseDecl))
3965 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3966 }
3967 }
3968
3969 // Walk all bases.
3970 for (const auto &I : BaseDecl->bases())
3971 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3972
3973 return Flags;
3974 }
3975
3976 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3977 unsigned Flags = 0;
3978 SeenBases Bases;
3979
3980 // Walk all bases.
3981 for (const auto &I : RD->bases())
3982 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3983
3984 return Flags;
3985 }
3986
3987 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3988 /// classes with bases that do not satisfy the abi::__si_class_type_info
3989 /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
3990 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
3991 llvm::Type *UnsignedIntLTy =
3992 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3993
3994 // Itanium C++ ABI 2.9.5p6c:
3995 // __flags is a word with flags describing details about the class
3996 // structure, which may be referenced by using the __flags_masks
3997 // enumeration. These flags refer to both direct and indirect bases.
3998 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
3999 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4000
4001 // Itanium C++ ABI 2.9.5p6c:
4002 // __base_count is a word with the number of direct proper base class
4003 // descriptions that follow.
4004 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
4005
4006 if (!RD->getNumBases())
4007 return;
4008
4009 // Now add the base class descriptions.
4010
4011 // Itanium C++ ABI 2.9.5p6c:
4012 // __base_info[] is an array of base class descriptions -- one for every
4013 // direct proper base. Each description is of the type:
4014 //
4015 // struct abi::__base_class_type_info {
4016 // public:
4017 // const __class_type_info *__base_type;
4018 // long __offset_flags;
4019 //
4020 // enum __offset_flags_masks {
4021 // __virtual_mask = 0x1,
4022 // __public_mask = 0x2,
4023 // __offset_shift = 8
4024 // };
4025 // };
4026
4027 // If we're in MinGW and 'long' isn't wide enough for a pointer, use 'long
4028 // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
4029 // LLP64 platforms.
4030 // FIXME: Consider updating libc++abi to match, and extend this logic to all
4031 // LLP64 platforms.
4032 QualType OffsetFlagsTy = CGM.getContext().LongTy;
4033 const TargetInfo &TI = CGM.getContext().getTargetInfo();
4034 if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
4035 OffsetFlagsTy = CGM.getContext().LongLongTy;
4036 llvm::Type *OffsetFlagsLTy =
4037 CGM.getTypes().ConvertType(OffsetFlagsTy);
4038
4039 for (const auto &Base : RD->bases()) {
4040 // The __base_type member points to the RTTI for the base type.
4041 Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
4042
4043 auto *BaseDecl =
4044 cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
4045
4046 int64_t OffsetFlags = 0;
4047
4048 // All but the lower 8 bits of __offset_flags are a signed offset.
4049 // For a non-virtual base, this is the offset in the object of the base
4050 // subobject. For a virtual base, this is the offset in the virtual table of
4051 // the virtual base offset for the virtual base referenced (negative).
4052 CharUnits Offset;
4053 if (Base.isVirtual())
4054 Offset =
4055 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
4056 else {
4057 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
4058 Offset = Layout.getBaseClassOffset(BaseDecl);
4059 }
4060
4061 OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
4062
4063 // The low-order byte of __offset_flags contains flags, as given by the
4064 // masks from the enumeration __offset_flags_masks.
4065 if (Base.isVirtual())
4066 OffsetFlags |= BCTI_Virtual;
4067 if (Base.getAccessSpecifier() == AS_public)
4068 OffsetFlags |= BCTI_Public;
4069
4070 Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
4071 }
4072 }
4073
4074 /// Compute the flags for a __pbase_type_info, and remove the corresponding
4075 /// pieces from \p Type.
4076 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
4077 unsigned Flags = 0;
4078
4079 if (Type.isConstQualified())
4080 Flags |= ItaniumRTTIBuilder::PTI_Const;
4081 if (Type.isVolatileQualified())
4082 Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4083 if (Type.isRestrictQualified())
4084 Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4085 Type = Type.getUnqualifiedType();
4086
4087 // Itanium C++ ABI 2.9.5p7:
4088 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
4089 // incomplete class type, the incomplete target type flag is set.
4090 if (ContainsIncompleteClassType(Type))
4091 Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4092
4093 if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4094 if (Proto->isNothrow()) {
4095 Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
4096 Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
4097 }
4098 }
4099
4100 return Flags;
4101 }
4102
4103 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4104 /// used for pointer types.
4105 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4106 // Itanium C++ ABI 2.9.5p7:
4107 // __flags is a flag word describing the cv-qualification and other
4108 // attributes of the type pointed to.
4109 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4110
4111 llvm::Type *UnsignedIntLTy =
4112 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4113 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4114
4115 // Itanium C++ ABI 2.9.5p7:
4116 // __pointee is a pointer to the std::type_info derivation for the
4117 // unqualified type being pointed to.
4118 llvm::Constant *PointeeTypeInfo =
4119 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4120 Fields.push_back(PointeeTypeInfo);
4121 }
4122
4123 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4124 /// struct, used for member pointer types.
4125 void
4126 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4127 QualType PointeeTy = Ty->getPointeeType();
4128
4129 // Itanium C++ ABI 2.9.5p7:
4130 // __flags is a flag word describing the cv-qualification and other
4131 // attributes of the type pointed to.
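// For example (sketch): for `const int Incomplete::*` where `Incomplete`
// is only forward-declared, __flags gets PTI_Const and
// PTI_ContainingClassIncomplete below, and __pointee names the type_info
// for plain `int`.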
4132 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy); 4133 4134 const RecordType *ClassType = cast<RecordType>(Ty->getClass()); 4135 if (IsIncompleteClassType(ClassType)) 4136 Flags |= PTI_ContainingClassIncomplete; 4137 4138 llvm::Type *UnsignedIntLTy = 4139 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy); 4140 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags)); 4141 4142 // Itanium C++ ABI 2.9.5p7: 4143 // __pointee is a pointer to the std::type_info derivation for the 4144 // unqualified type being pointed to. 4145 llvm::Constant *PointeeTypeInfo = 4146 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy); 4147 Fields.push_back(PointeeTypeInfo); 4148 4149 // Itanium C++ ABI 2.9.5p9: 4150 // __context is a pointer to an abi::__class_type_info corresponding to the 4151 // class type containing the member pointed to 4152 // (e.g., the "A" in "int A::*"). 4153 Fields.push_back( 4154 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0))); 4155 } 4156 4157 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) { 4158 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty); 4159 } 4160 4161 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) { 4162 // Types added here must also be added to TypeInfoIsInStandardLibrary. 4163 QualType FundamentalTypes[] = { 4164 getContext().VoidTy, getContext().NullPtrTy, 4165 getContext().BoolTy, getContext().WCharTy, 4166 getContext().CharTy, getContext().UnsignedCharTy, 4167 getContext().SignedCharTy, getContext().ShortTy, 4168 getContext().UnsignedShortTy, getContext().IntTy, 4169 getContext().UnsignedIntTy, getContext().LongTy, 4170 getContext().UnsignedLongTy, getContext().LongLongTy, 4171 getContext().UnsignedLongLongTy, getContext().Int128Ty, 4172 getContext().UnsignedInt128Ty, getContext().HalfTy, 4173 getContext().FloatTy, getContext().DoubleTy, 4174 getContext().LongDoubleTy, getContext().Float128Ty, 4175 getContext().Char8Ty, getContext().Char16Ty, 4176 getContext().Char32Ty 4177 }; 4178 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass = 4179 RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(RD) 4180 ? llvm::GlobalValue::DLLExportStorageClass 4181 : llvm::GlobalValue::DefaultStorageClass; 4182 llvm::GlobalValue::VisibilityTypes Visibility = 4183 CodeGenModule::GetLLVMVisibility(RD->getVisibility()); 4184 for (const QualType &FundamentalType : FundamentalTypes) { 4185 QualType PointerType = getContext().getPointerType(FundamentalType); 4186 QualType PointerTypeConst = getContext().getPointerType( 4187 FundamentalType.withConst()); 4188 for (QualType Type : {FundamentalType, PointerType, PointerTypeConst}) 4189 ItaniumRTTIBuilder(*this).BuildTypeInfo( 4190 Type, llvm::GlobalValue::ExternalLinkage, 4191 Visibility, DLLStorageClass); 4192 } 4193 } 4194 4195 /// What sort of uniqueness rules should we use for the RTTI for the 4196 /// given type? 4197 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness( 4198 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const { 4199 if (shouldRTTIBeUnique()) 4200 return RUK_Unique; 4201 4202 // It's only necessary for linkonce_odr or weak_odr linkage. 4203 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage && 4204 Linkage != llvm::GlobalValue::WeakODRLinkage) 4205 return RUK_Unique; 4206 4207 // It's only necessary with default visibility. 4208 if (CanTy->getVisibility() != DefaultVisibility) 4209 return RUK_Unique; 4210 4211 // If we're not required to publish this symbol, hide it. 
4212 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage) 4213 return RUK_NonUniqueHidden; 4214 4215 // If we're required to publish this symbol, as we might be under an 4216 // explicit instantiation, leave it with default visibility but 4217 // enable string-comparisons. 4218 assert(Linkage == llvm::GlobalValue::WeakODRLinkage); 4219 return RUK_NonUniqueVisible; 4220 } 4221 4222 // Find out how to codegen the complete destructor and constructor 4223 namespace { 4224 enum class StructorCodegen { Emit, RAUW, Alias, COMDAT }; 4225 } 4226 static StructorCodegen getCodegenToUse(CodeGenModule &CGM, 4227 const CXXMethodDecl *MD) { 4228 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases) 4229 return StructorCodegen::Emit; 4230 4231 // The complete and base structors are not equivalent if there are any virtual 4232 // bases, so emit separate functions. 4233 if (MD->getParent()->getNumVBases()) 4234 return StructorCodegen::Emit; 4235 4236 GlobalDecl AliasDecl; 4237 if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) { 4238 AliasDecl = GlobalDecl(DD, Dtor_Complete); 4239 } else { 4240 const auto *CD = cast<CXXConstructorDecl>(MD); 4241 AliasDecl = GlobalDecl(CD, Ctor_Complete); 4242 } 4243 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl); 4244 4245 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage)) 4246 return StructorCodegen::RAUW; 4247 4248 // FIXME: Should we allow available_externally aliases? 4249 if (!llvm::GlobalAlias::isValidLinkage(Linkage)) 4250 return StructorCodegen::RAUW; 4251 4252 if (llvm::GlobalValue::isWeakForLinker(Linkage)) { 4253 // Only ELF and wasm support COMDATs with arbitrary names (C5/D5). 4254 if (CGM.getTarget().getTriple().isOSBinFormatELF() || 4255 CGM.getTarget().getTriple().isOSBinFormatWasm()) 4256 return StructorCodegen::COMDAT; 4257 return StructorCodegen::Emit; 4258 } 4259 4260 return StructorCodegen::Alias; 4261 } 4262 4263 static void emitConstructorDestructorAlias(CodeGenModule &CGM, 4264 GlobalDecl AliasDecl, 4265 GlobalDecl TargetDecl) { 4266 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl); 4267 4268 StringRef MangledName = CGM.getMangledName(AliasDecl); 4269 llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName); 4270 if (Entry && !Entry->isDeclaration()) 4271 return; 4272 4273 auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl)); 4274 4275 // Create the alias with no name. 4276 auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee); 4277 4278 // Constructors and destructors are always unnamed_addr. 4279 Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); 4280 4281 // Switch any previous uses to the alias. 4282 if (Entry) { 4283 assert(Entry->getType() == Aliasee->getType() && 4284 "declaration exists with different type"); 4285 Alias->takeName(Entry); 4286 Entry->replaceAllUsesWith(Alias); 4287 Entry->eraseFromParent(); 4288 } else { 4289 Alias->setName(MangledName); 4290 } 4291 4292 // Finally, set up the alias with its proper name and attributes. 4293 CGM.SetCommonAttributes(AliasDecl, Alias); 4294 } 4295 4296 void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) { 4297 auto *MD = cast<CXXMethodDecl>(GD.getDecl()); 4298 auto *CD = dyn_cast<CXXConstructorDecl>(MD); 4299 const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD); 4300 4301 StructorCodegen CGType = getCodegenToUse(CGM, MD); 4302 4303 if (CD ? 
GD.getCtorType() == Ctor_Complete 4304 : GD.getDtorType() == Dtor_Complete) { 4305 GlobalDecl BaseDecl; 4306 if (CD) 4307 BaseDecl = GD.getWithCtorType(Ctor_Base); 4308 else 4309 BaseDecl = GD.getWithDtorType(Dtor_Base); 4310 4311 if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) { 4312 emitConstructorDestructorAlias(CGM, GD, BaseDecl); 4313 return; 4314 } 4315 4316 if (CGType == StructorCodegen::RAUW) { 4317 StringRef MangledName = CGM.getMangledName(GD); 4318 auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl); 4319 CGM.addReplacement(MangledName, Aliasee); 4320 return; 4321 } 4322 } 4323 4324 // The base destructor is equivalent to the base destructor of its 4325 // base class if there is exactly one non-virtual base class with a 4326 // non-trivial destructor, there are no fields with a non-trivial 4327 // destructor, and the body of the destructor is trivial. 4328 if (DD && GD.getDtorType() == Dtor_Base && 4329 CGType != StructorCodegen::COMDAT && 4330 !CGM.TryEmitBaseDestructorAsAlias(DD)) 4331 return; 4332 4333 // FIXME: The deleting destructor is equivalent to the selected operator 4334 // delete if: 4335 // * either the delete is a destroying operator delete or the destructor 4336 // would be trivial if it weren't virtual, 4337 // * the conversion from the 'this' parameter to the first parameter of the 4338 // destructor is equivalent to a bitcast, 4339 // * the destructor does not have an implicit "this" return, and 4340 // * the operator delete has the same calling convention and IR function type 4341 // as the destructor. 4342 // In such cases we should try to emit the deleting dtor as an alias to the 4343 // selected 'operator delete'. 4344 4345 llvm::Function *Fn = CGM.codegenCXXStructor(GD); 4346 4347 if (CGType == StructorCodegen::COMDAT) { 4348 SmallString<256> Buffer; 4349 llvm::raw_svector_ostream Out(Buffer); 4350 if (DD) 4351 getMangleContext().mangleCXXDtorComdat(DD, Out); 4352 else 4353 getMangleContext().mangleCXXCtorComdat(CD, Out); 4354 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str()); 4355 Fn->setComdat(C); 4356 } else { 4357 CGM.maybeSetTrivialComdat(*MD, *Fn); 4358 } 4359 } 4360 4361 static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) { 4362 // void *__cxa_begin_catch(void*); 4363 llvm::FunctionType *FTy = llvm::FunctionType::get( 4364 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false); 4365 4366 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch"); 4367 } 4368 4369 static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) { 4370 // void __cxa_end_catch(); 4371 llvm::FunctionType *FTy = 4372 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); 4373 4374 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch"); 4375 } 4376 4377 static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) { 4378 // void *__cxa_get_exception_ptr(void*); 4379 llvm::FunctionType *FTy = llvm::FunctionType::get( 4380 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false); 4381 4382 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr"); 4383 } 4384 4385 namespace { 4386 /// A cleanup to call __cxa_end_catch. In many cases, the caught 4387 /// exception type lets us state definitively that the thrown exception 4388 /// type does not have a destructor. In particular: 4389 /// - Catch-alls tell us nothing, so we have to conservatively 4390 /// assume that the thrown exception might have a destructor. 4391 /// - Catches by reference behave according to their base types. 
4392 /// - Catches of non-record types will only trigger for exceptions
4393 /// of non-record types, which never have destructors.
4394 /// - Catches of record types can trigger for arbitrary subclasses
4395 /// of the caught type, so we have to assume the actual thrown
4396 /// exception type might have a throwing destructor, even if the
4397 /// caught type's destructor is trivial or nothrow.
4398 struct CallEndCatch final : EHScopeStack::Cleanup {
4399 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4400 bool MightThrow;
4401
4402 void Emit(CodeGenFunction &CGF, Flags flags) override {
4403 if (!MightThrow) {
4404 CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
4405 return;
4406 }
4407
4408 CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
4409 }
4410 };
4411 }
4412
4413 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
4414 /// __cxa_end_catch.
4415 ///
4416 /// \param EndMightThrow - true if __cxa_end_catch might throw
4417 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4418 llvm::Value *Exn,
4419 bool EndMightThrow) {
4420 llvm::CallInst *call =
4421 CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
4422
4423 CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
4424
4425 return call;
4426 }
4427
4428 /// A "special initializer" callback for initializing a catch
4429 /// parameter during catch initialization.
4430 static void InitCatchParam(CodeGenFunction &CGF,
4431 const VarDecl &CatchParam,
4432 Address ParamAddr,
4433 SourceLocation Loc) {
4434 // Load the exception from where the landing pad saved it.
4435 llvm::Value *Exn = CGF.getExceptionFromSlot();
4436
4437 CanQualType CatchType =
4438 CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
4439 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
4440
4441 // If we're catching by reference, we can just cast the object
4442 // pointer to the appropriate pointer.
4443 if (isa<ReferenceType>(CatchType)) {
4444 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
4445 bool EndCatchMightThrow = CaughtType->isRecordType();
4446
4447 // __cxa_begin_catch returns the adjusted object pointer.
4448 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
4449
4450 // We have no way to tell the personality function that we're
4451 // catching by reference, so if we're catching a pointer,
4452 // __cxa_begin_catch will actually return that pointer by value.
4453 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
4454 QualType PointeeType = PT->getPointeeType();
4455
4456 // When catching by reference, generally we should just ignore
4457 // this by-value pointer and use the exception object instead.
4458 if (!PointeeType->isRecordType()) {
4459
4460 // Exn points to the struct _Unwind_Exception header, which
4461 // we have to skip past in order to reach the exception data.
4462 unsigned HeaderSize =
4463 CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
4464 AdjustedExn =
4465 CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize);
4466
4467 // However, if we're catching a pointer-to-record type, that won't
4468 // work, because the personality function might have adjusted
4469 // the pointer. There's actually no way for us to fully satisfy
4470 // the language/ABI contract here: we can't use Exn because it
4471 // might have the wrong adjustment, but we can't use the by-value
4472 // pointer because it's off by a level of abstraction.

/// A "special initializer" callback for initializing a catch
/// parameter during catch initialization.
static void InitCatchParam(CodeGenFunction &CGF,
                           const VarDecl &CatchParam,
                           Address ParamAddr,
                           SourceLocation Loc) {
  // Load the exception from where the landing pad saved it.
  llvm::Value *Exn = CGF.getExceptionFromSlot();

  CanQualType CatchType =
    CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
  llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);

  // If we're catching by reference, we can just cast the object
  // pointer to the appropriate pointer.
  if (isa<ReferenceType>(CatchType)) {
    QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
    bool EndCatchMightThrow = CaughtType->isRecordType();

    // __cxa_begin_catch returns the adjusted object pointer.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);

    // We have no way to tell the personality function that we're
    // catching by reference, so if we're catching a pointer,
    // __cxa_begin_catch will actually return that pointer by value.
    if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
      QualType PointeeType = PT->getPointeeType();

      // When catching by reference, generally we should just ignore
      // this by-value pointer and use the exception object instead.
      if (!PointeeType->isRecordType()) {

        // Exn points to the struct _Unwind_Exception header, which
        // we have to skip past in order to reach the exception data.
        unsigned HeaderSize =
          CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
        AdjustedExn =
            CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize);

      // However, if we're catching a pointer-to-record type, that won't
      // work, because the personality function might have adjusted
      // the pointer. There's actually no way for us to fully satisfy
      // the language/ABI contract here: we can't use Exn because it
      // might have the wrong adjustment, but we can't use the by-value
      // pointer because it's off by a level of abstraction.
      //
      // The current solution is to dump the adjusted pointer into an
      // alloca, which breaks language semantics (because changing the
      // pointer doesn't change the exception) but at least works.
      // The better solution would be to filter out non-exact matches
      // and rethrow them, but this is tricky because the rethrow
      // really needs to be catchable by other sites at this landing
      // pad. The best solution is to fix the personality function.
      } else {
        // Pull the pointer for the reference type off.
        llvm::Type *PtrTy = CGF.ConvertTypeForMem(CaughtType);

        // Create the temporary and write the adjusted pointer into it.
        Address ExnPtrTmp =
          CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
        llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
        CGF.Builder.CreateStore(Casted, ExnPtrTmp);

        // Bind the reference to the temporary.
        AdjustedExn = ExnPtrTmp.getPointer();
      }
    }

    llvm::Value *ExnCast =
      CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
    CGF.Builder.CreateStore(ExnCast, ParamAddr);
    return;
  }

  // Scalars and complexes.
  TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
  if (TEK != TEK_Aggregate) {
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);

    // If the catch type is a pointer type, __cxa_begin_catch returns
    // the pointer by value.
    if (CatchType->hasPointerRepresentation()) {
      llvm::Value *CastExn =
        CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");

      switch (CatchType.getQualifiers().getObjCLifetime()) {
      case Qualifiers::OCL_Strong:
        CastExn = CGF.EmitARCRetainNonBlock(CastExn);
        LLVM_FALLTHROUGH;

      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        CGF.Builder.CreateStore(CastExn, ParamAddr);
        return;

      case Qualifiers::OCL_Weak:
        CGF.EmitARCInitWeak(ParamAddr, CastExn);
        return;
      }
      llvm_unreachable("bad ownership qualifier!");
    }

    // Otherwise, it returns a pointer into the exception object.

    llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
    llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);

    LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
    LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
    switch (TEK) {
    case TEK_Complex:
      CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
                             /*init*/ true);
      return;
    case TEK_Scalar: {
      llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
      CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
      return;
    }
    case TEK_Aggregate:
      llvm_unreachable("evaluation kind filtered out!");
    }
    llvm_unreachable("bad evaluation kind");
  }
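
  // What follows handles by-value catches of class types. A hedged
  // source-level example of what this path implements:
  //
  //   try { throw Derived(); } catch (Base b) { ... }
  //
  // 'b' must be copy-initialized (trivially copied, or copy-constructed)
  // from the exception object before the handler body runs.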
  assert(isa<RecordType>(CatchType) && "unexpected catch type!");
  auto catchRD = CatchType->getAsCXXRecordDecl();
  CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);

  llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok

  // Check for a copy expression. If we don't have a copy expression,
  // that means a trivial copy is okay.
  const Expr *copyExpr = CatchParam.getInit();
  if (!copyExpr) {
    llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
    Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                        LLVMCatchTy, caughtExnAlignment);
    LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
    LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
    CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
    return;
  }

  // We have to call __cxa_get_exception_ptr to get the adjusted
  // pointer before copying.
  llvm::CallInst *rawAdjustedExn =
    CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);

  // Cast that to the appropriate type.
  Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                      LLVMCatchTy, caughtExnAlignment);

  // The copy expression is defined in terms of an OpaqueValueExpr.
  // Find it and map it to the adjusted expression.
  CodeGenFunction::OpaqueValueMapping
    opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
           CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));

  // Call the copy ctor in a terminate scope.
  CGF.EHStack.pushTerminate();

  // Perform the copy construction.
  CGF.EmitAggExpr(copyExpr,
                  AggValueSlot::forAddr(ParamAddr, Qualifiers(),
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        AggValueSlot::DoesNotOverlap));

  // Leave the terminate scope.
  CGF.EHStack.popTerminate();

  // Undo the opaque value mapping.
  opaque.pop();

  // Finally we can call __cxa_begin_catch.
  CallBeginCatch(CGF, Exn, true);
}
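
// A hedged sketch of the semantics InitCatchParam implements for a
// nontrivial by-value catch, in source terms:
//
//   catch (T t)   // T has a nontrivial copy constructor
//
// copies the exception object into 't' via __cxa_get_exception_ptr inside
// a terminate scope (so a copy constructor that throws here terminates the
// program), and only then calls __cxa_begin_catch.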

/// Begins a catch statement by initializing the catch variable and
/// calling __cxa_begin_catch.
void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
                                   const CXXCatchStmt *S) {
  // We have to be very careful with the ordering of cleanups here:
  //   C++ [except.throw]p4:
  //     The destruction [of the exception temporary] occurs
  //     immediately after the destruction of the object declared in
  //     the exception-declaration in the handler.
  //
  // So the precise ordering is:
  //   1. Construct catch variable.
  //   2. __cxa_begin_catch
  //   3. Enter __cxa_end_catch cleanup
  //   4. Enter dtor cleanup
  //
  // We do this by using a slightly abnormal initialization process.
  // Delegation sequence:
  //   - ExitCXXTryStmt opens a RunCleanupsScope
  //   - EmitAutoVarAlloca creates the variable and debug info
  //   - InitCatchParam initializes the variable from the exception
  //   - CallBeginCatch calls __cxa_begin_catch
  //   - CallBeginCatch enters the __cxa_end_catch cleanup
  //   - EmitAutoVarCleanups enters the variable destructor cleanup
  //   - EmitCXXTryStmt emits the code for the catch body
  //   - EmitCXXTryStmt closes the RunCleanupsScope

  VarDecl *CatchParam = S->getExceptionDecl();
  if (!CatchParam) {
    llvm::Value *Exn = CGF.getExceptionFromSlot();
    CallBeginCatch(CGF, Exn, true);
    return;
  }

  // Emit the local.
  CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
  InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
  CGF.EmitAutoVarCleanups(var);
}

/// Get or define the following function:
///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
/// This code is used only in C++.
static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
  llvm::FunctionType *fnTy =
    llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
  llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
      fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
  llvm::Function *fn =
      cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
  if (fn->empty()) {
    fn->setDoesNotThrow();
    fn->setDoesNotReturn();

    // What we really want is to massively penalize inlining without
    // forbidding it completely. The difference between that and
    // 'noinline' is negligible.
    fn->addFnAttr(llvm::Attribute::NoInline);

    // Allow this function to be shared across translation units, but
    // we don't want it to turn into an exported symbol.
    fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
    fn->setVisibility(llvm::Function::HiddenVisibility);
    if (CGM.supportsCOMDAT())
      fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));

    // Set up the function.
    llvm::BasicBlock *entry =
      llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
    CGBuilderTy builder(CGM, entry);

    // Pull the exception pointer out of the parameter list.
    llvm::Value *exn = &*fn->arg_begin();

    // Call __cxa_begin_catch(exn).
    llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
    catchCall->setDoesNotThrow();
    catchCall->setCallingConv(CGM.getRuntimeCC());

    // Call std::terminate().
    llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
    termCall->setDoesNotThrow();
    termCall->setDoesNotReturn();
    termCall->setCallingConv(CGM.getRuntimeCC());

    // std::terminate cannot return.
    builder.CreateUnreachable();
  }
  return fnRef;
}
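
// Equivalent C++ for the helper built above (an illustrative sketch, not
// text the compiler emits verbatim):
//
//   extern "C" void __clang_call_terminate(void *exn) {
//     __cxa_begin_catch(exn); // mark the exception handled
//     std::terminate();       // never returns
//   }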

llvm::CallInst *
ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                                   llvm::Value *Exn) {
  // In C++, we want to call __cxa_begin_catch() before terminating.
  if (Exn) {
    assert(CGF.CGM.getLangOpts().CPlusPlus);
    return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
  }
  return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
}

std::pair<llvm::Value *, const CXXRecordDecl *>
ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
                             const CXXRecordDecl *RD) {
  return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
}

void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
                                       const CXXCatchStmt *C) {
  if (CGF.getTarget().hasFeature("exception-handling"))
    CGF.EHStack.pushCleanup<CatchRetScope>(
        NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
  ItaniumCXXABI::emitBeginCatch(CGF, C);
}

llvm::CallInst *
WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                                       llvm::Value *Exn) {
  // The Itanium ABI calls __clang_call_terminate(), which calls
  // __cxa_begin_catch() on the violating exception to mark it handled.
  // That is currently hard to do with the wasm EH instruction structure
  // (catch/catch_all), so we just call std::terminate() and ignore the
  // violating exception, as in CGCXXABI.
  // TODO: Consider a code transformation that makes calling
  // __clang_call_terminate possible.
  return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
}

/// Register a global destructor as best as we know how.
void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                                  llvm::FunctionCallee Dtor,
                                  llvm::Constant *Addr) {
  if (D.getTLSKind() != VarDecl::TLS_None) {
    // The atexit routine expects "int(*)(int,...)".
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.IntTy, CGM.IntTy, true);
    llvm::PointerType *FpTy = FTy->getPointerTo();

    // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
    llvm::FunctionType *AtExitTy =
        llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, FpTy}, true);

    // Fetch the actual function.
    llvm::FunctionCallee AtExit =
        CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np");

    // Create the __dtor function for the var decl.
    llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit);

    // Register the __dtor function above with atexit(). The first parameter
    // is the flags word and must be 0; the second is the function pointer.
    llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy);
    CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub});

    // A TLS __dtor cannot be unregistered, so we are done.
    return;
  }

  // Create the __dtor function for the var decl.
  llvm::Function *DtorStub = CGF.createAtExitStub(D, Dtor, Addr);

  // Register the __dtor function above with atexit().
  CGF.registerGlobalDtorWithAtExit(DtorStub);

  // Emit a __finalize function to unregister __dtor and (as appropriate)
  // call __dtor.
  emitCXXStermFinalizer(D, DtorStub, Addr);
}
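
// A hedged note on the AIX-specific entry point used above: the TLS path
// registers the generated stub roughly as
//
//   __pt_atexit_np(/*flags=*/0, __dtor_stub);
//
// where '__dtor_stub' stands in for the stub created by createTLSAtExitStub;
// the name and signature of __pt_atexit_np come from the declaration quoted
// in the comment above.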
void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                                     llvm::Constant *addr) {
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getMangleContext().mangleDynamicStermFinalizer(&D, Out);
  }

  // Create the finalization action associated with a variable.
  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, FnName.str(), FI, D.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
                    FunctionArgList(), D.getLocation(),
                    D.getInit()->getExprLoc());

  // The unatexit subroutine unregisters __dtor functions that were previously
  // registered by the atexit subroutine. If the referenced function is found,
  // unatexit returns a value of 0, meaning that the cleanup is still pending
  // (and we should call the __dtor function).
  llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);

  llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");

  llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");

  // Check if unatexit returned a value of 0. If it did, jump to
  // DestructCallBlock; otherwise jump to EndBlock directly.
  CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

  CGF.EmitBlock(DestructCallBlock);

  // Emit the call to dtorStub.
  llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);

  // Make sure the call and the callee agree on calling convention.
  CI->setCallingConv(dtorStub->getCallingConv());

  CGF.EmitBlock(EndBlock);

  CGF.FinishFunction();

  if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
    CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
                                             IPA->getPriority());
  } else if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
             getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
    // According to C++ [basic.start.init]p2, class template static data
    // members (i.e., implicitly or explicitly instantiated specializations)
    // have unordered initialization. As a consequence, we can put them into
    // their own llvm.global_dtors entry.
    CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
  } else {
    CGM.AddCXXStermFinalizerEntry(StermFinalizer);
  }
}
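
// A minimal sketch of the finalizer emitted above, in C-like terms
// (illustrative only; '__dtor_stub' names the generated stub discussed
// above):
//
//   void sterm_finalizer() {
//     if (unatexit(__dtor_stub) == 0) // 0: still registered, not yet run
//       __dtor_stub();                // perform the pending destruction
//   }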