//===- ASTContext.cpp - Context to hold long-lived AST nodes -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file implements the ASTContext interface.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "CXXABI.h"
#include "Interp/Context.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Comment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclContextInternals.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Linkage.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/NoSanitizeList.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/XRayLists.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;
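
/// Ranks of the built-in floating-point types, ordered from lowest to
/// highest rank; used by the floating-point rank and ordering computations
/// later in this file.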
enum FloatingRank {
  BFloat16Rank,
  Float16Rank,
  HalfRank,
  FloatRank,
  DoubleRank,
  LongDoubleRank,
  Float128Rank,
  Ibm128Rank
};

/// \returns location that is relevant when searching for Doc comments related
/// to \p D.
static SourceLocation getDeclLocForCommentSearch(const Decl *D,
                                                 SourceManager &SourceMgr) {
  assert(D);

  // User can not attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // User can not attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation ||
        TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }
  if (const auto *TD = dyn_cast<TagDecl>(D)) {
    // When tag declaration (but not definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get comment
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(D) ||
      isa<NonTypeTemplateParmDecl>(D) ||
      isa<TemplateTemplateParmDecl>(D))
    return {};

  // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use declaration starting location as the "declaration
  // location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
      isa<ObjCPropertyDecl>(D) ||
      isa<RedeclarableTemplateDecl>(D) ||
      isa<ClassTemplateSpecializationDecl>(D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(D))
    return D->getBeginLoc();

  const SourceLocation DeclLoc = D->getLocation();
  if (DeclLoc.isMacroID()) {
    // There are (at least) three types of macros we care about here.
    //
    // 1. Macros that are used in the definition of a type outside the macro,
    //    with a comment attached at the macro call site.
    //    ```
    //    #define MAKE_NAME(Foo) Name##Foo
    //
    //    /// Comment is here, where we use the macro.
    //    struct MAKE_NAME(Foo) {
    //      int a;
    //      int b;
    //    };
    //    ```
    // 2. Macros that define whole things along with the comment.
    //    ```
    //    #define MAKE_METHOD(name) \
    //      /** Comment is here, inside the macro. */ \
    //      void name() {}
    //
    //    struct S {
    //      MAKE_METHOD(f)
    //    }
    //    ```
    // 3. Macros that both declare a type and name a decl outside the macro.
    //    ```
    //    /// Comment is here, where we use the macro.
    //    typedef NS_ENUM(NSInteger, Size) {
    //      SizeWidth,
    //      SizeHeight
    //    };
    //    ```
    //    In this case NS_ENUM declares an enum type, and uses the same name
    //    for the typedef declaration that appears outside the macro. The
    //    comment here should be applied to both declarations inside and
    //    outside the macro.
    //
    // We have found a Decl name that comes from inside a macro, but
    // Decl::getLocation() returns the place where the macro is being called.
    // If the declaration (and not just the name) resides inside the macro,
    // then we want to map Decl::getLocation() into the macro to where the
    // declaration and its attached comment (if any) were written.
    //
    // This mapping into the macro is done by mapping the location to its
    // spelling location, however even if the declaration is inside a macro,
    // the name's spelling can come from a macro argument (case 2 above). In
    // this case mapping the location to the spelling location finds the
    // argument's position (at `f` in MAKE_METHOD(`f`) above), which is not
    // where the declaration and its comment are located.
    //
    // To avoid this issue, we make use of Decl::getBeginLocation() instead.
    // While the declaration's position is where the name is written, the
    // comment is always attached to the beginning of the declaration, not to
    // the name.
    //
    // In the first case, the begin location of the decl is outside the macro,
    // at the location of `typedef`. This is where the comment is found as
    // well. The begin location is not inside a macro, so its spelling
    // location is the same.
    //
    // In the second case, the begin location of the decl is the call to the
    // macro, at `MAKE_METHOD`. However its spelling location is inside the
    // macro at the location of `void`. This is where the comment is found
    // again.
    //
    // In the third case, there's no correct single behaviour. We want to use
    // the comment outside the macro for the definition that's inside the
    // macro. There is also a definition outside the macro, and we want the
    // comment to apply to both. The cases we care about here are NS_ENUM()
    // and NS_OPTIONS(). In general, if an enum is defined inside a macro, we
    // should try to find the comment there.

    // This is handling case 3 for NS_ENUM() and NS_OPTIONS(), which define
    // enum types inside the macro.
    if (isa<EnumDecl>(D)) {
      SourceLocation MacroCallLoc = SourceMgr.getExpansionLoc(DeclLoc);
      if (auto BufferRef =
              SourceMgr.getBufferOrNone(SourceMgr.getFileID(MacroCallLoc));
          BufferRef.has_value()) {
        llvm::StringRef buffer = BufferRef->getBuffer().substr(
            SourceMgr.getFileOffset(MacroCallLoc));
        if (buffer.starts_with("NS_ENUM(") ||
            buffer.starts_with("NS_OPTIONS(")) {
          // We want to use the comment on the call to NS_ENUM and NS_OPTIONS
          // macros for the types defined inside the macros, which is at the
          // expansion location.
          return MacroCallLoc;
        }
      }
    }
    return SourceMgr.getSpellingLoc(D->getBeginLoc());
  }

  return DeclLoc;
}
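
/// Search \p CommentsInTheFile for a documentation comment to attach to \p D,
/// whose position is given by \p RepresentativeLocForDecl: either a trailing
/// comment that starts on the same line as the declaration, or the closest
/// preceding non-trailing doc comment, provided no other declaration or
/// preprocessor directive appears between that comment and the declaration.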
RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const std::pair<FileID, unsigned> DeclLocDecomp =
      SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);

  // Slow path.
  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
         isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {

      // Check that Doxygen trailing comment comes after the declaration, starts
      // on the same line and in the same file as the declaration.
      if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
                                       OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
                                               &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract text between the comment and declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // comment and declaration.
  if (Text.find_last_of(";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}

RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);

  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
    return nullptr;

  if (ExternalSource && !CommentsLoaded) {
    ExternalSource->ReadComments();
    CommentsLoaded = true;
  }

  if (Comments.empty())
    return nullptr;

  const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
  if (!File.isValid()) {
    return nullptr;
  }
  const auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty())
    return nullptr;

  return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile);
}

void ASTContext::addComment(const RawComment &RC) {
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
}

/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to template.
static const Decl &adjustDeclToTemplate(const Decl &D) {
  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
    // Is this function declaration part of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
      return *FTD;

    // Nothing to do if function is not an implicit instantiation.
    if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
      return D;

    // Function is an implicit instantiation of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
      return *FTD;

    // Function is instantiated from a member definition of a class template?
    if (const FunctionDecl *MemberDecl =
            FD->getInstantiatedFromMemberFunction())
      return *MemberDecl;

    return D;
  }
  if (const auto *VD = dyn_cast<VarDecl>(&D)) {
    // Static data member is instantiated from a member definition of a class
    // template?
    if (VD->isStaticDataMember())
      if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
        return *MemberDecl;

    return D;
  }
  if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
    // Is this class declaration part of a class template?
    if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
      return *CTD;

    // Class is an implicit instantiation of a class template or partial
    // specialization?
    if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
      if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
        return D;
      llvm::PointerUnion<ClassTemplateDecl *,
                         ClassTemplatePartialSpecializationDecl *>
          PU = CTSD->getSpecializedTemplateOrPartial();
      return PU.is<ClassTemplateDecl *>()
                 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
                 : *static_cast<const Decl *>(
                       PU.get<ClassTemplatePartialSpecializationDecl *>());
    }

    // Class is instantiated from a member definition of a class template?
    if (const MemberSpecializationInfo *Info =
            CRD->getMemberSpecializationInfo())
      return *Info->getInstantiatedFrom();

    return D;
  }
  if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
    // Enum is instantiated from a member definition of a class template?
    if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
      return *MemberDecl;

    return D;
  }
  // FIXME: Adjust alias templates?
  return D;
}
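
/// Return the documentation comment attached to \p D or to any redeclaration
/// of \p D, consulting and updating the per-declaration comment caches. If
/// \p OriginalDecl is non-null, it receives the redeclaration to which the
/// comment is actually attached.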
const RawComment *ASTContext::getRawCommentForAnyRedecl(
    const Decl *D,
    const Decl **OriginalDecl) const {
  if (!D) {
    if (OriginalDecl)
      *OriginalDecl = nullptr;
    return nullptr;
  }

  D = &adjustDeclToTemplate(*D);

  // Any comment directly attached to D?
  {
    auto DeclComment = DeclRawComments.find(D);
    if (DeclComment != DeclRawComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = D;
      return DeclComment->second;
    }
  }

  // Any comment attached to any redeclaration of D?
  const Decl *CanonicalD = D->getCanonicalDecl();
  if (!CanonicalD)
    return nullptr;

  {
    auto RedeclComment = RedeclChainComments.find(CanonicalD);
    if (RedeclComment != RedeclChainComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = RedeclComment->second;
      auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
      assert(CommentAtRedecl != DeclRawComments.end() &&
             "This decl is supposed to have comment attached.");
      return CommentAtRedecl->second;
    }
  }

  // Any redeclarations of D that we haven't checked for comments yet?
  // We can't use DenseMap::iterator directly since it'd get invalid.
  auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
    return CommentlessRedeclChains.lookup(CanonicalD);
  }();

  for (const auto Redecl : D->redecls()) {
    assert(Redecl);
    // Skip all redeclarations that have been checked previously.
    if (LastCheckedRedecl) {
      if (LastCheckedRedecl == Redecl) {
        LastCheckedRedecl = nullptr;
      }
      continue;
    }
    const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
    if (RedeclComment) {
      cacheRawCommentForDecl(*Redecl, *RedeclComment);
      if (OriginalDecl)
        *OriginalDecl = Redecl;
      return RedeclComment;
    }
    CommentlessRedeclChains[CanonicalD] = Redecl;
  }

  if (OriginalDecl)
    *OriginalDecl = nullptr;
  return nullptr;
}

void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
                                        const RawComment &Comment) const {
  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
  DeclRawComments.try_emplace(&OriginalD, &Comment);
  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
  RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
  CommentlessRedeclChains.erase(CanonicalDecl);
}

static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
                                 SmallVectorImpl<const NamedDecl *> &Redeclared) {
  const DeclContext *DC = ObjCMethod->getDeclContext();
  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
    const ObjCInterfaceDecl *ID = IMD->getClassInterface();
    if (!ID)
      return;
    // Add redeclared method here.
    for (const auto *Ext : ID->known_extensions()) {
      if (ObjCMethodDecl *RedeclaredMethod =
              Ext->getMethod(ObjCMethod->getSelector(),
                             ObjCMethod->isInstanceMethod()))
        Redeclared.push_back(RedeclaredMethod);
    }
  }
}
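
/// Attach comments in the file containing \p Decls to those declarations,
/// parsing each newly attached comment with \p PP and caching the result.
/// Only does work when that file still has comments that are not yet
/// attached to any declaration.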
void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
                                                 const Preprocessor *PP) {
  if (Comments.empty() || Decls.empty())
    return;

  FileID File;
  for (Decl *D : Decls) {
    SourceLocation Loc = D->getLocation();
    if (Loc.isValid()) {
      // See if there are any new comments that are not attached to a decl.
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.

  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(*D);

    const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);

    if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
      continue;

    if (DeclRawComments.count(D) > 0)
      continue;

    if (RawComment *const DocComment =
            getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) {
      cacheRawCommentForDecl(*D, *DocComment);
      comments::FullComment *FC = DocComment->parse(*this, PP, D);
      ParsedComments[D->getCanonicalDecl()] = FC;
    }
  }
}

comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
                                                    const Decl *D) const {
  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
  ThisDeclInfo->CommentDecl = D;
  ThisDeclInfo->IsFilled = false;
  ThisDeclInfo->fill();
  ThisDeclInfo->CommentDecl = FC->getDecl();
  if (!ThisDeclInfo->TemplateParameters)
    ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
  comments::FullComment *CFC =
      new (*this) comments::FullComment(FC->getBlocks(),
                                        ThisDeclInfo);
  return CFC;
}

comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
  const RawComment *RC = getRawCommentForDeclNoCache(D);
  return RC ? RC->parse(*this, nullptr, D) : nullptr;
}
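
/// Return a parsed documentation comment for \p D, caching the result. When
/// \p D itself has no comment, fall back to related declarations: overridden
/// and redeclared methods, the property backing an Objective-C accessor, a
/// typedef's underlying tag type, an Objective-C superclass or category
/// interface, and public (non-virtual and virtual) base classes.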
comments::FullComment *ASTContext::getCommentForDecl(
    const Decl *D,
    const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(*D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Canonical);

  if (Pos != ParsedComments.end()) {
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
  if (!RC) {
    if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(OMD, Overridden);
      getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
      // Attach any tag type's documentation to its typedef if the latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(TD, PP))
            return cloneFullComment(FC, D);
    }
    else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase = NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl(NonVirtualBase, PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase = VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl(VirtualBase, PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to other redeclaration of this Decl, we
  // should parse the comment in context of that other Decl. This is important
  // because comments can contain references to parameter names which can be
  // different across redeclarations.
  if (D != OriginalDecl && OriginalDecl)
    return getCommentForDecl(OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(*this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}

void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
                                                   const ASTContext &C,
                                                   TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(Parm->getDepth());
  ID.AddInteger(Parm->getPosition());
  ID.AddBoolean(Parm->isParameterPack());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      ID.AddInteger(0);
      ID.AddBoolean(TTP->isParameterPack());
      if (TTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(TTP->getNumExpansionParameters());
      } else
        ID.AddBoolean(false);
      continue;
    }

    if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      ID.AddInteger(1);
      ID.AddBoolean(NTTP->isParameterPack());
      ID.AddPointer(C.getUnconstrainedType(C.getCanonicalType(NTTP->getType()))
                        .getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(false);
      continue;
    }

    auto *TTP = cast<TemplateTemplateParmDecl>(*P);
    ID.AddInteger(2);
    Profile(ID, C, TTP);
  }
}
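
/// Produce the canonical form of a template template parameter by rebuilding
/// its template parameter list in canonical form (constraints are ignored,
/// per C++20 [temp.over.link]p6) and caching the result in
/// CanonTemplateTemplateParms.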
TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
    TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    // Note that, per C++20 [temp.over.link]/6, when determining whether
    // template-parameters are equivalent, constraints are ignored.
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(
          *this, getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
          TTP->getDepth(), TTP->getIndex(), nullptr, false,
          TTP->isParameterPack(), /*HasTypeConstraint=*/false,
          TTP->isExpandedParameterPack()
              ? std::optional<unsigned>(TTP->getNumExpansionParameters())
              : std::nullopt);
      CanonParams.push_back(NewTTP);
    } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      QualType T = getUnconstrainedType(getCanonicalType(NTTP->getType()));
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
              getTrivialTypeSourceInfo(ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                NTTP->isParameterPack(),
                                                TInfo);
      }
      CanonParams.push_back(Param);
    } else
      CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
          cast<TemplateTemplateParmDecl>(*P)));
  }

  TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create(
      *this, getTranslationUnitDecl(), SourceLocation(), TTP->getDepth(),
      TTP->getPosition(), TTP->isParameterPack(), nullptr,
      TemplateParameterList::Create(*this, SourceLocation(), SourceLocation(),
                                    CanonParams, SourceLocation(),
                                    /*RequiresClause=*/nullptr));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
  return CanonTTP;
}
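
/// Return the C++ ABI kind in effect: an explicit ABI override from the
/// LangOptions (e.g. via -fc++-abi=) if present, otherwise the target's
/// default ABI.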
TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
  auto Kind = getTargetInfo().getCXXABI().getKind();
  return getLangOpts().CXXABI.value_or(Kind);
}

CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return nullptr;

  switch (getCXXABIKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericARM: // Same as Itanium at this level
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
    return CreateItaniumCXXABI(*this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(*this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}

interp::Context &ASTContext::getInterpContext() {
  if (!InterpContext) {
    InterpContext.reset(new interp::Context(*this));
  }
  return *InterpContext.get();
}

ParentMapContext &ASTContext::getParentMapContext() {
  if (!ParentMapCtx)
    ParentMapCtx.reset(new ParentMapContext(*this));
  return *ParentMapCtx.get();
}

static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
                                          const LangOptions &LangOpts) {
  switch (LangOpts.getAddressSpaceMapMangling()) {
  case LangOptions::ASMM_Target:
    return TI.useAddressSpaceMapMangling();
  case LangOptions::ASMM_On:
    return true;
  case LangOptions::ASMM_Off:
    return false;
  }
  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
}

ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
      FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
      TemplateSpecializationTypes(this_()),
      DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
      SubstTemplateTemplateParmPacks(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  addTranslationUnitDecl();
}
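
/// Free memory held by side tables that is not released by the bump-pointer
/// allocator: DeclContext lookup maps, registered deallocation callbacks,
/// record layouts, attribute vectors, and per-module initializer lists.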
void ASTContext::cleanup() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);
  Deallocations.clear();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCContainerDecl*,
       const ASTRecordLayout*>::iterator
       I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  ObjCLayouts.clear();

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  }
  ASTRecordLayouts.clear();

  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
                                                       AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
  DeclAttrs.clear();

  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
  ModuleInitializers.clear();
}

ASTContext::~ASTContext() { cleanup(); }

void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
  TraversalScope = TopLevelDecls;
  getParentMapContext().clear();
}

void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
  Deallocations.push_back({Callback, Data});
}

void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
  ExternalSource = std::move(Source);
}

void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
      0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                              \
  if (counts[Idx])                                                      \
    llvm::errs() << "    " << counts[Idx] << " " << #Name               \
                 << " types, " << sizeof(Name##Type) << " each "        \
                 << "(" << counts[Idx] * sizeof(Name##Type)             \
                 << " bytes)\n";                                        \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                       \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}

void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
                                           bool NotifyListeners) {
  if (NotifyListeners)
    if (auto *Listener = getASTMutationListener())
      Listener->RedefinedHiddenDefinition(ND, M);

  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
}

void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
  auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
  if (It == MergedDefModules.end())
    return;

  auto &Merged = It->second;
  llvm::DenseSet<Module*> Found;
  for (Module *&M : Merged)
    if (!Found.insert(M).second)
      M = nullptr;
  llvm::erase_value(Merged, nullptr);
}

ArrayRef<Module *>
ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
  auto MergedIt =
      MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
  if (MergedIt == MergedDefModules.end())
    return std::nullopt;
  return MergedIt->second;
}

void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
  if (LazyInitializers.empty())
    return;

  auto *Source = Ctx.getExternalSource();
  assert(Source && "lazy initializers but no external source");

  auto LazyInits = std::move(LazyInitializers);
  LazyInitializers.clear();

  for (auto ID : LazyInits)
    Initializers.push_back(Source->GetExternalDecl(ID));

  assert(LazyInitializers.empty() &&
         "GetExternalDecl for lazy module initializer added more inits");
}
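
/// Record \p D as an initializer to run for module \p M. If \p D is an
/// ImportDecl, it is dropped when the imported module has no initializers of
/// its own, or replaced by that module's sole ImportDecl initializer when it
/// has exactly one.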
void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
    auto It = ModuleInitializers.find(ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      Imported.resolve(*this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(OnlyDecl))
        D = OnlyDecl;
    }
  }

  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(D);
}

void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
                                 IDs.begin(), IDs.end());
}

ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
  auto It = ModuleInitializers.find(M);
  if (It == ModuleInitializers.end())
    return std::nullopt;

  auto *Inits = It->second;
  Inits->resolve(*this);
  return Inits->Initializers;
}

void ASTContext::setCurrentNamedModule(Module *M) {
  assert(M->isModulePurview());
  assert(!CurrentCXXNamedModule &&
         "Should only set the current named module for ASTContext once");
  CurrentCXXNamedModule = M;
}

ExternCContextDecl *ASTContext::getExternCContextDecl() const {
  if (!ExternCContext)
    ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());

  return ExternCContext;
}

BuiltinTemplateDecl *
ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
                                     const IdentifierInfo *II) const {
  auto *BuiltinTemplate =
      BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK);
  BuiltinTemplate->setImplicit();
  getTranslationUnitDecl()->addDecl(BuiltinTemplate);

  return BuiltinTemplate;
}

BuiltinTemplateDecl *
ASTContext::getMakeIntegerSeqDecl() const {
  if (!MakeIntegerSeqDecl)
    MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
                                                  getMakeIntegerSeqName());
  return MakeIntegerSeqDecl;
}

BuiltinTemplateDecl *
ASTContext::getTypePackElementDecl() const {
  if (!TypePackElementDecl)
    TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
                                                   getTypePackElementName());
  return TypePackElementDecl;
}

RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
                                            RecordDecl::TagKind TK) const {
  SourceLocation Loc;
  RecordDecl *NewDecl;
  if (getLangOpts().CPlusPlus)
    NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
                                    Loc, &Idents.get(Name));
  else
    NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
                                 &Idents.get(Name));
  NewDecl->setImplicit();
  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
      const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
  return NewDecl;
}

TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
                                              StringRef Name) const {
  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
  TypedefDecl *NewDecl = TypedefDecl::Create(
      const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
      SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
  NewDecl->setImplicit();
  return NewDecl;
}

TypedefDecl *ASTContext::getInt128Decl() const {
  if (!Int128Decl)
    Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
  return Int128Decl;
}
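
/// Lazily create and cache the implicit typedef for the 128-bit unsigned
/// integer type, __uint128_t.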
TypedefDecl *ASTContext::getUInt128Decl() const {
  if (!UInt128Decl)
    UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
  return UInt128Decl;
}
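
/// Allocate a BuiltinType of kind \p K, register it in the Types list, and
/// hand it back through \p R as a canonical type.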
void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  auto *Ty = new (*this, TypeAlignment) BuiltinType(K);
  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
  Types.push_back(Ty);
}

void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  ABI.reset(createCXXABI(Target));
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(VoidTy, BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(BoolTy, BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(FloatTy, BuiltinType::Float);
  InitBuiltinType(DoubleTy, BuiltinType::Double);
  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision
  InitBuiltinType(Float128Ty, BuiltinType::Float128);

  // __ibm128 for IBM extended precision
  InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128);

  // C11 extension ISO/IEC TS 18661-3
  InitBuiltinType(Float16Ty, BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
  InitBuiltinType(AccumTy, BuiltinType::Accum);
  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
  InitBuiltinType(FractTy, BuiltinType::Fract);
  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(Int128Ty, BuiltinType::Int128);
  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(Target.getWCharType()))
    InitBuiltinType(WCharTy, BuiltinType::WChar_S);
  else  // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(WCharTy, BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Target.getWCharType());
  }

  WIntTy = getFromTargetType(Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(Char8Ty, BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char16Ty, BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char32Ty, BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(DependentTy, BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(OverloadTy, BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);

  // Placeholder type for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
    InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
    InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);

  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
    InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
    InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
    InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
    InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Target.hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
      InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
      InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  if (Target.getTriple().isWasm() && Target.hasFeature("reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(getCanonicalType(
        getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
  } else {
    VoidPtrTy = getPointerType(VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(HalfTy, BuiltinType::Half);

  InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord("_GUID");
    getTranslationUnitDecl()->addDecl(MSGuidTagDecl);
  }
}

DiagnosticsEngine &ASTContext::getDiagnostics() const {
  return SourceMgr.getDiagnostics();
}

AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
  AttrVec *&Result = DeclAttrs[D];
  if (!Result) {
    void *Mem = Allocate(sizeof(AttrVec));
    Result = new (Mem) AttrVec;
  }

  return *Result;
}

/// Erase the attributes corresponding to the given declaration.
void ASTContext::eraseDeclAttrs(const Decl *D) {
  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
  if (Pos != DeclAttrs.end()) {
    Pos->second->~AttrVec();
    DeclAttrs.erase(Pos);
  }
}

// FIXME: Remove ?
MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  return getTemplateOrSpecializationInfo(Var)
      .dyn_cast<MemberSpecializationInfo *>();
}

ASTContext::TemplateOrSpecializationInfo
ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
      TemplateOrInstantiation.find(Var);
  if (Pos == TemplateOrInstantiation.end())
    return {};

  return Pos->second;
}

void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
                                                TemplateSpecializationKind TSK,
                                          SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
  setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
                                            Tmpl, TSK, PointOfInstantiation));
}

void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}

NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
  return InstantiatedFromUsingDecl.lookup(UUD);
}

void
ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  assert((isa<UsingDecl>(Inst) ||
          isa<UnresolvedUsingValueDecl>(Inst) ||
          isa<UnresolvedUsingTypenameDecl>(Inst)) &&
         "instantiation did not produce a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}

UsingEnumDecl *
ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
  return InstantiatedFromUsingEnumDecl.lookup(UUD);
}

void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
                                                  UsingEnumDecl *Pattern) {
  assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingEnumDecl[Inst] = Pattern;
}

UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
  return InstantiatedFromUsingShadowDecl.lookup(Inst);
}

void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}

FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
  return InstantiatedFromUnnamedFieldDecl.lookup(Field);
}

void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
                                                     FieldDecl *Tmpl) {
  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
         "Already noted what unnamed field was instantiated from");

  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).begin();
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).end();
}

unsigned
ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
  auto Range = overridden_methods(Method);
  return Range.end() - Range.begin();
}

ASTContext::overridden_method_range
ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
      OverriddenMethods.find(Method->getCanonicalDecl());
  if (Pos == OverriddenMethods.end())
    return overridden_method_range(nullptr, nullptr);
  return overridden_method_range(Pos->second.begin(), Pos->second.end());
}

void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
                                     const CXXMethodDecl *Overridden) {
  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
  OverriddenMethods[Method].push_back(Overridden);
}
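
/// Collect the declarations that \p D overrides: for C++ methods, the
/// entries recorded via addOverriddenMethod(); for Objective-C methods, the
/// results of ObjCMethodDecl::getOverriddenMethods().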
Overridden.append(OverDecls.begin(), OverDecls.end());
1626 }
1627
1628 void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
1629 assert(!Import->getNextLocalImport() &&
1630 "Import declaration already in the chain");
1631 assert(!Import->isFromASTFile() && "Non-local import declaration");
1632 if (!FirstLocalImport) {
1633 FirstLocalImport = Import;
1634 LastLocalImport = Import;
1635 return;
1636 }
1637
1638 LastLocalImport->setNextLocalImport(Import);
1639 LastLocalImport = Import;
1640 }
1641
1642 //===----------------------------------------------------------------------===//
1643 // Type Sizing and Analysis
1644 //===----------------------------------------------------------------------===//
1645
1646 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1647 /// scalar floating point type.
1648 const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1649 switch (T->castAs<BuiltinType>()->getKind()) {
1650 default:
1651 llvm_unreachable("Not a floating point type!");
1652 case BuiltinType::BFloat16:
1653 return Target->getBFloat16Format();
1654 case BuiltinType::Float16:
1655 return Target->getHalfFormat();
1656 case BuiltinType::Half:
1657 // For HLSL, when the native half type is disabled, half will be treated as
1658 // float.
1659 if (getLangOpts().HLSL)
1660 if (getLangOpts().NativeHalfType)
1661 return Target->getHalfFormat();
1662 else
1663 return Target->getFloatFormat();
1664 else
1665 return Target->getHalfFormat();
1666 case BuiltinType::Float: return Target->getFloatFormat();
1667 case BuiltinType::Double: return Target->getDoubleFormat();
1668 case BuiltinType::Ibm128:
1669 return Target->getIbm128Format();
1670 case BuiltinType::LongDouble:
1671 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1672 return AuxTarget->getLongDoubleFormat();
1673 return Target->getLongDoubleFormat();
1674 case BuiltinType::Float128:
1675 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1676 return AuxTarget->getFloat128Format();
1677 return Target->getFloat128Format();
1678 }
1679 }
1680
1681 CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
1682 unsigned Align = Target->getCharWidth();
1683
1684 bool UseAlignAttrOnly = false;
1685 if (unsigned AlignFromAttr = D->getMaxAlignment()) {
1686 Align = AlignFromAttr;
1687
1688 // __attribute__((aligned)) can increase or decrease alignment
1689 // *except* on a struct or struct member, where it only increases
1690 // alignment unless 'packed' is also specified.
1691 //
1692 // It is an error for alignas to decrease alignment, so we can
1693 // ignore that possibility; Sema should diagnose it.
1694 if (isa<FieldDecl>(D)) {
1695 UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
1696 cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
1697 } else {
1698 UseAlignAttrOnly = true;
1699 }
1700 }
1701 else if (isa<FieldDecl>(D))
1702 UseAlignAttrOnly =
1703 D->hasAttr<PackedAttr>() ||
1704 cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
1705
1706 // If we're using the align attribute only, just ignore everything
1707 // else about the declaration and its type.
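// A couple of illustrative cases of the rules above (hypothetical declarations,
// assuming a typical target where 'int' is 32 bits wide and 32-bit aligned):
//   __attribute__((aligned(1))) int G;                // not a field: the attribute alone
//                                                     // is honored, so getDeclAlign
//                                                     // reports 1-byte alignment for G.
//   struct S { int x __attribute__((aligned(1))); };  // field without 'packed': the
//                                                     // attribute cannot lower x below
//                                                     // the type-based alignment
//                                                     // computed below.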
1708 if (UseAlignAttrOnly) {
1709 // do nothing
1710 } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
1711 QualType T = VD->getType();
1712 if (const auto *RT = T->getAs<ReferenceType>()) {
1713 if (ForAlignof)
1714 T = RT->getPointeeType();
1715 else
1716 T = getPointerType(RT->getPointeeType());
1717 }
1718 QualType BaseT = getBaseElementType(T);
1719 if (T->isFunctionType())
1720 Align = getTypeInfoImpl(T.getTypePtr()).Align;
1721 else if (!BaseT->isIncompleteType()) {
1722 // Adjust alignments of declarations with array type by the
1723 // large-array alignment on the target.
1724 if (const ArrayType *arrayType = getAsArrayType(T)) {
1725 unsigned MinWidth = Target->getLargeArrayMinWidth();
1726 if (!ForAlignof && MinWidth) {
1727 if (isa<VariableArrayType>(arrayType))
1728 Align = std::max(Align, Target->getLargeArrayAlign());
1729 else if (isa<ConstantArrayType>(arrayType) &&
1730 MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
1731 Align = std::max(Align, Target->getLargeArrayAlign());
1732 }
1733 }
1734 Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
1735 if (BaseT.getQualifiers().hasUnaligned())
1736 Align = Target->getCharWidth();
1737 if (const auto *VD = dyn_cast<VarDecl>(D)) {
1738 if (VD->hasGlobalStorage() && !ForAlignof) {
1739 uint64_t TypeSize = getTypeSize(T.getTypePtr());
1740 Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
1741 }
1742 }
1743 }
1744
1745 // Fields can be subject to extra alignment constraints, like if
1746 // the field is packed, the struct is packed, or the struct has a
1747 // max-field-alignment constraint (#pragma pack). So calculate
1748 // the actual alignment of the field within the struct, and then
1749 // (as we're expected to) constrain that by the alignment of the type.
1750 if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
1751 const RecordDecl *Parent = Field->getParent();
1752 // We can only produce a sensible answer if the record is valid.
1753 if (!Parent->isInvalidDecl()) {
1754 const ASTRecordLayout &Layout = getASTRecordLayout(Parent);
1755
1756 // Start with the record's overall alignment.
1757 unsigned FieldAlign = toBits(Layout.getAlignment());
1758
1759 // Use the GCD of that and the offset within the record.
1760 uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
1761 if (Offset > 0) {
1762 // Alignment is always a power of 2, so the GCD will be a power of 2,
1763 // which means we get to do this crazy thing instead of Euclid's.
1764 uint64_t LowBitOfOffset = Offset & (~Offset + 1);
1765 if (LowBitOfOffset < FieldAlign)
1766 FieldAlign = static_cast<unsigned>(LowBitOfOffset);
1767 }
1768
1769 Align = std::min(Align, FieldAlign);
1770 }
1771 }
1772 }
1773
1774 // Some targets have a hard limit on the maximum requestable alignment in the
1775 // aligned attribute for static variables.
1776 const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
1777 const auto *VD = dyn_cast<VarDecl>(D);
1778 if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
1779 Align = std::min(Align, MaxAlignedAttr);
1780
1781 return toCharUnitsFromBits(Align);
1782 }
1783
1784 CharUnits ASTContext::getExnObjectAlignment() const {
1785 return toCharUnitsFromBits(Target->getExnObjectAlignment());
1786 }
1787
1788 // getTypeInfoDataSizeInChars - Return the size of a type, in
1789 // chars. If the type is a record, its data size is returned.
This is 1790 // the size of the memcpy that's performed when assigning this type 1791 // using a trivial copy/move assignment operator. 1792 TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const { 1793 TypeInfoChars Info = getTypeInfoInChars(T); 1794 1795 // In C++, objects can sometimes be allocated into the tail padding 1796 // of a base-class subobject. We decide whether that's possible 1797 // during class layout, so here we can just trust the layout results. 1798 if (getLangOpts().CPlusPlus) { 1799 if (const auto *RT = T->getAs<RecordType>()) { 1800 const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl()); 1801 Info.Width = layout.getDataSize(); 1802 } 1803 } 1804 1805 return Info; 1806 } 1807 1808 /// getConstantArrayInfoInChars - Performing the computation in CharUnits 1809 /// instead of in bits prevents overflowing the uint64_t for some large arrays. 1810 TypeInfoChars 1811 static getConstantArrayInfoInChars(const ASTContext &Context, 1812 const ConstantArrayType *CAT) { 1813 TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType()); 1814 uint64_t Size = CAT->getSize().getZExtValue(); 1815 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <= 1816 (uint64_t)(-1)/Size) && 1817 "Overflow in array type char size evaluation"); 1818 uint64_t Width = EltInfo.Width.getQuantity() * Size; 1819 unsigned Align = EltInfo.Align.getQuantity(); 1820 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || 1821 Context.getTargetInfo().getPointerWidth(LangAS::Default) == 64) 1822 Width = llvm::alignTo(Width, Align); 1823 return TypeInfoChars(CharUnits::fromQuantity(Width), 1824 CharUnits::fromQuantity(Align), 1825 EltInfo.AlignRequirement); 1826 } 1827 1828 TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const { 1829 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1830 return getConstantArrayInfoInChars(*this, CAT); 1831 TypeInfo Info = getTypeInfo(T); 1832 return TypeInfoChars(toCharUnitsFromBits(Info.Width), 1833 toCharUnitsFromBits(Info.Align), Info.AlignRequirement); 1834 } 1835 1836 TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const { 1837 return getTypeInfoInChars(T.getTypePtr()); 1838 } 1839 1840 bool ASTContext::isPromotableIntegerType(QualType T) const { 1841 // HLSL doesn't promote all small integer types to int, it 1842 // just uses the rank-based promotion rules for all types. 1843 if (getLangOpts().HLSL) 1844 return false; 1845 1846 if (const auto *BT = T->getAs<BuiltinType>()) 1847 switch (BT->getKind()) { 1848 case BuiltinType::Bool: 1849 case BuiltinType::Char_S: 1850 case BuiltinType::Char_U: 1851 case BuiltinType::SChar: 1852 case BuiltinType::UChar: 1853 case BuiltinType::Short: 1854 case BuiltinType::UShort: 1855 case BuiltinType::WChar_S: 1856 case BuiltinType::WChar_U: 1857 case BuiltinType::Char8: 1858 case BuiltinType::Char16: 1859 case BuiltinType::Char32: 1860 return true; 1861 default: 1862 return false; 1863 } 1864 1865 // Enumerated types are promotable to their compatible integer types 1866 // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2). 
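// For example (illustrative): an unscoped `enum E { A };` whose promotion type is
// 'int' is promotable, while a scoped `enum class F { B };` is not.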
1867 if (const auto *ET = T->getAs<EnumType>()) { 1868 if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() || 1869 ET->getDecl()->isScoped()) 1870 return false; 1871 1872 return true; 1873 } 1874 1875 return false; 1876 } 1877 1878 bool ASTContext::isAlignmentRequired(const Type *T) const { 1879 return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None; 1880 } 1881 1882 bool ASTContext::isAlignmentRequired(QualType T) const { 1883 return isAlignmentRequired(T.getTypePtr()); 1884 } 1885 1886 unsigned ASTContext::getTypeAlignIfKnown(QualType T, 1887 bool NeedsPreferredAlignment) const { 1888 // An alignment on a typedef overrides anything else. 1889 if (const auto *TT = T->getAs<TypedefType>()) 1890 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1891 return Align; 1892 1893 // If we have an (array of) complete type, we're done. 1894 T = getBaseElementType(T); 1895 if (!T->isIncompleteType()) 1896 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T); 1897 1898 // If we had an array type, its element type might be a typedef 1899 // type with an alignment attribute. 1900 if (const auto *TT = T->getAs<TypedefType>()) 1901 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1902 return Align; 1903 1904 // Otherwise, see if the declaration of the type had an attribute. 1905 if (const auto *TT = T->getAs<TagType>()) 1906 return TT->getDecl()->getMaxAlignment(); 1907 1908 return 0; 1909 } 1910 1911 TypeInfo ASTContext::getTypeInfo(const Type *T) const { 1912 TypeInfoMap::iterator I = MemoizedTypeInfo.find(T); 1913 if (I != MemoizedTypeInfo.end()) 1914 return I->second; 1915 1916 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup. 1917 TypeInfo TI = getTypeInfoImpl(T); 1918 MemoizedTypeInfo[T] = TI; 1919 return TI; 1920 } 1921 1922 /// getTypeInfoImpl - Return the size of the specified type, in bits. This 1923 /// method does not work on incomplete types. 1924 /// 1925 /// FIXME: Pointers into different addr spaces could have different sizes and 1926 /// alignment requirements: getPointerInfo should take an AddrSpace, this 1927 /// should take a QualType, &c. 1928 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { 1929 uint64_t Width = 0; 1930 unsigned Align = 8; 1931 AlignRequirementKind AlignRequirement = AlignRequirementKind::None; 1932 LangAS AS = LangAS::Default; 1933 switch (T->getTypeClass()) { 1934 #define TYPE(Class, Base) 1935 #define ABSTRACT_TYPE(Class, Base) 1936 #define NON_CANONICAL_TYPE(Class, Base) 1937 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 1938 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \ 1939 case Type::Class: \ 1940 assert(!T->isDependentType() && "should not see dependent types here"); \ 1941 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr()); 1942 #include "clang/AST/TypeNodes.inc" 1943 llvm_unreachable("Should not see dependent types"); 1944 1945 case Type::FunctionNoProto: 1946 case Type::FunctionProto: 1947 // GCC extension: alignof(function) = 32 bits 1948 Width = 0; 1949 Align = 32; 1950 break; 1951 1952 case Type::IncompleteArray: 1953 case Type::VariableArray: 1954 case Type::ConstantArray: { 1955 // Model non-constant sized arrays as size zero, but track the alignment. 
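// For example (illustrative): for a VLA such as `int a[n]`, Width stays 0 here
// while Align is taken from the element type, so alignment queries still work.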
1956 uint64_t Size = 0; 1957 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1958 Size = CAT->getSize().getZExtValue(); 1959 1960 TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType()); 1961 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) && 1962 "Overflow in array type bit size evaluation"); 1963 Width = EltInfo.Width * Size; 1964 Align = EltInfo.Align; 1965 AlignRequirement = EltInfo.AlignRequirement; 1966 if (!getTargetInfo().getCXXABI().isMicrosoft() || 1967 getTargetInfo().getPointerWidth(LangAS::Default) == 64) 1968 Width = llvm::alignTo(Width, Align); 1969 break; 1970 } 1971 1972 case Type::ExtVector: 1973 case Type::Vector: { 1974 const auto *VT = cast<VectorType>(T); 1975 TypeInfo EltInfo = getTypeInfo(VT->getElementType()); 1976 Width = VT->isExtVectorBoolType() ? VT->getNumElements() 1977 : EltInfo.Width * VT->getNumElements(); 1978 // Enforce at least byte size and alignment. 1979 Width = std::max<unsigned>(8, Width); 1980 Align = std::max<unsigned>(8, Width); 1981 1982 // If the alignment is not a power of 2, round up to the next power of 2. 1983 // This happens for non-power-of-2 length vectors. 1984 if (Align & (Align-1)) { 1985 Align = llvm::bit_ceil(Align); 1986 Width = llvm::alignTo(Width, Align); 1987 } 1988 // Adjust the alignment based on the target max. 1989 uint64_t TargetVectorAlign = Target->getMaxVectorAlign(); 1990 if (TargetVectorAlign && TargetVectorAlign < Align) 1991 Align = TargetVectorAlign; 1992 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) 1993 // Adjust the alignment for fixed-length SVE vectors. This is important 1994 // for non-power-of-2 vector lengths. 1995 Align = 128; 1996 else if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) 1997 // Adjust the alignment for fixed-length SVE predicates. 1998 Align = 16; 1999 else if (VT->getVectorKind() == VectorType::RVVFixedLengthDataVector) 2000 // Adjust the alignment for fixed-length RVV vectors. 2001 Align = std::min<unsigned>(64, Width); 2002 break; 2003 } 2004 2005 case Type::ConstantMatrix: { 2006 const auto *MT = cast<ConstantMatrixType>(T); 2007 TypeInfo ElementInfo = getTypeInfo(MT->getElementType()); 2008 // The internal layout of a matrix value is implementation defined. 2009 // Initially be ABI compatible with arrays with respect to alignment and 2010 // size. 2011 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns(); 2012 Align = ElementInfo.Align; 2013 break; 2014 } 2015 2016 case Type::Builtin: 2017 switch (cast<BuiltinType>(T)->getKind()) { 2018 default: llvm_unreachable("Unknown builtin type!"); 2019 case BuiltinType::Void: 2020 // GCC extension: alignof(void) = 8 bits. 
2021 Width = 0; 2022 Align = 8; 2023 break; 2024 case BuiltinType::Bool: 2025 Width = Target->getBoolWidth(); 2026 Align = Target->getBoolAlign(); 2027 break; 2028 case BuiltinType::Char_S: 2029 case BuiltinType::Char_U: 2030 case BuiltinType::UChar: 2031 case BuiltinType::SChar: 2032 case BuiltinType::Char8: 2033 Width = Target->getCharWidth(); 2034 Align = Target->getCharAlign(); 2035 break; 2036 case BuiltinType::WChar_S: 2037 case BuiltinType::WChar_U: 2038 Width = Target->getWCharWidth(); 2039 Align = Target->getWCharAlign(); 2040 break; 2041 case BuiltinType::Char16: 2042 Width = Target->getChar16Width(); 2043 Align = Target->getChar16Align(); 2044 break; 2045 case BuiltinType::Char32: 2046 Width = Target->getChar32Width(); 2047 Align = Target->getChar32Align(); 2048 break; 2049 case BuiltinType::UShort: 2050 case BuiltinType::Short: 2051 Width = Target->getShortWidth(); 2052 Align = Target->getShortAlign(); 2053 break; 2054 case BuiltinType::UInt: 2055 case BuiltinType::Int: 2056 Width = Target->getIntWidth(); 2057 Align = Target->getIntAlign(); 2058 break; 2059 case BuiltinType::ULong: 2060 case BuiltinType::Long: 2061 Width = Target->getLongWidth(); 2062 Align = Target->getLongAlign(); 2063 break; 2064 case BuiltinType::ULongLong: 2065 case BuiltinType::LongLong: 2066 Width = Target->getLongLongWidth(); 2067 Align = Target->getLongLongAlign(); 2068 break; 2069 case BuiltinType::Int128: 2070 case BuiltinType::UInt128: 2071 Width = 128; 2072 Align = Target->getInt128Align(); 2073 break; 2074 case BuiltinType::ShortAccum: 2075 case BuiltinType::UShortAccum: 2076 case BuiltinType::SatShortAccum: 2077 case BuiltinType::SatUShortAccum: 2078 Width = Target->getShortAccumWidth(); 2079 Align = Target->getShortAccumAlign(); 2080 break; 2081 case BuiltinType::Accum: 2082 case BuiltinType::UAccum: 2083 case BuiltinType::SatAccum: 2084 case BuiltinType::SatUAccum: 2085 Width = Target->getAccumWidth(); 2086 Align = Target->getAccumAlign(); 2087 break; 2088 case BuiltinType::LongAccum: 2089 case BuiltinType::ULongAccum: 2090 case BuiltinType::SatLongAccum: 2091 case BuiltinType::SatULongAccum: 2092 Width = Target->getLongAccumWidth(); 2093 Align = Target->getLongAccumAlign(); 2094 break; 2095 case BuiltinType::ShortFract: 2096 case BuiltinType::UShortFract: 2097 case BuiltinType::SatShortFract: 2098 case BuiltinType::SatUShortFract: 2099 Width = Target->getShortFractWidth(); 2100 Align = Target->getShortFractAlign(); 2101 break; 2102 case BuiltinType::Fract: 2103 case BuiltinType::UFract: 2104 case BuiltinType::SatFract: 2105 case BuiltinType::SatUFract: 2106 Width = Target->getFractWidth(); 2107 Align = Target->getFractAlign(); 2108 break; 2109 case BuiltinType::LongFract: 2110 case BuiltinType::ULongFract: 2111 case BuiltinType::SatLongFract: 2112 case BuiltinType::SatULongFract: 2113 Width = Target->getLongFractWidth(); 2114 Align = Target->getLongFractAlign(); 2115 break; 2116 case BuiltinType::BFloat16: 2117 if (Target->hasBFloat16Type()) { 2118 Width = Target->getBFloat16Width(); 2119 Align = Target->getBFloat16Align(); 2120 } else if ((getLangOpts().SYCLIsDevice || 2121 (getLangOpts().OpenMP && 2122 getLangOpts().OpenMPIsTargetDevice)) && 2123 AuxTarget->hasBFloat16Type()) { 2124 Width = AuxTarget->getBFloat16Width(); 2125 Align = AuxTarget->getBFloat16Align(); 2126 } 2127 break; 2128 case BuiltinType::Float16: 2129 case BuiltinType::Half: 2130 if (Target->hasFloat16Type() || !getLangOpts().OpenMP || 2131 !getLangOpts().OpenMPIsTargetDevice) { 2132 Width = Target->getHalfWidth(); 2133 
Align = Target->getHalfAlign(); 2134 } else { 2135 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && 2136 "Expected OpenMP device compilation."); 2137 Width = AuxTarget->getHalfWidth(); 2138 Align = AuxTarget->getHalfAlign(); 2139 } 2140 break; 2141 case BuiltinType::Float: 2142 Width = Target->getFloatWidth(); 2143 Align = Target->getFloatAlign(); 2144 break; 2145 case BuiltinType::Double: 2146 Width = Target->getDoubleWidth(); 2147 Align = Target->getDoubleAlign(); 2148 break; 2149 case BuiltinType::Ibm128: 2150 Width = Target->getIbm128Width(); 2151 Align = Target->getIbm128Align(); 2152 break; 2153 case BuiltinType::LongDouble: 2154 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && 2155 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() || 2156 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) { 2157 Width = AuxTarget->getLongDoubleWidth(); 2158 Align = AuxTarget->getLongDoubleAlign(); 2159 } else { 2160 Width = Target->getLongDoubleWidth(); 2161 Align = Target->getLongDoubleAlign(); 2162 } 2163 break; 2164 case BuiltinType::Float128: 2165 if (Target->hasFloat128Type() || !getLangOpts().OpenMP || 2166 !getLangOpts().OpenMPIsTargetDevice) { 2167 Width = Target->getFloat128Width(); 2168 Align = Target->getFloat128Align(); 2169 } else { 2170 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && 2171 "Expected OpenMP device compilation."); 2172 Width = AuxTarget->getFloat128Width(); 2173 Align = AuxTarget->getFloat128Align(); 2174 } 2175 break; 2176 case BuiltinType::NullPtr: 2177 // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*) 2178 Width = Target->getPointerWidth(LangAS::Default); 2179 Align = Target->getPointerAlign(LangAS::Default); 2180 break; 2181 case BuiltinType::ObjCId: 2182 case BuiltinType::ObjCClass: 2183 case BuiltinType::ObjCSel: 2184 Width = Target->getPointerWidth(LangAS::Default); 2185 Align = Target->getPointerAlign(LangAS::Default); 2186 break; 2187 case BuiltinType::OCLSampler: 2188 case BuiltinType::OCLEvent: 2189 case BuiltinType::OCLClkEvent: 2190 case BuiltinType::OCLQueue: 2191 case BuiltinType::OCLReserveID: 2192 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 2193 case BuiltinType::Id: 2194 #include "clang/Basic/OpenCLImageTypes.def" 2195 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 2196 case BuiltinType::Id: 2197 #include "clang/Basic/OpenCLExtensionTypes.def" 2198 AS = Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 2199 Width = Target->getPointerWidth(AS); 2200 Align = Target->getPointerAlign(AS); 2201 break; 2202 // The SVE types are effectively target-specific. The length of an 2203 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple 2204 // of 128 bits. There is one predicate bit for each vector byte, so the 2205 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits. 2206 // 2207 // Because the length is only known at runtime, we use a dummy value 2208 // of 0 for the static length. The alignment values are those defined 2209 // by the Procedure Call Standard for the Arm Architecture. 
2210 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 2211 IsSigned, IsFP, IsBF) \ 2212 case BuiltinType::Id: \ 2213 Width = 0; \ 2214 Align = 128; \ 2215 break; 2216 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 2217 case BuiltinType::Id: \ 2218 Width = 0; \ 2219 Align = 16; \ 2220 break; 2221 #define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId) \ 2222 case BuiltinType::Id: \ 2223 Width = 0; \ 2224 Align = 16; \ 2225 break; 2226 #include "clang/Basic/AArch64SVEACLETypes.def" 2227 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 2228 case BuiltinType::Id: \ 2229 Width = Size; \ 2230 Align = Size; \ 2231 break; 2232 #include "clang/Basic/PPCTypes.def" 2233 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \ 2234 IsFP) \ 2235 case BuiltinType::Id: \ 2236 Width = 0; \ 2237 Align = ElBits; \ 2238 break; 2239 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \ 2240 case BuiltinType::Id: \ 2241 Width = 0; \ 2242 Align = 8; \ 2243 break; 2244 #include "clang/Basic/RISCVVTypes.def" 2245 #define WASM_TYPE(Name, Id, SingletonId) \ 2246 case BuiltinType::Id: \ 2247 Width = 0; \ 2248 Align = 8; \ 2249 break; 2250 #include "clang/Basic/WebAssemblyReferenceTypes.def" 2251 } 2252 break; 2253 case Type::ObjCObjectPointer: 2254 Width = Target->getPointerWidth(LangAS::Default); 2255 Align = Target->getPointerAlign(LangAS::Default); 2256 break; 2257 case Type::BlockPointer: 2258 AS = cast<BlockPointerType>(T)->getPointeeType().getAddressSpace(); 2259 Width = Target->getPointerWidth(AS); 2260 Align = Target->getPointerAlign(AS); 2261 break; 2262 case Type::LValueReference: 2263 case Type::RValueReference: 2264 // alignof and sizeof should never enter this code path here, so we go 2265 // the pointer route. 2266 AS = cast<ReferenceType>(T)->getPointeeType().getAddressSpace(); 2267 Width = Target->getPointerWidth(AS); 2268 Align = Target->getPointerAlign(AS); 2269 break; 2270 case Type::Pointer: 2271 AS = cast<PointerType>(T)->getPointeeType().getAddressSpace(); 2272 Width = Target->getPointerWidth(AS); 2273 Align = Target->getPointerAlign(AS); 2274 break; 2275 case Type::MemberPointer: { 2276 const auto *MPT = cast<MemberPointerType>(T); 2277 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT); 2278 Width = MPI.Width; 2279 Align = MPI.Align; 2280 break; 2281 } 2282 case Type::Complex: { 2283 // Complex types have the same alignment as their elements, but twice the 2284 // size. 
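// For example (illustrative): on a typical 64-bit target, _Complex double gets
// Width == 128 bits and Align == 64 bits.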
2285 TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType()); 2286 Width = EltInfo.Width * 2; 2287 Align = EltInfo.Align; 2288 break; 2289 } 2290 case Type::ObjCObject: 2291 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr()); 2292 case Type::Adjusted: 2293 case Type::Decayed: 2294 return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr()); 2295 case Type::ObjCInterface: { 2296 const auto *ObjCI = cast<ObjCInterfaceType>(T); 2297 if (ObjCI->getDecl()->isInvalidDecl()) { 2298 Width = 8; 2299 Align = 8; 2300 break; 2301 } 2302 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); 2303 Width = toBits(Layout.getSize()); 2304 Align = toBits(Layout.getAlignment()); 2305 break; 2306 } 2307 case Type::BitInt: { 2308 const auto *EIT = cast<BitIntType>(T); 2309 Align = std::clamp<unsigned>(llvm::PowerOf2Ceil(EIT->getNumBits()), 2310 getCharWidth(), Target->getLongLongAlign()); 2311 Width = llvm::alignTo(EIT->getNumBits(), Align); 2312 break; 2313 } 2314 case Type::Record: 2315 case Type::Enum: { 2316 const auto *TT = cast<TagType>(T); 2317 2318 if (TT->getDecl()->isInvalidDecl()) { 2319 Width = 8; 2320 Align = 8; 2321 break; 2322 } 2323 2324 if (const auto *ET = dyn_cast<EnumType>(TT)) { 2325 const EnumDecl *ED = ET->getDecl(); 2326 TypeInfo Info = 2327 getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType()); 2328 if (unsigned AttrAlign = ED->getMaxAlignment()) { 2329 Info.Align = AttrAlign; 2330 Info.AlignRequirement = AlignRequirementKind::RequiredByEnum; 2331 } 2332 return Info; 2333 } 2334 2335 const auto *RT = cast<RecordType>(TT); 2336 const RecordDecl *RD = RT->getDecl(); 2337 const ASTRecordLayout &Layout = getASTRecordLayout(RD); 2338 Width = toBits(Layout.getSize()); 2339 Align = toBits(Layout.getAlignment()); 2340 AlignRequirement = RD->hasAttr<AlignedAttr>() 2341 ? AlignRequirementKind::RequiredByRecord 2342 : AlignRequirementKind::None; 2343 break; 2344 } 2345 2346 case Type::SubstTemplateTypeParm: 2347 return getTypeInfo(cast<SubstTemplateTypeParmType>(T)-> 2348 getReplacementType().getTypePtr()); 2349 2350 case Type::Auto: 2351 case Type::DeducedTemplateSpecialization: { 2352 const auto *A = cast<DeducedType>(T); 2353 assert(!A->getDeducedType().isNull() && 2354 "cannot request the size of an undeduced or dependent auto type"); 2355 return getTypeInfo(A->getDeducedType().getTypePtr()); 2356 } 2357 2358 case Type::Paren: 2359 return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr()); 2360 2361 case Type::MacroQualified: 2362 return getTypeInfo( 2363 cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr()); 2364 2365 case Type::ObjCTypeParam: 2366 return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr()); 2367 2368 case Type::Using: 2369 return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr()); 2370 2371 case Type::Typedef: { 2372 const auto *TT = cast<TypedefType>(T); 2373 TypeInfo Info = getTypeInfo(TT->desugar().getTypePtr()); 2374 // If the typedef has an aligned attribute on it, it overrides any computed 2375 // alignment we have. This violates the GCC documentation (which says that 2376 // attribute(aligned) can only round up) but matches its implementation. 
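// For example (illustrative):
//   typedef long long UnderAligned __attribute__((aligned(1)));
// yields an alignment of 1 byte here, even though that is below the natural
// alignment of 'long long'.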
2377 if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
2378 Align = AttrAlign;
2379 AlignRequirement = AlignRequirementKind::RequiredByTypedef;
2380 } else {
2381 Align = Info.Align;
2382 AlignRequirement = Info.AlignRequirement;
2383 }
2384 Width = Info.Width;
2385 break;
2386 }
2387
2388 case Type::Elaborated:
2389 return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
2390
2391 case Type::Attributed:
2392 return getTypeInfo(
2393 cast<AttributedType>(T)->getEquivalentType().getTypePtr());
2394
2395 case Type::BTFTagAttributed:
2396 return getTypeInfo(
2397 cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());
2398
2399 case Type::Atomic: {
2400 // Start with the base type information.
2401 TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
2402 Width = Info.Width;
2403 Align = Info.Align;
2404
2405 if (!Width) {
2406 // An otherwise zero-sized type should still generate an
2407 // atomic operation.
2408 Width = Target->getCharWidth();
2409 assert(Align);
2410 } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2411 // If the size of the type doesn't exceed the platform's max
2412 // atomic promotion width, make the size and alignment more
2413 // favorable to atomic operations:
2414
2415 // Round the size up to a power of 2.
2416 Width = llvm::bit_ceil(Width);
2417
2418 // Set the alignment equal to the size.
2419 Align = static_cast<unsigned>(Width);
2420 }
2421 }
2422 break;
2423
2424 case Type::Pipe:
2425 Width = Target->getPointerWidth(LangAS::opencl_global);
2426 Align = Target->getPointerAlign(LangAS::opencl_global);
2427 break;
2428 }
2429
2430 assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2431 return TypeInfo(Width, Align, AlignRequirement);
2432 }
2433
2434 unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2435 UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
2436 if (I != MemoizedUnadjustedAlign.end())
2437 return I->second;
2438
2439 unsigned UnadjustedAlign;
2440 if (const auto *RT = T->getAs<RecordType>()) {
2441 const RecordDecl *RD = RT->getDecl();
2442 const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2443 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2444 } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
2445 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2446 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2447 } else {
2448 UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
2449 }
2450
2451 MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2452 return UnadjustedAlign;
2453 }
2454
2455 unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
2456 unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
2457 getTargetInfo().getTriple(), Target->getTargetOpts().FeatureMap);
2458 return SimdAlign;
2459 }
2460
2461 /// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2462 CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
2463 return CharUnits::fromQuantity(BitSize / getCharWidth());
2464 }
2465
2466 /// toBits - Convert a size in characters to a size in bits.
2467 int64_t ASTContext::toBits(CharUnits CharSize) const {
2468 return CharSize.getQuantity() * getCharWidth();
2469 }
2470
2471 /// getTypeSizeInChars - Return the size of the specified type, in characters.
2472 /// This method does not work on incomplete types.
2473 CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
2474 return getTypeInfoInChars(T).Width;
2475 }
2476 CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
2477 return getTypeInfoInChars(T).Width;
2478 }
2479
2480 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2481 /// characters. This method does not work on incomplete types.
2482 CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
2483 return toCharUnitsFromBits(getTypeAlign(T));
2484 }
2485 CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
2486 return toCharUnitsFromBits(getTypeAlign(T));
2487 }
2488
2489 /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
2490 /// type, in characters, before alignment adjustments. This method does
2491 /// not work on incomplete types.
2492 CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
2493 return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
2494 }
2495 CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
2496 return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
2497 }
2498
2499 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified
2500 /// type for the current target in bits. This can differ from the ABI
2501 /// alignment in cases where overaligning a data type is beneficial for
2502 /// performance or preserves backwards compatibility. (Note: despite the name,
2503 /// the preferred alignment is ABI-impacting, and not an optimization.)
2504 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
2505 TypeInfo TI = getTypeInfo(T);
2506 unsigned ABIAlign = TI.Align;
2507
2508 T = T->getBaseElementTypeUnsafe();
2509
2510 // The preferred alignment of member pointers is that of a pointer.
2511 if (T->isMemberPointerType())
2512 return getPreferredTypeAlign(getPointerDiffType().getTypePtr());
2513
2514 if (!Target->allowsLargerPreferedTypeAlignment())
2515 return ABIAlign;
2516
2517 if (const auto *RT = T->getAs<RecordType>()) {
2518 const RecordDecl *RD = RT->getDecl();
2519
2520 // When used as part of a typedef, or together with a 'packed' attribute,
2521 // the 'aligned' attribute can be used to decrease alignment. Note that the
2522 // 'packed' case is already taken into consideration when computing the
2523 // alignment; we only need to handle the typedef case here.
2524 if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef ||
2525 RD->isInvalidDecl())
2526 return ABIAlign;
2527
2528 unsigned PreferredAlign = static_cast<unsigned>(
2529 toBits(getASTRecordLayout(RD).PreferredAlignment));
2530 assert(PreferredAlign >= ABIAlign &&
2531 "PreferredAlign should be at least as large as ABIAlign.");
2532 return PreferredAlign;
2533 }
2534
2535 // Double (and, for targets supporting AIX `power` alignment, long double) and
2536 // long long should be naturally aligned (despite requiring less alignment) if
2537 // possible.
2538 if (const auto *CT = T->getAs<ComplexType>())
2539 T = CT->getElementType().getTypePtr();
2540 if (const auto *ET = T->getAs<EnumType>())
2541 T = ET->getDecl()->getIntegerType().getTypePtr();
2542 if (T->isSpecificBuiltinType(BuiltinType::Double) ||
2543 T->isSpecificBuiltinType(BuiltinType::LongLong) ||
2544 T->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2545 (T->isSpecificBuiltinType(BuiltinType::LongDouble) &&
2546 Target->defaultsToAIXPowerAlignment()))
2547 // Don't increase the alignment if an alignment attribute was specified on a
2548 // typedef declaration.
2549 if (!TI.isAlignRequired())
2550 return std::max(ABIAlign, (unsigned)getTypeSize(T));
2551
2552 return ABIAlign;
2553 }
2554
2555 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2556 /// for __attribute__((aligned)) on this target, to be used if no alignment
2557 /// value is specified.
2558 unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
2559 return getTargetInfo().getDefaultAlignForAttributeAligned();
2560 }
2561
2562 /// getAlignOfGlobalVar - Return the alignment in bits that should be given
2563 /// to a global variable of the specified type.
2564 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
2565 uint64_t TypeSize = getTypeSize(T.getTypePtr());
2566 return std::max(getPreferredTypeAlign(T),
2567 getTargetInfo().getMinGlobalAlign(TypeSize));
2568 }
2569
2570 /// getAlignOfGlobalVarInChars - Return the alignment in characters that
2571 /// should be given to a global variable of the specified type.
2572 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const {
2573 return toCharUnitsFromBits(getAlignOfGlobalVar(T));
2574 }
2575
2576 CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
2577 CharUnits Offset = CharUnits::Zero();
2578 const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
2579 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2580 Offset += Layout->getBaseClassOffset(Base);
2581 Layout = &getASTRecordLayout(Base);
2582 }
2583 return Offset;
2584 }
2585
2586 CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const {
2587 const ValueDecl *MPD = MP.getMemberPointerDecl();
2588 CharUnits ThisAdjustment = CharUnits::Zero();
2589 ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
2590 bool DerivedMember = MP.isMemberPointerToDerivedMember();
2591 const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
2592 for (unsigned I = 0, N = Path.size(); I != N; ++I) {
2593 const CXXRecordDecl *Base = RD;
2594 const CXXRecordDecl *Derived = Path[I];
2595 if (DerivedMember)
2596 std::swap(Base, Derived);
2597 ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base);
2598 RD = Path[I];
2599 }
2600 if (DerivedMember)
2601 ThisAdjustment = -ThisAdjustment;
2602 return ThisAdjustment;
2603 }
2604
2605 /// DeepCollectObjCIvars -
2606 /// This routine first collects all declared (but not synthesized) ivars from
2607 /// the superclasses and then collects all ivars, including those synthesized,
2608 /// for the current class. It is used when implementing the current class,
2609 /// where all ivars, declared and synthesized, must be known.
2610 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
2611 bool leafClass,
2612 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
2613 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2614 DeepCollectObjCIvars(SuperClass, false, Ivars);
2615 if (!leafClass) {
2616 llvm::append_range(Ivars, OI->ivars());
2617 } else {
2618 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2619 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2620 Iv = Iv->getNextIvar())
2621 Ivars.push_back(Iv);
2622 }
2623 }
2624
2625 /// CollectInheritedProtocols - Collect all protocols in the current class and
2626 /// those inherited by it.
2627 void ASTContext::CollectInheritedProtocols(const Decl *CDecl, 2628 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) { 2629 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) { 2630 // We can use protocol_iterator here instead of 2631 // all_referenced_protocol_iterator since we are walking all categories. 2632 for (auto *Proto : OI->all_referenced_protocols()) { 2633 CollectInheritedProtocols(Proto, Protocols); 2634 } 2635 2636 // Categories of this Interface. 2637 for (const auto *Cat : OI->visible_categories()) 2638 CollectInheritedProtocols(Cat, Protocols); 2639 2640 if (ObjCInterfaceDecl *SD = OI->getSuperClass()) 2641 while (SD) { 2642 CollectInheritedProtocols(SD, Protocols); 2643 SD = SD->getSuperClass(); 2644 } 2645 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) { 2646 for (auto *Proto : OC->protocols()) { 2647 CollectInheritedProtocols(Proto, Protocols); 2648 } 2649 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) { 2650 // Insert the protocol. 2651 if (!Protocols.insert( 2652 const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second) 2653 return; 2654 2655 for (auto *Proto : OP->protocols()) 2656 CollectInheritedProtocols(Proto, Protocols); 2657 } 2658 } 2659 2660 static bool unionHasUniqueObjectRepresentations(const ASTContext &Context, 2661 const RecordDecl *RD, 2662 bool CheckIfTriviallyCopyable) { 2663 assert(RD->isUnion() && "Must be union type"); 2664 CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl()); 2665 2666 for (const auto *Field : RD->fields()) { 2667 if (!Context.hasUniqueObjectRepresentations(Field->getType(), 2668 CheckIfTriviallyCopyable)) 2669 return false; 2670 CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType()); 2671 if (FieldSize != UnionSize) 2672 return false; 2673 } 2674 return !RD->field_empty(); 2675 } 2676 2677 static int64_t getSubobjectOffset(const FieldDecl *Field, 2678 const ASTContext &Context, 2679 const clang::ASTRecordLayout & /*Layout*/) { 2680 return Context.getFieldOffset(Field); 2681 } 2682 2683 static int64_t getSubobjectOffset(const CXXRecordDecl *RD, 2684 const ASTContext &Context, 2685 const clang::ASTRecordLayout &Layout) { 2686 return Context.toBits(Layout.getBaseClassOffset(RD)); 2687 } 2688 2689 static std::optional<int64_t> 2690 structHasUniqueObjectRepresentations(const ASTContext &Context, 2691 const RecordDecl *RD, 2692 bool CheckIfTriviallyCopyable); 2693 2694 static std::optional<int64_t> 2695 getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context, 2696 bool CheckIfTriviallyCopyable) { 2697 if (Field->getType()->isRecordType()) { 2698 const RecordDecl *RD = Field->getType()->getAsRecordDecl(); 2699 if (!RD->isUnion()) 2700 return structHasUniqueObjectRepresentations(Context, RD, 2701 CheckIfTriviallyCopyable); 2702 } 2703 2704 // A _BitInt type may not be unique if it has padding bits 2705 // but if it is a bitfield the padding bits are not used. 2706 bool IsBitIntType = Field->getType()->isBitIntType(); 2707 if (!Field->getType()->isReferenceType() && !IsBitIntType && 2708 !Context.hasUniqueObjectRepresentations(Field->getType(), 2709 CheckIfTriviallyCopyable)) 2710 return std::nullopt; 2711 2712 int64_t FieldSizeInBits = 2713 Context.toBits(Context.getTypeSizeInChars(Field->getType())); 2714 if (Field->isBitField()) { 2715 // If we have explicit padding bits, they don't contribute bits 2716 // to the actual object representation, so return 0. 
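// For example (illustrative): in `struct S { int a : 4; int : 4; };` the unnamed
// `int : 4` member is pure padding and contributes no value bits.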
2717 if (Field->isUnnamedBitfield()) 2718 return 0; 2719 2720 int64_t BitfieldSize = Field->getBitWidthValue(Context); 2721 if (IsBitIntType) { 2722 if ((unsigned)BitfieldSize > 2723 cast<BitIntType>(Field->getType())->getNumBits()) 2724 return std::nullopt; 2725 } else if (BitfieldSize > FieldSizeInBits) { 2726 return std::nullopt; 2727 } 2728 FieldSizeInBits = BitfieldSize; 2729 } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations( 2730 Field->getType(), CheckIfTriviallyCopyable)) { 2731 return std::nullopt; 2732 } 2733 return FieldSizeInBits; 2734 } 2735 2736 static std::optional<int64_t> 2737 getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context, 2738 bool CheckIfTriviallyCopyable) { 2739 return structHasUniqueObjectRepresentations(Context, RD, 2740 CheckIfTriviallyCopyable); 2741 } 2742 2743 template <typename RangeT> 2744 static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations( 2745 const RangeT &Subobjects, int64_t CurOffsetInBits, 2746 const ASTContext &Context, const clang::ASTRecordLayout &Layout, 2747 bool CheckIfTriviallyCopyable) { 2748 for (const auto *Subobject : Subobjects) { 2749 std::optional<int64_t> SizeInBits = 2750 getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable); 2751 if (!SizeInBits) 2752 return std::nullopt; 2753 if (*SizeInBits != 0) { 2754 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout); 2755 if (Offset != CurOffsetInBits) 2756 return std::nullopt; 2757 CurOffsetInBits += *SizeInBits; 2758 } 2759 } 2760 return CurOffsetInBits; 2761 } 2762 2763 static std::optional<int64_t> 2764 structHasUniqueObjectRepresentations(const ASTContext &Context, 2765 const RecordDecl *RD, 2766 bool CheckIfTriviallyCopyable) { 2767 assert(!RD->isUnion() && "Must be struct/class type"); 2768 const auto &Layout = Context.getASTRecordLayout(RD); 2769 2770 int64_t CurOffsetInBits = 0; 2771 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) { 2772 if (ClassDecl->isDynamicClass()) 2773 return std::nullopt; 2774 2775 SmallVector<CXXRecordDecl *, 4> Bases; 2776 for (const auto &Base : ClassDecl->bases()) { 2777 // Empty types can be inherited from, and non-empty types can potentially 2778 // have tail padding, so just make sure there isn't an error. 
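// The bases are collected here and then sorted by layout offset below, so that
// the subobjects can be checked in address order.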
2779 Bases.emplace_back(Base.getType()->getAsCXXRecordDecl()); 2780 } 2781 2782 llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) { 2783 return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R); 2784 }); 2785 2786 std::optional<int64_t> OffsetAfterBases = 2787 structSubobjectsHaveUniqueObjectRepresentations( 2788 Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable); 2789 if (!OffsetAfterBases) 2790 return std::nullopt; 2791 CurOffsetInBits = *OffsetAfterBases; 2792 } 2793 2794 std::optional<int64_t> OffsetAfterFields = 2795 structSubobjectsHaveUniqueObjectRepresentations( 2796 RD->fields(), CurOffsetInBits, Context, Layout, 2797 CheckIfTriviallyCopyable); 2798 if (!OffsetAfterFields) 2799 return std::nullopt; 2800 CurOffsetInBits = *OffsetAfterFields; 2801 2802 return CurOffsetInBits; 2803 } 2804 2805 bool ASTContext::hasUniqueObjectRepresentations( 2806 QualType Ty, bool CheckIfTriviallyCopyable) const { 2807 // C++17 [meta.unary.prop]: 2808 // The predicate condition for a template specialization 2809 // has_unique_object_representations<T> shall be 2810 // satisfied if and only if: 2811 // (9.1) - T is trivially copyable, and 2812 // (9.2) - any two objects of type T with the same value have the same 2813 // object representation, where two objects 2814 // of array or non-union class type are considered to have the same value 2815 // if their respective sequences of 2816 // direct subobjects have the same values, and two objects of union type 2817 // are considered to have the same 2818 // value if they have the same active member and the corresponding members 2819 // have the same value. 2820 // The set of scalar types for which this condition holds is 2821 // implementation-defined. [ Note: If a type has padding 2822 // bits, the condition does not hold; otherwise, the condition holds true 2823 // for unsigned integral types. -- end note ] 2824 assert(!Ty.isNull() && "Null QualType sent to unique object rep check"); 2825 2826 // Arrays are unique only if their element type is unique. 2827 if (Ty->isArrayType()) 2828 return hasUniqueObjectRepresentations(getBaseElementType(Ty), 2829 CheckIfTriviallyCopyable); 2830 2831 // (9.1) - T is trivially copyable... 2832 if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(*this)) 2833 return false; 2834 2835 // All integrals and enums are unique. 2836 if (Ty->isIntegralOrEnumerationType()) { 2837 // Except _BitInt types that have padding bits. 2838 if (const auto *BIT = Ty->getAs<BitIntType>()) 2839 return getTypeSize(BIT) == BIT->getNumBits(); 2840 2841 return true; 2842 } 2843 2844 // All other pointers are unique. 
2845 if (Ty->isPointerType()) 2846 return true; 2847 2848 if (const auto *MPT = Ty->getAs<MemberPointerType>()) 2849 return !ABI->getMemberPointerInfo(MPT).HasPadding; 2850 2851 if (Ty->isRecordType()) { 2852 const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl(); 2853 2854 if (Record->isInvalidDecl()) 2855 return false; 2856 2857 if (Record->isUnion()) 2858 return unionHasUniqueObjectRepresentations(*this, Record, 2859 CheckIfTriviallyCopyable); 2860 2861 std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations( 2862 *this, Record, CheckIfTriviallyCopyable); 2863 2864 return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty)); 2865 } 2866 2867 // FIXME: More cases to handle here (list by rsmith): 2868 // vectors (careful about, eg, vector of 3 foo) 2869 // _Complex int and friends 2870 // _Atomic T 2871 // Obj-C block pointers 2872 // Obj-C object pointers 2873 // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t, 2874 // clk_event_t, queue_t, reserve_id_t) 2875 // There're also Obj-C class types and the Obj-C selector type, but I think it 2876 // makes sense for those to return false here. 2877 2878 return false; 2879 } 2880 2881 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const { 2882 unsigned count = 0; 2883 // Count ivars declared in class extension. 2884 for (const auto *Ext : OI->known_extensions()) 2885 count += Ext->ivar_size(); 2886 2887 // Count ivar defined in this class's implementation. This 2888 // includes synthesized ivars. 2889 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation()) 2890 count += ImplDecl->ivar_size(); 2891 2892 return count; 2893 } 2894 2895 bool ASTContext::isSentinelNullExpr(const Expr *E) { 2896 if (!E) 2897 return false; 2898 2899 // nullptr_t is always treated as null. 2900 if (E->getType()->isNullPtrType()) return true; 2901 2902 if (E->getType()->isAnyPointerType() && 2903 E->IgnoreParenCasts()->isNullPointerConstant(*this, 2904 Expr::NPC_ValueDependentIsNull)) 2905 return true; 2906 2907 // Unfortunately, __null has type 'int'. 2908 if (isa<GNUNullExpr>(E)) return true; 2909 2910 return false; 2911 } 2912 2913 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none 2914 /// exists. 2915 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) { 2916 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2917 I = ObjCImpls.find(D); 2918 if (I != ObjCImpls.end()) 2919 return cast<ObjCImplementationDecl>(I->second); 2920 return nullptr; 2921 } 2922 2923 /// Get the implementation of ObjCCategoryDecl, or nullptr if none 2924 /// exists. 2925 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) { 2926 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2927 I = ObjCImpls.find(D); 2928 if (I != ObjCImpls.end()) 2929 return cast<ObjCCategoryImplDecl>(I->second); 2930 return nullptr; 2931 } 2932 2933 /// Set the implementation of ObjCInterfaceDecl. 2934 void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD, 2935 ObjCImplementationDecl *ImplD) { 2936 assert(IFaceD && ImplD && "Passed null params"); 2937 ObjCImpls[IFaceD] = ImplD; 2938 } 2939 2940 /// Set the implementation of ObjCCategoryDecl. 
2941 void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD, 2942 ObjCCategoryImplDecl *ImplD) { 2943 assert(CatD && ImplD && "Passed null params"); 2944 ObjCImpls[CatD] = ImplD; 2945 } 2946 2947 const ObjCMethodDecl * 2948 ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const { 2949 return ObjCMethodRedecls.lookup(MD); 2950 } 2951 2952 void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD, 2953 const ObjCMethodDecl *Redecl) { 2954 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration"); 2955 ObjCMethodRedecls[MD] = Redecl; 2956 } 2957 2958 const ObjCInterfaceDecl *ASTContext::getObjContainingInterface( 2959 const NamedDecl *ND) const { 2960 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext())) 2961 return ID; 2962 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext())) 2963 return CD->getClassInterface(); 2964 if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext())) 2965 return IMD->getClassInterface(); 2966 2967 return nullptr; 2968 } 2969 2970 /// Get the copy initialization expression of VarDecl, or nullptr if 2971 /// none exists. 2972 BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const { 2973 assert(VD && "Passed null params"); 2974 assert(VD->hasAttr<BlocksAttr>() && 2975 "getBlockVarCopyInits - not __block var"); 2976 auto I = BlockVarCopyInits.find(VD); 2977 if (I != BlockVarCopyInits.end()) 2978 return I->second; 2979 return {nullptr, false}; 2980 } 2981 2982 /// Set the copy initialization expression of a block var decl. 2983 void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr, 2984 bool CanThrow) { 2985 assert(VD && CopyExpr && "Passed null params"); 2986 assert(VD->hasAttr<BlocksAttr>() && 2987 "setBlockVarCopyInits - not __block var"); 2988 BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow); 2989 } 2990 2991 TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T, 2992 unsigned DataSize) const { 2993 if (!DataSize) 2994 DataSize = TypeLoc::getFullDataSizeForType(T); 2995 else 2996 assert(DataSize == TypeLoc::getFullDataSizeForType(T) && 2997 "incorrect data size provided to CreateTypeSourceInfo!"); 2998 2999 auto *TInfo = 3000 (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8); 3001 new (TInfo) TypeSourceInfo(T, DataSize); 3002 return TInfo; 3003 } 3004 3005 TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T, 3006 SourceLocation L) const { 3007 TypeSourceInfo *DI = CreateTypeSourceInfo(T); 3008 DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L); 3009 return DI; 3010 } 3011 3012 const ASTRecordLayout & 3013 ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const { 3014 return getObjCLayout(D, nullptr); 3015 } 3016 3017 const ASTRecordLayout & 3018 ASTContext::getASTObjCImplementationLayout( 3019 const ObjCImplementationDecl *D) const { 3020 return getObjCLayout(D->getClassInterface(), D); 3021 } 3022 3023 static auto getCanonicalTemplateArguments(const ASTContext &C, 3024 ArrayRef<TemplateArgument> Args, 3025 bool &AnyNonCanonArgs) { 3026 SmallVector<TemplateArgument, 16> CanonArgs(Args); 3027 for (auto &Arg : CanonArgs) { 3028 TemplateArgument OrigArg = Arg; 3029 Arg = C.getCanonicalTemplateArgument(Arg); 3030 AnyNonCanonArgs |= !Arg.structurallyEquals(OrigArg); 3031 } 3032 return CanonArgs; 3033 } 3034 3035 //===----------------------------------------------------------------------===// 3036 // Type creation/memoization methods 3037 
//===----------------------------------------------------------------------===// 3038 3039 QualType 3040 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const { 3041 unsigned fastQuals = quals.getFastQualifiers(); 3042 quals.removeFastQualifiers(); 3043 3044 // Check if we've already instantiated this type. 3045 llvm::FoldingSetNodeID ID; 3046 ExtQuals::Profile(ID, baseType, quals); 3047 void *insertPos = nullptr; 3048 if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) { 3049 assert(eq->getQualifiers() == quals); 3050 return QualType(eq, fastQuals); 3051 } 3052 3053 // If the base type is not canonical, make the appropriate canonical type. 3054 QualType canon; 3055 if (!baseType->isCanonicalUnqualified()) { 3056 SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split(); 3057 canonSplit.Quals.addConsistentQualifiers(quals); 3058 canon = getExtQualType(canonSplit.Ty, canonSplit.Quals); 3059 3060 // Re-find the insert position. 3061 (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos); 3062 } 3063 3064 auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals); 3065 ExtQualNodes.InsertNode(eq, insertPos); 3066 return QualType(eq, fastQuals); 3067 } 3068 3069 QualType ASTContext::getAddrSpaceQualType(QualType T, 3070 LangAS AddressSpace) const { 3071 QualType CanT = getCanonicalType(T); 3072 if (CanT.getAddressSpace() == AddressSpace) 3073 return T; 3074 3075 // If we are composing extended qualifiers together, merge together 3076 // into one ExtQuals node. 3077 QualifierCollector Quals; 3078 const Type *TypeNode = Quals.strip(T); 3079 3080 // If this type already has an address space specified, it cannot get 3081 // another one. 3082 assert(!Quals.hasAddressSpace() && 3083 "Type cannot be in multiple addr spaces!"); 3084 Quals.addAddressSpace(AddressSpace); 3085 3086 return getExtQualType(TypeNode, Quals); 3087 } 3088 3089 QualType ASTContext::removeAddrSpaceQualType(QualType T) const { 3090 // If the type is not qualified with an address space, just return it 3091 // immediately. 3092 if (!T.hasAddressSpace()) 3093 return T; 3094 3095 // If we are composing extended qualifiers together, merge together 3096 // into one ExtQuals node. 3097 QualifierCollector Quals; 3098 const Type *TypeNode; 3099 3100 while (T.hasAddressSpace()) { 3101 TypeNode = Quals.strip(T); 3102 3103 // If the type no longer has an address space after stripping qualifiers, 3104 // jump out. 3105 if (!QualType(TypeNode, 0).hasAddressSpace()) 3106 break; 3107 3108 // There might be sugar in the way. Strip it and try again. 3109 T = T.getSingleStepDesugaredType(*this); 3110 } 3111 3112 Quals.removeAddressSpace(); 3113 3114 // Removal of the address space can mean there are no longer any 3115 // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts) 3116 // or required. 
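// For example (illustrative): stripping the address space from
// `__attribute__((address_space(1))) const int` leaves only the fast 'const'
// qualifier, so a plain QualType is returned rather than an ExtQuals node.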
3117 if (Quals.hasNonFastQualifiers()) 3118 return getExtQualType(TypeNode, Quals); 3119 else 3120 return QualType(TypeNode, Quals.getFastQualifiers()); 3121 } 3122 3123 QualType ASTContext::getObjCGCQualType(QualType T, 3124 Qualifiers::GC GCAttr) const { 3125 QualType CanT = getCanonicalType(T); 3126 if (CanT.getObjCGCAttr() == GCAttr) 3127 return T; 3128 3129 if (const auto *ptr = T->getAs<PointerType>()) { 3130 QualType Pointee = ptr->getPointeeType(); 3131 if (Pointee->isAnyPointerType()) { 3132 QualType ResultType = getObjCGCQualType(Pointee, GCAttr); 3133 return getPointerType(ResultType); 3134 } 3135 } 3136 3137 // If we are composing extended qualifiers together, merge together 3138 // into one ExtQuals node. 3139 QualifierCollector Quals; 3140 const Type *TypeNode = Quals.strip(T); 3141 3142 // If this type already has an ObjCGC specified, it cannot get 3143 // another one. 3144 assert(!Quals.hasObjCGCAttr() && 3145 "Type cannot have multiple ObjCGCs!"); 3146 Quals.addObjCGCAttr(GCAttr); 3147 3148 return getExtQualType(TypeNode, Quals); 3149 } 3150 3151 QualType ASTContext::removePtrSizeAddrSpace(QualType T) const { 3152 if (const PointerType *Ptr = T->getAs<PointerType>()) { 3153 QualType Pointee = Ptr->getPointeeType(); 3154 if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) { 3155 return getPointerType(removeAddrSpaceQualType(Pointee)); 3156 } 3157 } 3158 return T; 3159 } 3160 3161 const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, 3162 FunctionType::ExtInfo Info) { 3163 if (T->getExtInfo() == Info) 3164 return T; 3165 3166 QualType Result; 3167 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) { 3168 Result = getFunctionNoProtoType(FNPT->getReturnType(), Info); 3169 } else { 3170 const auto *FPT = cast<FunctionProtoType>(T); 3171 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3172 EPI.ExtInfo = Info; 3173 Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI); 3174 } 3175 3176 return cast<FunctionType>(Result.getTypePtr()); 3177 } 3178 3179 void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD, 3180 QualType ResultType) { 3181 FD = FD->getMostRecentDecl(); 3182 while (true) { 3183 const auto *FPT = FD->getType()->castAs<FunctionProtoType>(); 3184 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3185 FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI)); 3186 if (FunctionDecl *Next = FD->getPreviousDecl()) 3187 FD = Next; 3188 else 3189 break; 3190 } 3191 if (ASTMutationListener *L = getASTMutationListener()) 3192 L->DeducedReturnType(FD, ResultType); 3193 } 3194 3195 /// Get a function type and produce the equivalent function type with the 3196 /// specified exception specification. Type sugar that can be present on a 3197 /// declaration of a function with an exception specification is permitted 3198 /// and preserved. Other type sugar (for instance, typedefs) is not. 3199 QualType ASTContext::getFunctionTypeWithExceptionSpec( 3200 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const { 3201 // Might have some parens. 3202 if (const auto *PT = dyn_cast<ParenType>(Orig)) 3203 return getParenType( 3204 getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI)); 3205 3206 // Might be wrapped in a macro qualified type. 
3207 if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig)) 3208 return getMacroQualifiedType( 3209 getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI), 3210 MQT->getMacroIdentifier()); 3211 3212 // Might have a calling-convention attribute. 3213 if (const auto *AT = dyn_cast<AttributedType>(Orig)) 3214 return getAttributedType( 3215 AT->getAttrKind(), 3216 getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI), 3217 getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI)); 3218 3219 // Anything else must be a function type. Rebuild it with the new exception 3220 // specification. 3221 const auto *Proto = Orig->castAs<FunctionProtoType>(); 3222 return getFunctionType( 3223 Proto->getReturnType(), Proto->getParamTypes(), 3224 Proto->getExtProtoInfo().withExceptionSpec(ESI)); 3225 } 3226 3227 bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T, 3228 QualType U) const { 3229 return hasSameType(T, U) || 3230 (getLangOpts().CPlusPlus17 && 3231 hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None), 3232 getFunctionTypeWithExceptionSpec(U, EST_None))); 3233 } 3234 3235 QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) { 3236 if (const auto *Proto = T->getAs<FunctionProtoType>()) { 3237 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3238 SmallVector<QualType, 16> Args(Proto->param_types().size()); 3239 for (unsigned i = 0, n = Args.size(); i != n; ++i) 3240 Args[i] = removePtrSizeAddrSpace(Proto->param_types()[i]); 3241 return getFunctionType(RetTy, Args, Proto->getExtProtoInfo()); 3242 } 3243 3244 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) { 3245 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3246 return getFunctionNoProtoType(RetTy, Proto->getExtInfo()); 3247 } 3248 3249 return T; 3250 } 3251 3252 bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) { 3253 return hasSameType(T, U) || 3254 hasSameType(getFunctionTypeWithoutPtrSizes(T), 3255 getFunctionTypeWithoutPtrSizes(U)); 3256 } 3257 3258 void ASTContext::adjustExceptionSpec( 3259 FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI, 3260 bool AsWritten) { 3261 // Update the type. 3262 QualType Updated = 3263 getFunctionTypeWithExceptionSpec(FD->getType(), ESI); 3264 FD->setType(Updated); 3265 3266 if (!AsWritten) 3267 return; 3268 3269 // Update the type in the type source information too. 3270 if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) { 3271 // If the type and the type-as-written differ, we may need to update 3272 // the type-as-written too. 3273 if (TSInfo->getType() != FD->getType()) 3274 Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI); 3275 3276 // FIXME: When we get proper type location information for exceptions, 3277 // we'll also have to rebuild the TypeSourceInfo. For now, we just patch 3278 // up the TypeSourceInfo; 3279 assert(TypeLoc::getFullDataSizeForType(Updated) == 3280 TypeLoc::getFullDataSizeForType(TSInfo->getType()) && 3281 "TypeLoc size mismatch from updating exception specification"); 3282 TSInfo->overrideType(Updated); 3283 } 3284 } 3285 3286 /// getComplexType - Return the uniqued reference to the type for a complex 3287 /// number with the specified element type. 3288 QualType ASTContext::getComplexType(QualType T) const { 3289 // Unique pointers, to guarantee there is only one pointer of a particular 3290 // structure. 
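  // (The FoldingSet profile below is keyed on the element type alone, so
  // repeated requests for _Complex of the same element type hand back the same
  // uniqued node.)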
3291 llvm::FoldingSetNodeID ID; 3292 ComplexType::Profile(ID, T); 3293 3294 void *InsertPos = nullptr; 3295 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos)) 3296 return QualType(CT, 0); 3297 3298 // If the pointee type isn't canonical, this won't be a canonical type either, 3299 // so fill in the canonical type field. 3300 QualType Canonical; 3301 if (!T.isCanonical()) { 3302 Canonical = getComplexType(getCanonicalType(T)); 3303 3304 // Get the new insert position for the node we care about. 3305 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos); 3306 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3307 } 3308 auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical); 3309 Types.push_back(New); 3310 ComplexTypes.InsertNode(New, InsertPos); 3311 return QualType(New, 0); 3312 } 3313 3314 /// getPointerType - Return the uniqued reference to the type for a pointer to 3315 /// the specified type. 3316 QualType ASTContext::getPointerType(QualType T) const { 3317 // Unique pointers, to guarantee there is only one pointer of a particular 3318 // structure. 3319 llvm::FoldingSetNodeID ID; 3320 PointerType::Profile(ID, T); 3321 3322 void *InsertPos = nullptr; 3323 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3324 return QualType(PT, 0); 3325 3326 // If the pointee type isn't canonical, this won't be a canonical type either, 3327 // so fill in the canonical type field. 3328 QualType Canonical; 3329 if (!T.isCanonical()) { 3330 Canonical = getPointerType(getCanonicalType(T)); 3331 3332 // Get the new insert position for the node we care about. 3333 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3334 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3335 } 3336 auto *New = new (*this, TypeAlignment) PointerType(T, Canonical); 3337 Types.push_back(New); 3338 PointerTypes.InsertNode(New, InsertPos); 3339 return QualType(New, 0); 3340 } 3341 3342 QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const { 3343 llvm::FoldingSetNodeID ID; 3344 AdjustedType::Profile(ID, Orig, New); 3345 void *InsertPos = nullptr; 3346 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3347 if (AT) 3348 return QualType(AT, 0); 3349 3350 QualType Canonical = getCanonicalType(New); 3351 3352 // Get the new insert position for the node we care about. 3353 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3354 assert(!AT && "Shouldn't be in the map!"); 3355 3356 AT = new (*this, TypeAlignment) 3357 AdjustedType(Type::Adjusted, Orig, New, Canonical); 3358 Types.push_back(AT); 3359 AdjustedTypes.InsertNode(AT, InsertPos); 3360 return QualType(AT, 0); 3361 } 3362 3363 QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const { 3364 llvm::FoldingSetNodeID ID; 3365 AdjustedType::Profile(ID, Orig, Decayed); 3366 void *InsertPos = nullptr; 3367 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3368 if (AT) 3369 return QualType(AT, 0); 3370 3371 QualType Canonical = getCanonicalType(Decayed); 3372 3373 // Get the new insert position for the node we care about. 
3374 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3375 assert(!AT && "Shouldn't be in the map!"); 3376 3377 AT = new (*this, TypeAlignment) DecayedType(Orig, Decayed, Canonical); 3378 Types.push_back(AT); 3379 AdjustedTypes.InsertNode(AT, InsertPos); 3380 return QualType(AT, 0); 3381 } 3382 3383 QualType ASTContext::getDecayedType(QualType T) const { 3384 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay"); 3385 3386 QualType Decayed; 3387 3388 // C99 6.7.5.3p7: 3389 // A declaration of a parameter as "array of type" shall be 3390 // adjusted to "qualified pointer to type", where the type 3391 // qualifiers (if any) are those specified within the [ and ] of 3392 // the array type derivation. 3393 if (T->isArrayType()) 3394 Decayed = getArrayDecayedType(T); 3395 3396 // C99 6.7.5.3p8: 3397 // A declaration of a parameter as "function returning type" 3398 // shall be adjusted to "pointer to function returning type", as 3399 // in 6.3.2.1. 3400 if (T->isFunctionType()) 3401 Decayed = getPointerType(T); 3402 3403 return getDecayedType(T, Decayed); 3404 } 3405 3406 /// getBlockPointerType - Return the uniqued reference to the type for 3407 /// a pointer to the specified block. 3408 QualType ASTContext::getBlockPointerType(QualType T) const { 3409 assert(T->isFunctionType() && "block of function types only"); 3410 // Unique pointers, to guarantee there is only one block of a particular 3411 // structure. 3412 llvm::FoldingSetNodeID ID; 3413 BlockPointerType::Profile(ID, T); 3414 3415 void *InsertPos = nullptr; 3416 if (BlockPointerType *PT = 3417 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3418 return QualType(PT, 0); 3419 3420 // If the block pointee type isn't canonical, this won't be a canonical 3421 // type either so fill in the canonical type field. 3422 QualType Canonical; 3423 if (!T.isCanonical()) { 3424 Canonical = getBlockPointerType(getCanonicalType(T)); 3425 3426 // Get the new insert position for the node we care about. 3427 BlockPointerType *NewIP = 3428 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3429 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3430 } 3431 auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical); 3432 Types.push_back(New); 3433 BlockPointerTypes.InsertNode(New, InsertPos); 3434 return QualType(New, 0); 3435 } 3436 3437 /// getLValueReferenceType - Return the uniqued reference to the type for an 3438 /// lvalue reference to the specified type. 3439 QualType 3440 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const { 3441 assert((!T->isPlaceholderType() || 3442 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && 3443 "Unresolved placeholder type"); 3444 3445 // Unique pointers, to guarantee there is only one pointer of a particular 3446 // structure. 3447 llvm::FoldingSetNodeID ID; 3448 ReferenceType::Profile(ID, T, SpelledAsLValue); 3449 3450 void *InsertPos = nullptr; 3451 if (LValueReferenceType *RT = 3452 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3453 return QualType(RT, 0); 3454 3455 const auto *InnerRef = T->getAs<ReferenceType>(); 3456 3457 // If the referencee type isn't canonical, this won't be a canonical type 3458 // either, so fill in the canonical type field. 3459 QualType Canonical; 3460 if (!SpelledAsLValue || InnerRef || !T.isCanonical()) { 3461 QualType PointeeType = (InnerRef ? 
InnerRef->getPointeeType() : T); 3462 Canonical = getLValueReferenceType(getCanonicalType(PointeeType)); 3463 3464 // Get the new insert position for the node we care about. 3465 LValueReferenceType *NewIP = 3466 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3467 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3468 } 3469 3470 auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical, 3471 SpelledAsLValue); 3472 Types.push_back(New); 3473 LValueReferenceTypes.InsertNode(New, InsertPos); 3474 3475 return QualType(New, 0); 3476 } 3477 3478 /// getRValueReferenceType - Return the uniqued reference to the type for an 3479 /// rvalue reference to the specified type. 3480 QualType ASTContext::getRValueReferenceType(QualType T) const { 3481 assert((!T->isPlaceholderType() || 3482 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && 3483 "Unresolved placeholder type"); 3484 3485 // Unique pointers, to guarantee there is only one pointer of a particular 3486 // structure. 3487 llvm::FoldingSetNodeID ID; 3488 ReferenceType::Profile(ID, T, false); 3489 3490 void *InsertPos = nullptr; 3491 if (RValueReferenceType *RT = 3492 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3493 return QualType(RT, 0); 3494 3495 const auto *InnerRef = T->getAs<ReferenceType>(); 3496 3497 // If the referencee type isn't canonical, this won't be a canonical type 3498 // either, so fill in the canonical type field. 3499 QualType Canonical; 3500 if (InnerRef || !T.isCanonical()) { 3501 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 3502 Canonical = getRValueReferenceType(getCanonicalType(PointeeType)); 3503 3504 // Get the new insert position for the node we care about. 3505 RValueReferenceType *NewIP = 3506 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3507 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3508 } 3509 3510 auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical); 3511 Types.push_back(New); 3512 RValueReferenceTypes.InsertNode(New, InsertPos); 3513 return QualType(New, 0); 3514 } 3515 3516 /// getMemberPointerType - Return the uniqued reference to the type for a 3517 /// member pointer to the specified type, in the specified class. 3518 QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const { 3519 // Unique pointers, to guarantee there is only one pointer of a particular 3520 // structure. 3521 llvm::FoldingSetNodeID ID; 3522 MemberPointerType::Profile(ID, T, Cls); 3523 3524 void *InsertPos = nullptr; 3525 if (MemberPointerType *PT = 3526 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3527 return QualType(PT, 0); 3528 3529 // If the pointee or class type isn't canonical, this won't be a canonical 3530 // type either, so fill in the canonical type field. 3531 QualType Canonical; 3532 if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) { 3533 Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls)); 3534 3535 // Get the new insert position for the node we care about. 3536 MemberPointerType *NewIP = 3537 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3538 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3539 } 3540 auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical); 3541 Types.push_back(New); 3542 MemberPointerTypes.InsertNode(New, InsertPos); 3543 return QualType(New, 0); 3544 } 3545 3546 /// getConstantArrayType - Return the unique reference to the type for an 3547 /// array of the specified element type. 
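/// For example, assuming an ASTContext &Ctx is in scope (names here are only
/// an illustrative sketch), the type for `int[4]` could be requested as:
/// \code
///   QualType IntArr4 = Ctx.getConstantArrayType(
///       Ctx.IntTy, llvm::APInt(/*numBits=*/32, /*val=*/4),
///       /*SizeExpr=*/nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
/// \endcode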
3548 QualType ASTContext::getConstantArrayType(QualType EltTy, 3549 const llvm::APInt &ArySizeIn, 3550 const Expr *SizeExpr, 3551 ArrayType::ArraySizeModifier ASM, 3552 unsigned IndexTypeQuals) const { 3553 assert((EltTy->isDependentType() || 3554 EltTy->isIncompleteType() || EltTy->isConstantSizeType()) && 3555 "Constant array of VLAs is illegal!"); 3556 3557 // We only need the size as part of the type if it's instantiation-dependent. 3558 if (SizeExpr && !SizeExpr->isInstantiationDependent()) 3559 SizeExpr = nullptr; 3560 3561 // Convert the array size into a canonical width matching the pointer size for 3562 // the target. 3563 llvm::APInt ArySize(ArySizeIn); 3564 ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth()); 3565 3566 llvm::FoldingSetNodeID ID; 3567 ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM, 3568 IndexTypeQuals); 3569 3570 void *InsertPos = nullptr; 3571 if (ConstantArrayType *ATP = 3572 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos)) 3573 return QualType(ATP, 0); 3574 3575 // If the element type isn't canonical or has qualifiers, or the array bound 3576 // is instantiation-dependent, this won't be a canonical type either, so fill 3577 // in the canonical type field. 3578 QualType Canon; 3579 // FIXME: Check below should look for qualifiers behind sugar. 3580 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) { 3581 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3582 Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr, 3583 ASM, IndexTypeQuals); 3584 Canon = getQualifiedType(Canon, canonSplit.Quals); 3585 3586 // Get the new insert position for the node we care about. 3587 ConstantArrayType *NewIP = 3588 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos); 3589 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3590 } 3591 3592 void *Mem = Allocate( 3593 ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0), 3594 TypeAlignment); 3595 auto *New = new (Mem) 3596 ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals); 3597 ConstantArrayTypes.InsertNode(New, InsertPos); 3598 Types.push_back(New); 3599 return QualType(New, 0); 3600 } 3601 3602 /// getVariableArrayDecayedType - Turns the given type, which may be 3603 /// variably-modified, into the corresponding type with all the known 3604 /// sizes replaced with [*]. 3605 QualType ASTContext::getVariableArrayDecayedType(QualType type) const { 3606 // Vastly most common case. 3607 if (!type->isVariablyModifiedType()) return type; 3608 3609 QualType result; 3610 3611 SplitQualType split = type.getSplitDesugaredType(); 3612 const Type *ty = split.Ty; 3613 switch (ty->getTypeClass()) { 3614 #define TYPE(Class, Base) 3615 #define ABSTRACT_TYPE(Class, Base) 3616 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 3617 #include "clang/AST/TypeNodes.inc" 3618 llvm_unreachable("didn't desugar past all non-canonical types?"); 3619 3620 // These types should never be variably-modified. 
3621 case Type::Builtin: 3622 case Type::Complex: 3623 case Type::Vector: 3624 case Type::DependentVector: 3625 case Type::ExtVector: 3626 case Type::DependentSizedExtVector: 3627 case Type::ConstantMatrix: 3628 case Type::DependentSizedMatrix: 3629 case Type::DependentAddressSpace: 3630 case Type::ObjCObject: 3631 case Type::ObjCInterface: 3632 case Type::ObjCObjectPointer: 3633 case Type::Record: 3634 case Type::Enum: 3635 case Type::UnresolvedUsing: 3636 case Type::TypeOfExpr: 3637 case Type::TypeOf: 3638 case Type::Decltype: 3639 case Type::UnaryTransform: 3640 case Type::DependentName: 3641 case Type::InjectedClassName: 3642 case Type::TemplateSpecialization: 3643 case Type::DependentTemplateSpecialization: 3644 case Type::TemplateTypeParm: 3645 case Type::SubstTemplateTypeParmPack: 3646 case Type::Auto: 3647 case Type::DeducedTemplateSpecialization: 3648 case Type::PackExpansion: 3649 case Type::BitInt: 3650 case Type::DependentBitInt: 3651 llvm_unreachable("type should never be variably-modified"); 3652 3653 // These types can be variably-modified but should never need to 3654 // further decay. 3655 case Type::FunctionNoProto: 3656 case Type::FunctionProto: 3657 case Type::BlockPointer: 3658 case Type::MemberPointer: 3659 case Type::Pipe: 3660 return type; 3661 3662 // These types can be variably-modified. All these modifications 3663 // preserve structure except as noted by comments. 3664 // TODO: if we ever care about optimizing VLAs, there are no-op 3665 // optimizations available here. 3666 case Type::Pointer: 3667 result = getPointerType(getVariableArrayDecayedType( 3668 cast<PointerType>(ty)->getPointeeType())); 3669 break; 3670 3671 case Type::LValueReference: { 3672 const auto *lv = cast<LValueReferenceType>(ty); 3673 result = getLValueReferenceType( 3674 getVariableArrayDecayedType(lv->getPointeeType()), 3675 lv->isSpelledAsLValue()); 3676 break; 3677 } 3678 3679 case Type::RValueReference: { 3680 const auto *lv = cast<RValueReferenceType>(ty); 3681 result = getRValueReferenceType( 3682 getVariableArrayDecayedType(lv->getPointeeType())); 3683 break; 3684 } 3685 3686 case Type::Atomic: { 3687 const auto *at = cast<AtomicType>(ty); 3688 result = getAtomicType(getVariableArrayDecayedType(at->getValueType())); 3689 break; 3690 } 3691 3692 case Type::ConstantArray: { 3693 const auto *cat = cast<ConstantArrayType>(ty); 3694 result = getConstantArrayType( 3695 getVariableArrayDecayedType(cat->getElementType()), 3696 cat->getSize(), 3697 cat->getSizeExpr(), 3698 cat->getSizeModifier(), 3699 cat->getIndexTypeCVRQualifiers()); 3700 break; 3701 } 3702 3703 case Type::DependentSizedArray: { 3704 const auto *dat = cast<DependentSizedArrayType>(ty); 3705 result = getDependentSizedArrayType( 3706 getVariableArrayDecayedType(dat->getElementType()), 3707 dat->getSizeExpr(), 3708 dat->getSizeModifier(), 3709 dat->getIndexTypeCVRQualifiers(), 3710 dat->getBracketsRange()); 3711 break; 3712 } 3713 3714 // Turn incomplete types into [*] types. 3715 case Type::IncompleteArray: { 3716 const auto *iat = cast<IncompleteArrayType>(ty); 3717 result = getVariableArrayType( 3718 getVariableArrayDecayedType(iat->getElementType()), 3719 /*size*/ nullptr, 3720 ArrayType::Normal, 3721 iat->getIndexTypeCVRQualifiers(), 3722 SourceRange()); 3723 break; 3724 } 3725 3726 // Turn VLA types into [*] types. 
3727 case Type::VariableArray: { 3728 const auto *vat = cast<VariableArrayType>(ty); 3729 result = getVariableArrayType( 3730 getVariableArrayDecayedType(vat->getElementType()), 3731 /*size*/ nullptr, 3732 ArrayType::Star, 3733 vat->getIndexTypeCVRQualifiers(), 3734 vat->getBracketsRange()); 3735 break; 3736 } 3737 } 3738 3739 // Apply the top-level qualifiers from the original. 3740 return getQualifiedType(result, split.Quals); 3741 } 3742 3743 /// getVariableArrayType - Returns a non-unique reference to the type for a 3744 /// variable array of the specified element type. 3745 QualType ASTContext::getVariableArrayType(QualType EltTy, 3746 Expr *NumElts, 3747 ArrayType::ArraySizeModifier ASM, 3748 unsigned IndexTypeQuals, 3749 SourceRange Brackets) const { 3750 // Since we don't unique expressions, it isn't possible to unique VLA's 3751 // that have an expression provided for their size. 3752 QualType Canon; 3753 3754 // Be sure to pull qualifiers off the element type. 3755 // FIXME: Check below should look for qualifiers behind sugar. 3756 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) { 3757 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3758 Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM, 3759 IndexTypeQuals, Brackets); 3760 Canon = getQualifiedType(Canon, canonSplit.Quals); 3761 } 3762 3763 auto *New = new (*this, TypeAlignment) 3764 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets); 3765 3766 VariableArrayTypes.push_back(New); 3767 Types.push_back(New); 3768 return QualType(New, 0); 3769 } 3770 3771 /// getDependentSizedArrayType - Returns a non-unique reference to 3772 /// the type for a dependently-sized array of the specified element 3773 /// type. 3774 QualType ASTContext::getDependentSizedArrayType(QualType elementType, 3775 Expr *numElements, 3776 ArrayType::ArraySizeModifier ASM, 3777 unsigned elementTypeQuals, 3778 SourceRange brackets) const { 3779 assert((!numElements || numElements->isTypeDependent() || 3780 numElements->isValueDependent()) && 3781 "Size must be type- or value-dependent!"); 3782 3783 // Dependently-sized array types that do not have a specified number 3784 // of elements will have their sizes deduced from a dependent 3785 // initializer. We do no canonicalization here at all, which is okay 3786 // because they can't be used in most locations. 3787 if (!numElements) { 3788 auto *newType 3789 = new (*this, TypeAlignment) 3790 DependentSizedArrayType(*this, elementType, QualType(), 3791 numElements, ASM, elementTypeQuals, 3792 brackets); 3793 Types.push_back(newType); 3794 return QualType(newType, 0); 3795 } 3796 3797 // Otherwise, we actually build a new type every time, but we 3798 // also build a canonical type. 3799 3800 SplitQualType canonElementType = getCanonicalType(elementType).split(); 3801 3802 void *insertPos = nullptr; 3803 llvm::FoldingSetNodeID ID; 3804 DependentSizedArrayType::Profile(ID, *this, 3805 QualType(canonElementType.Ty, 0), 3806 ASM, elementTypeQuals, numElements); 3807 3808 // Look for an existing type with these properties. 3809 DependentSizedArrayType *canonTy = 3810 DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3811 3812 // If we don't have one, build one. 
3813 if (!canonTy) { 3814 canonTy = new (*this, TypeAlignment) 3815 DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0), 3816 QualType(), numElements, ASM, elementTypeQuals, 3817 brackets); 3818 DependentSizedArrayTypes.InsertNode(canonTy, insertPos); 3819 Types.push_back(canonTy); 3820 } 3821 3822 // Apply qualifiers from the element type to the array. 3823 QualType canon = getQualifiedType(QualType(canonTy,0), 3824 canonElementType.Quals); 3825 3826 // If we didn't need extra canonicalization for the element type or the size 3827 // expression, then just use that as our result. 3828 if (QualType(canonElementType.Ty, 0) == elementType && 3829 canonTy->getSizeExpr() == numElements) 3830 return canon; 3831 3832 // Otherwise, we need to build a type which follows the spelling 3833 // of the element type. 3834 auto *sugaredType 3835 = new (*this, TypeAlignment) 3836 DependentSizedArrayType(*this, elementType, canon, numElements, 3837 ASM, elementTypeQuals, brackets); 3838 Types.push_back(sugaredType); 3839 return QualType(sugaredType, 0); 3840 } 3841 3842 QualType ASTContext::getIncompleteArrayType(QualType elementType, 3843 ArrayType::ArraySizeModifier ASM, 3844 unsigned elementTypeQuals) const { 3845 llvm::FoldingSetNodeID ID; 3846 IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals); 3847 3848 void *insertPos = nullptr; 3849 if (IncompleteArrayType *iat = 3850 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos)) 3851 return QualType(iat, 0); 3852 3853 // If the element type isn't canonical, this won't be a canonical type 3854 // either, so fill in the canonical type field. We also have to pull 3855 // qualifiers off the element type. 3856 QualType canon; 3857 3858 // FIXME: Check below should look for qualifiers behind sugar. 3859 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) { 3860 SplitQualType canonSplit = getCanonicalType(elementType).split(); 3861 canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0), 3862 ASM, elementTypeQuals); 3863 canon = getQualifiedType(canon, canonSplit.Quals); 3864 3865 // Get the new insert position for the node we care about. 
3866 IncompleteArrayType *existing = 3867 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3868 assert(!existing && "Shouldn't be in the map!"); (void) existing; 3869 } 3870 3871 auto *newType = new (*this, TypeAlignment) 3872 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals); 3873 3874 IncompleteArrayTypes.InsertNode(newType, insertPos); 3875 Types.push_back(newType); 3876 return QualType(newType, 0); 3877 } 3878 3879 ASTContext::BuiltinVectorTypeInfo 3880 ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const { 3881 #define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \ 3882 {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \ 3883 NUMVECTORS}; 3884 3885 #define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \ 3886 {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS}; 3887 3888 switch (Ty->getKind()) { 3889 default: 3890 llvm_unreachable("Unsupported builtin vector type"); 3891 case BuiltinType::SveInt8: 3892 return SVE_INT_ELTTY(8, 16, true, 1); 3893 case BuiltinType::SveUint8: 3894 return SVE_INT_ELTTY(8, 16, false, 1); 3895 case BuiltinType::SveInt8x2: 3896 return SVE_INT_ELTTY(8, 16, true, 2); 3897 case BuiltinType::SveUint8x2: 3898 return SVE_INT_ELTTY(8, 16, false, 2); 3899 case BuiltinType::SveInt8x3: 3900 return SVE_INT_ELTTY(8, 16, true, 3); 3901 case BuiltinType::SveUint8x3: 3902 return SVE_INT_ELTTY(8, 16, false, 3); 3903 case BuiltinType::SveInt8x4: 3904 return SVE_INT_ELTTY(8, 16, true, 4); 3905 case BuiltinType::SveUint8x4: 3906 return SVE_INT_ELTTY(8, 16, false, 4); 3907 case BuiltinType::SveInt16: 3908 return SVE_INT_ELTTY(16, 8, true, 1); 3909 case BuiltinType::SveUint16: 3910 return SVE_INT_ELTTY(16, 8, false, 1); 3911 case BuiltinType::SveInt16x2: 3912 return SVE_INT_ELTTY(16, 8, true, 2); 3913 case BuiltinType::SveUint16x2: 3914 return SVE_INT_ELTTY(16, 8, false, 2); 3915 case BuiltinType::SveInt16x3: 3916 return SVE_INT_ELTTY(16, 8, true, 3); 3917 case BuiltinType::SveUint16x3: 3918 return SVE_INT_ELTTY(16, 8, false, 3); 3919 case BuiltinType::SveInt16x4: 3920 return SVE_INT_ELTTY(16, 8, true, 4); 3921 case BuiltinType::SveUint16x4: 3922 return SVE_INT_ELTTY(16, 8, false, 4); 3923 case BuiltinType::SveInt32: 3924 return SVE_INT_ELTTY(32, 4, true, 1); 3925 case BuiltinType::SveUint32: 3926 return SVE_INT_ELTTY(32, 4, false, 1); 3927 case BuiltinType::SveInt32x2: 3928 return SVE_INT_ELTTY(32, 4, true, 2); 3929 case BuiltinType::SveUint32x2: 3930 return SVE_INT_ELTTY(32, 4, false, 2); 3931 case BuiltinType::SveInt32x3: 3932 return SVE_INT_ELTTY(32, 4, true, 3); 3933 case BuiltinType::SveUint32x3: 3934 return SVE_INT_ELTTY(32, 4, false, 3); 3935 case BuiltinType::SveInt32x4: 3936 return SVE_INT_ELTTY(32, 4, true, 4); 3937 case BuiltinType::SveUint32x4: 3938 return SVE_INT_ELTTY(32, 4, false, 4); 3939 case BuiltinType::SveInt64: 3940 return SVE_INT_ELTTY(64, 2, true, 1); 3941 case BuiltinType::SveUint64: 3942 return SVE_INT_ELTTY(64, 2, false, 1); 3943 case BuiltinType::SveInt64x2: 3944 return SVE_INT_ELTTY(64, 2, true, 2); 3945 case BuiltinType::SveUint64x2: 3946 return SVE_INT_ELTTY(64, 2, false, 2); 3947 case BuiltinType::SveInt64x3: 3948 return SVE_INT_ELTTY(64, 2, true, 3); 3949 case BuiltinType::SveUint64x3: 3950 return SVE_INT_ELTTY(64, 2, false, 3); 3951 case BuiltinType::SveInt64x4: 3952 return SVE_INT_ELTTY(64, 2, true, 4); 3953 case BuiltinType::SveUint64x4: 3954 return SVE_INT_ELTTY(64, 2, false, 4); 3955 case BuiltinType::SveBool: 3956 return SVE_ELTTY(BoolTy, 16, 1); 3957 case BuiltinType::SveBoolx2: 3958 
return SVE_ELTTY(BoolTy, 16, 2); 3959 case BuiltinType::SveBoolx4: 3960 return SVE_ELTTY(BoolTy, 16, 4); 3961 case BuiltinType::SveFloat16: 3962 return SVE_ELTTY(HalfTy, 8, 1); 3963 case BuiltinType::SveFloat16x2: 3964 return SVE_ELTTY(HalfTy, 8, 2); 3965 case BuiltinType::SveFloat16x3: 3966 return SVE_ELTTY(HalfTy, 8, 3); 3967 case BuiltinType::SveFloat16x4: 3968 return SVE_ELTTY(HalfTy, 8, 4); 3969 case BuiltinType::SveFloat32: 3970 return SVE_ELTTY(FloatTy, 4, 1); 3971 case BuiltinType::SveFloat32x2: 3972 return SVE_ELTTY(FloatTy, 4, 2); 3973 case BuiltinType::SveFloat32x3: 3974 return SVE_ELTTY(FloatTy, 4, 3); 3975 case BuiltinType::SveFloat32x4: 3976 return SVE_ELTTY(FloatTy, 4, 4); 3977 case BuiltinType::SveFloat64: 3978 return SVE_ELTTY(DoubleTy, 2, 1); 3979 case BuiltinType::SveFloat64x2: 3980 return SVE_ELTTY(DoubleTy, 2, 2); 3981 case BuiltinType::SveFloat64x3: 3982 return SVE_ELTTY(DoubleTy, 2, 3); 3983 case BuiltinType::SveFloat64x4: 3984 return SVE_ELTTY(DoubleTy, 2, 4); 3985 case BuiltinType::SveBFloat16: 3986 return SVE_ELTTY(BFloat16Ty, 8, 1); 3987 case BuiltinType::SveBFloat16x2: 3988 return SVE_ELTTY(BFloat16Ty, 8, 2); 3989 case BuiltinType::SveBFloat16x3: 3990 return SVE_ELTTY(BFloat16Ty, 8, 3); 3991 case BuiltinType::SveBFloat16x4: 3992 return SVE_ELTTY(BFloat16Ty, 8, 4); 3993 #define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \ 3994 IsSigned) \ 3995 case BuiltinType::Id: \ 3996 return {getIntTypeForBitwidth(ElBits, IsSigned), \ 3997 llvm::ElementCount::getScalable(NumEls), NF}; 3998 #define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \ 3999 case BuiltinType::Id: \ 4000 return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \ 4001 llvm::ElementCount::getScalable(NumEls), NF}; 4002 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 4003 case BuiltinType::Id: \ 4004 return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1}; 4005 #include "clang/Basic/RISCVVTypes.def" 4006 } 4007 } 4008 4009 /// getExternrefType - Return a WebAssembly externref type, which represents an 4010 /// opaque reference to a host value. 4011 QualType ASTContext::getWebAssemblyExternrefType() const { 4012 if (Target->getTriple().isWasm() && Target->hasFeature("reference-types")) { 4013 #define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \ 4014 if (BuiltinType::Id == BuiltinType::WasmExternRef) \ 4015 return SingletonId; 4016 #include "clang/Basic/WebAssemblyReferenceTypes.def" 4017 } 4018 llvm_unreachable( 4019 "shouldn't try to generate type externref outside WebAssembly target"); 4020 } 4021 4022 /// getScalableVectorType - Return the unique reference to a scalable vector 4023 /// type of the specified element type and size. VectorType must be a built-in 4024 /// type. 
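/// For example, assuming an AArch64 SVE target and an ASTContext &Ctx (an
/// illustrative sketch only), the following request would resolve to the
/// builtin corresponding to svfloat32_t:
/// \code
///   QualType SveF32 = Ctx.getScalableVectorType(Ctx.FloatTy, /*NumElts=*/4,
///                                               /*NumFields=*/1);
/// \endcode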
4025 QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts, 4026 unsigned NumFields) const { 4027 if (Target->hasAArch64SVETypes()) { 4028 uint64_t EltTySize = getTypeSize(EltTy); 4029 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 4030 IsSigned, IsFP, IsBF) \ 4031 if (!EltTy->isBooleanType() && \ 4032 ((EltTy->hasIntegerRepresentation() && \ 4033 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 4034 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \ 4035 IsFP && !IsBF) || \ 4036 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ 4037 IsBF && !IsFP)) && \ 4038 EltTySize == ElBits && NumElts == NumEls) { \ 4039 return SingletonId; \ 4040 } 4041 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 4042 if (EltTy->isBooleanType() && NumElts == NumEls) \ 4043 return SingletonId; 4044 #define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingleTonId) 4045 #include "clang/Basic/AArch64SVEACLETypes.def" 4046 } else if (Target->hasRISCVVTypes()) { 4047 uint64_t EltTySize = getTypeSize(EltTy); 4048 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \ 4049 IsFP) \ 4050 if (!EltTy->isBooleanType() && \ 4051 ((EltTy->hasIntegerRepresentation() && \ 4052 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 4053 (EltTy->hasFloatingRepresentation() && IsFP)) && \ 4054 EltTySize == ElBits && NumElts == NumEls && NumFields == NF) \ 4055 return SingletonId; 4056 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 4057 if (EltTy->isBooleanType() && NumElts == NumEls) \ 4058 return SingletonId; 4059 #include "clang/Basic/RISCVVTypes.def" 4060 } 4061 return QualType(); 4062 } 4063 4064 /// getVectorType - Return the unique reference to a vector type of 4065 /// the specified element type and size. VectorType must be a built-in type. 4066 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, 4067 VectorType::VectorKind VecKind) const { 4068 assert(vecType->isBuiltinType() || 4069 (vecType->isBitIntType() && 4070 // Only support _BitInt elements with byte-sized power of 2 NumBits. 4071 llvm::isPowerOf2_32(vecType->getAs<BitIntType>()->getNumBits()) && 4072 vecType->getAs<BitIntType>()->getNumBits() >= 8)); 4073 4074 // Check if we've already instantiated a vector of this type. 4075 llvm::FoldingSetNodeID ID; 4076 VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind); 4077 4078 void *InsertPos = nullptr; 4079 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4080 return QualType(VTP, 0); 4081 4082 // If the element type isn't canonical, this won't be a canonical type either, 4083 // so fill in the canonical type field. 4084 QualType Canonical; 4085 if (!vecType.isCanonical()) { 4086 Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind); 4087 4088 // Get the new insert position for the node we care about. 
4089 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4090 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4091 } 4092 auto *New = new (*this, TypeAlignment) 4093 VectorType(vecType, NumElts, Canonical, VecKind); 4094 VectorTypes.InsertNode(New, InsertPos); 4095 Types.push_back(New); 4096 return QualType(New, 0); 4097 } 4098 4099 QualType 4100 ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr, 4101 SourceLocation AttrLoc, 4102 VectorType::VectorKind VecKind) const { 4103 llvm::FoldingSetNodeID ID; 4104 DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr, 4105 VecKind); 4106 void *InsertPos = nullptr; 4107 DependentVectorType *Canon = 4108 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4109 DependentVectorType *New; 4110 4111 if (Canon) { 4112 New = new (*this, TypeAlignment) DependentVectorType( 4113 *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind); 4114 } else { 4115 QualType CanonVecTy = getCanonicalType(VecType); 4116 if (CanonVecTy == VecType) { 4117 New = new (*this, TypeAlignment) DependentVectorType( 4118 *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind); 4119 4120 DependentVectorType *CanonCheck = 4121 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4122 assert(!CanonCheck && 4123 "Dependent-sized vector_size canonical type broken"); 4124 (void)CanonCheck; 4125 DependentVectorTypes.InsertNode(New, InsertPos); 4126 } else { 4127 QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr, 4128 SourceLocation(), VecKind); 4129 New = new (*this, TypeAlignment) DependentVectorType( 4130 *this, VecType, CanonTy, SizeExpr, AttrLoc, VecKind); 4131 } 4132 } 4133 4134 Types.push_back(New); 4135 return QualType(New, 0); 4136 } 4137 4138 /// getExtVectorType - Return the unique reference to an extended vector type of 4139 /// the specified element type and size. VectorType must be a built-in type. 4140 QualType ASTContext::getExtVectorType(QualType vecType, 4141 unsigned NumElts) const { 4142 assert(vecType->isBuiltinType() || vecType->isDependentType() || 4143 (vecType->isBitIntType() && 4144 // Only support _BitInt elements with byte-sized power of 2 NumBits. 4145 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) && 4146 vecType->castAs<BitIntType>()->getNumBits() >= 8)); 4147 4148 // Check if we've already instantiated a vector of this type. 4149 llvm::FoldingSetNodeID ID; 4150 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, 4151 VectorType::GenericVector); 4152 void *InsertPos = nullptr; 4153 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4154 return QualType(VTP, 0); 4155 4156 // If the element type isn't canonical, this won't be a canonical type either, 4157 // so fill in the canonical type field. 4158 QualType Canonical; 4159 if (!vecType.isCanonical()) { 4160 Canonical = getExtVectorType(getCanonicalType(vecType), NumElts); 4161 4162 // Get the new insert position for the node we care about. 
4163 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4164 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4165 } 4166 auto *New = new (*this, TypeAlignment) 4167 ExtVectorType(vecType, NumElts, Canonical); 4168 VectorTypes.InsertNode(New, InsertPos); 4169 Types.push_back(New); 4170 return QualType(New, 0); 4171 } 4172 4173 QualType 4174 ASTContext::getDependentSizedExtVectorType(QualType vecType, 4175 Expr *SizeExpr, 4176 SourceLocation AttrLoc) const { 4177 llvm::FoldingSetNodeID ID; 4178 DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType), 4179 SizeExpr); 4180 4181 void *InsertPos = nullptr; 4182 DependentSizedExtVectorType *Canon 4183 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4184 DependentSizedExtVectorType *New; 4185 if (Canon) { 4186 // We already have a canonical version of this array type; use it as 4187 // the canonical type for a newly-built type. 4188 New = new (*this, TypeAlignment) 4189 DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0), 4190 SizeExpr, AttrLoc); 4191 } else { 4192 QualType CanonVecTy = getCanonicalType(vecType); 4193 if (CanonVecTy == vecType) { 4194 New = new (*this, TypeAlignment) 4195 DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr, 4196 AttrLoc); 4197 4198 DependentSizedExtVectorType *CanonCheck 4199 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4200 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken"); 4201 (void)CanonCheck; 4202 DependentSizedExtVectorTypes.InsertNode(New, InsertPos); 4203 } else { 4204 QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr, 4205 SourceLocation()); 4206 New = new (*this, TypeAlignment) DependentSizedExtVectorType( 4207 *this, vecType, CanonExtTy, SizeExpr, AttrLoc); 4208 } 4209 } 4210 4211 Types.push_back(New); 4212 return QualType(New, 0); 4213 } 4214 4215 QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows, 4216 unsigned NumColumns) const { 4217 llvm::FoldingSetNodeID ID; 4218 ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns, 4219 Type::ConstantMatrix); 4220 4221 assert(MatrixType::isValidElementType(ElementTy) && 4222 "need a valid element type"); 4223 assert(ConstantMatrixType::isDimensionValid(NumRows) && 4224 ConstantMatrixType::isDimensionValid(NumColumns) && 4225 "need valid matrix dimensions"); 4226 void *InsertPos = nullptr; 4227 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos)) 4228 return QualType(MTP, 0); 4229 4230 QualType Canonical; 4231 if (!ElementTy.isCanonical()) { 4232 Canonical = 4233 getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns); 4234 4235 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4236 assert(!NewIP && "Matrix type shouldn't already exist in the map"); 4237 (void)NewIP; 4238 } 4239 4240 auto *New = new (*this, TypeAlignment) 4241 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical); 4242 MatrixTypes.InsertNode(New, InsertPos); 4243 Types.push_back(New); 4244 return QualType(New, 0); 4245 } 4246 4247 QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy, 4248 Expr *RowExpr, 4249 Expr *ColumnExpr, 4250 SourceLocation AttrLoc) const { 4251 QualType CanonElementTy = getCanonicalType(ElementTy); 4252 llvm::FoldingSetNodeID ID; 4253 DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr, 4254 ColumnExpr); 4255 4256 void *InsertPos = nullptr; 4257 DependentSizedMatrixType 
*Canon = 4258 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4259 4260 if (!Canon) { 4261 Canon = new (*this, TypeAlignment) DependentSizedMatrixType( 4262 *this, CanonElementTy, QualType(), RowExpr, ColumnExpr, AttrLoc); 4263 #ifndef NDEBUG 4264 DependentSizedMatrixType *CanonCheck = 4265 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4266 assert(!CanonCheck && "Dependent-sized matrix canonical type broken"); 4267 #endif 4268 DependentSizedMatrixTypes.InsertNode(Canon, InsertPos); 4269 Types.push_back(Canon); 4270 } 4271 4272 // Already have a canonical version of the matrix type 4273 // 4274 // If it exactly matches the requested type, use it directly. 4275 if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr && 4276 Canon->getRowExpr() == ColumnExpr) 4277 return QualType(Canon, 0); 4278 4279 // Use Canon as the canonical type for newly-built type. 4280 DependentSizedMatrixType *New = new (*this, TypeAlignment) 4281 DependentSizedMatrixType(*this, ElementTy, QualType(Canon, 0), RowExpr, 4282 ColumnExpr, AttrLoc); 4283 Types.push_back(New); 4284 return QualType(New, 0); 4285 } 4286 4287 QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType, 4288 Expr *AddrSpaceExpr, 4289 SourceLocation AttrLoc) const { 4290 assert(AddrSpaceExpr->isInstantiationDependent()); 4291 4292 QualType canonPointeeType = getCanonicalType(PointeeType); 4293 4294 void *insertPos = nullptr; 4295 llvm::FoldingSetNodeID ID; 4296 DependentAddressSpaceType::Profile(ID, *this, canonPointeeType, 4297 AddrSpaceExpr); 4298 4299 DependentAddressSpaceType *canonTy = 4300 DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos); 4301 4302 if (!canonTy) { 4303 canonTy = new (*this, TypeAlignment) 4304 DependentAddressSpaceType(*this, canonPointeeType, 4305 QualType(), AddrSpaceExpr, AttrLoc); 4306 DependentAddressSpaceTypes.InsertNode(canonTy, insertPos); 4307 Types.push_back(canonTy); 4308 } 4309 4310 if (canonPointeeType == PointeeType && 4311 canonTy->getAddrSpaceExpr() == AddrSpaceExpr) 4312 return QualType(canonTy, 0); 4313 4314 auto *sugaredType 4315 = new (*this, TypeAlignment) 4316 DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0), 4317 AddrSpaceExpr, AttrLoc); 4318 Types.push_back(sugaredType); 4319 return QualType(sugaredType, 0); 4320 } 4321 4322 /// Determine whether \p T is canonical as the result type of a function. 4323 static bool isCanonicalResultType(QualType T) { 4324 return T.isCanonical() && 4325 (T.getObjCLifetime() == Qualifiers::OCL_None || 4326 T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone); 4327 } 4328 4329 /// getFunctionNoProtoType - Return a K&R style C function type like 'int()'. 4330 QualType 4331 ASTContext::getFunctionNoProtoType(QualType ResultTy, 4332 const FunctionType::ExtInfo &Info) const { 4333 // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter 4334 // functionality creates a function without a prototype regardless of 4335 // language mode (so it makes them even in C++). Once the rewriter has been 4336 // fixed, this assertion can be enabled again. 4337 //assert(!LangOpts.requiresStrictPrototypes() && 4338 // "strict prototypes are disabled"); 4339 4340 // Unique functions, to guarantee there is only one function of a particular 4341 // structure. 
4342 llvm::FoldingSetNodeID ID; 4343 FunctionNoProtoType::Profile(ID, ResultTy, Info); 4344 4345 void *InsertPos = nullptr; 4346 if (FunctionNoProtoType *FT = 4347 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) 4348 return QualType(FT, 0); 4349 4350 QualType Canonical; 4351 if (!isCanonicalResultType(ResultTy)) { 4352 Canonical = 4353 getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info); 4354 4355 // Get the new insert position for the node we care about. 4356 FunctionNoProtoType *NewIP = 4357 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4358 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4359 } 4360 4361 auto *New = new (*this, TypeAlignment) 4362 FunctionNoProtoType(ResultTy, Canonical, Info); 4363 Types.push_back(New); 4364 FunctionNoProtoTypes.InsertNode(New, InsertPos); 4365 return QualType(New, 0); 4366 } 4367 4368 CanQualType 4369 ASTContext::getCanonicalFunctionResultType(QualType ResultType) const { 4370 CanQualType CanResultType = getCanonicalType(ResultType); 4371 4372 // Canonical result types do not have ARC lifetime qualifiers. 4373 if (CanResultType.getQualifiers().hasObjCLifetime()) { 4374 Qualifiers Qs = CanResultType.getQualifiers(); 4375 Qs.removeObjCLifetime(); 4376 return CanQualType::CreateUnsafe( 4377 getQualifiedType(CanResultType.getUnqualifiedType(), Qs)); 4378 } 4379 4380 return CanResultType; 4381 } 4382 4383 static bool isCanonicalExceptionSpecification( 4384 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) { 4385 if (ESI.Type == EST_None) 4386 return true; 4387 if (!NoexceptInType) 4388 return false; 4389 4390 // C++17 onwards: exception specification is part of the type, as a simple 4391 // boolean "can this function type throw". 4392 if (ESI.Type == EST_BasicNoexcept) 4393 return true; 4394 4395 // A noexcept(expr) specification is (possibly) canonical if expr is 4396 // value-dependent. 4397 if (ESI.Type == EST_DependentNoexcept) 4398 return true; 4399 4400 // A dynamic exception specification is canonical if it only contains pack 4401 // expansions (so we can't tell whether it's non-throwing) and all its 4402 // contained types are canonical. 4403 if (ESI.Type == EST_Dynamic) { 4404 bool AnyPackExpansions = false; 4405 for (QualType ET : ESI.Exceptions) { 4406 if (!ET.isCanonical()) 4407 return false; 4408 if (ET->getAs<PackExpansionType>()) 4409 AnyPackExpansions = true; 4410 } 4411 return AnyPackExpansions; 4412 } 4413 4414 return false; 4415 } 4416 4417 QualType ASTContext::getFunctionTypeInternal( 4418 QualType ResultTy, ArrayRef<QualType> ArgArray, 4419 const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const { 4420 size_t NumArgs = ArgArray.size(); 4421 4422 // Unique functions, to guarantee there is only one function of a particular 4423 // structure. 4424 llvm::FoldingSetNodeID ID; 4425 FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI, 4426 *this, true); 4427 4428 QualType Canonical; 4429 bool Unique = false; 4430 4431 void *InsertPos = nullptr; 4432 if (FunctionProtoType *FPT = 4433 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) { 4434 QualType Existing = QualType(FPT, 0); 4435 4436 // If we find a pre-existing equivalent FunctionProtoType, we can just reuse 4437 // it so long as our exception specification doesn't contain a dependent 4438 // noexcept expression, or we're just looking for a canonical type. 4439 // Otherwise, we're going to need to create a type 4440 // sugar node to hold the concrete expression. 
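    // (This arises for computed specifications of the form `noexcept(expr)`:
    // two otherwise identical prototypes may carry different noexcept
    // expressions, and each needs its own sugar node.)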
4441 if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) || 4442 EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr()) 4443 return Existing; 4444 4445 // We need a new type sugar node for this one, to hold the new noexcept 4446 // expression. We do no canonicalization here, but that's OK since we don't 4447 // expect to see the same noexcept expression much more than once. 4448 Canonical = getCanonicalType(Existing); 4449 Unique = true; 4450 } 4451 4452 bool NoexceptInType = getLangOpts().CPlusPlus17; 4453 bool IsCanonicalExceptionSpec = 4454 isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType); 4455 4456 // Determine whether the type being created is already canonical or not. 4457 bool isCanonical = !Unique && IsCanonicalExceptionSpec && 4458 isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn; 4459 for (unsigned i = 0; i != NumArgs && isCanonical; ++i) 4460 if (!ArgArray[i].isCanonicalAsParam()) 4461 isCanonical = false; 4462 4463 if (OnlyWantCanonical) 4464 assert(isCanonical && 4465 "given non-canonical parameters constructing canonical type"); 4466 4467 // If this type isn't canonical, get the canonical version of it if we don't 4468 // already have it. The exception spec is only partially part of the 4469 // canonical type, and only in C++17 onwards. 4470 if (!isCanonical && Canonical.isNull()) { 4471 SmallVector<QualType, 16> CanonicalArgs; 4472 CanonicalArgs.reserve(NumArgs); 4473 for (unsigned i = 0; i != NumArgs; ++i) 4474 CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i])); 4475 4476 llvm::SmallVector<QualType, 8> ExceptionTypeStorage; 4477 FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI; 4478 CanonicalEPI.HasTrailingReturn = false; 4479 4480 if (IsCanonicalExceptionSpec) { 4481 // Exception spec is already OK. 4482 } else if (NoexceptInType) { 4483 switch (EPI.ExceptionSpec.Type) { 4484 case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated: 4485 // We don't know yet. It shouldn't matter what we pick here; no-one 4486 // should ever look at this. 4487 [[fallthrough]]; 4488 case EST_None: case EST_MSAny: case EST_NoexceptFalse: 4489 CanonicalEPI.ExceptionSpec.Type = EST_None; 4490 break; 4491 4492 // A dynamic exception specification is almost always "not noexcept", 4493 // with the exception that a pack expansion might expand to no types. 4494 case EST_Dynamic: { 4495 bool AnyPacks = false; 4496 for (QualType ET : EPI.ExceptionSpec.Exceptions) { 4497 if (ET->getAs<PackExpansionType>()) 4498 AnyPacks = true; 4499 ExceptionTypeStorage.push_back(getCanonicalType(ET)); 4500 } 4501 if (!AnyPacks) 4502 CanonicalEPI.ExceptionSpec.Type = EST_None; 4503 else { 4504 CanonicalEPI.ExceptionSpec.Type = EST_Dynamic; 4505 CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage; 4506 } 4507 break; 4508 } 4509 4510 case EST_DynamicNone: 4511 case EST_BasicNoexcept: 4512 case EST_NoexceptTrue: 4513 case EST_NoThrow: 4514 CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept; 4515 break; 4516 4517 case EST_DependentNoexcept: 4518 llvm_unreachable("dependent noexcept is already canonical"); 4519 } 4520 } else { 4521 CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo(); 4522 } 4523 4524 // Adjust the canonical function result type. 4525 CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy); 4526 Canonical = 4527 getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true); 4528 4529 // Get the new insert position for the node we care about. 
4530 FunctionProtoType *NewIP = 4531 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4532 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4533 } 4534 4535 // Compute the needed size to hold this FunctionProtoType and the 4536 // various trailing objects. 4537 auto ESH = FunctionProtoType::getExceptionSpecSize( 4538 EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size()); 4539 size_t Size = FunctionProtoType::totalSizeToAlloc< 4540 QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields, 4541 FunctionType::ExceptionType, Expr *, FunctionDecl *, 4542 FunctionProtoType::ExtParameterInfo, Qualifiers>( 4543 NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(), 4544 ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr, 4545 EPI.ExtParameterInfos ? NumArgs : 0, 4546 EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0); 4547 4548 auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment); 4549 FunctionProtoType::ExtProtoInfo newEPI = EPI; 4550 new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI); 4551 Types.push_back(FTP); 4552 if (!Unique) 4553 FunctionProtoTypes.InsertNode(FTP, InsertPos); 4554 return QualType(FTP, 0); 4555 } 4556 4557 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const { 4558 llvm::FoldingSetNodeID ID; 4559 PipeType::Profile(ID, T, ReadOnly); 4560 4561 void *InsertPos = nullptr; 4562 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos)) 4563 return QualType(PT, 0); 4564 4565 // If the pipe element type isn't canonical, this won't be a canonical type 4566 // either, so fill in the canonical type field. 4567 QualType Canonical; 4568 if (!T.isCanonical()) { 4569 Canonical = getPipeType(getCanonicalType(T), ReadOnly); 4570 4571 // Get the new insert position for the node we care about. 4572 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos); 4573 assert(!NewIP && "Shouldn't be in the map!"); 4574 (void)NewIP; 4575 } 4576 auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly); 4577 Types.push_back(New); 4578 PipeTypes.InsertNode(New, InsertPos); 4579 return QualType(New, 0); 4580 } 4581 4582 QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const { 4583 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space. 4584 return LangOpts.OpenCL ? 
getAddrSpaceQualType(Ty, LangAS::opencl_constant) 4585 : Ty; 4586 } 4587 4588 QualType ASTContext::getReadPipeType(QualType T) const { 4589 return getPipeType(T, true); 4590 } 4591 4592 QualType ASTContext::getWritePipeType(QualType T) const { 4593 return getPipeType(T, false); 4594 } 4595 4596 QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const { 4597 llvm::FoldingSetNodeID ID; 4598 BitIntType::Profile(ID, IsUnsigned, NumBits); 4599 4600 void *InsertPos = nullptr; 4601 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4602 return QualType(EIT, 0); 4603 4604 auto *New = new (*this, TypeAlignment) BitIntType(IsUnsigned, NumBits); 4605 BitIntTypes.InsertNode(New, InsertPos); 4606 Types.push_back(New); 4607 return QualType(New, 0); 4608 } 4609 4610 QualType ASTContext::getDependentBitIntType(bool IsUnsigned, 4611 Expr *NumBitsExpr) const { 4612 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent"); 4613 llvm::FoldingSetNodeID ID; 4614 DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr); 4615 4616 void *InsertPos = nullptr; 4617 if (DependentBitIntType *Existing = 4618 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4619 return QualType(Existing, 0); 4620 4621 auto *New = new (*this, TypeAlignment) 4622 DependentBitIntType(*this, IsUnsigned, NumBitsExpr); 4623 DependentBitIntTypes.InsertNode(New, InsertPos); 4624 4625 Types.push_back(New); 4626 return QualType(New, 0); 4627 } 4628 4629 #ifndef NDEBUG 4630 static bool NeedsInjectedClassNameType(const RecordDecl *D) { 4631 if (!isa<CXXRecordDecl>(D)) return false; 4632 const auto *RD = cast<CXXRecordDecl>(D); 4633 if (isa<ClassTemplatePartialSpecializationDecl>(RD)) 4634 return true; 4635 if (RD->getDescribedClassTemplate() && 4636 !isa<ClassTemplateSpecializationDecl>(RD)) 4637 return true; 4638 return false; 4639 } 4640 #endif 4641 4642 /// getInjectedClassNameType - Return the unique reference to the 4643 /// injected class name type for the specified templated declaration. 4644 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl, 4645 QualType TST) const { 4646 assert(NeedsInjectedClassNameType(Decl)); 4647 if (Decl->TypeForDecl) { 4648 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4649 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) { 4650 assert(PrevDecl->TypeForDecl && "previous declaration has no type"); 4651 Decl->TypeForDecl = PrevDecl->TypeForDecl; 4652 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4653 } else { 4654 Type *newType = 4655 new (*this, TypeAlignment) InjectedClassNameType(Decl, TST); 4656 Decl->TypeForDecl = newType; 4657 Types.push_back(newType); 4658 } 4659 return QualType(Decl->TypeForDecl, 0); 4660 } 4661 4662 /// getTypeDeclType - Return the unique reference to the type for the 4663 /// specified type declaration. 
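/// This out-of-line helper implements the slow path of getTypeDeclType, taken
/// only when the declaration has no cached TypeForDecl; the common cached case
/// is handled inline in the header.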
4664 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const { 4665 assert(Decl && "Passed null for Decl param"); 4666 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case"); 4667 4668 if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl)) 4669 return getTypedefType(Typedef); 4670 4671 assert(!isa<TemplateTypeParmDecl>(Decl) && 4672 "Template type parameter types are always available."); 4673 4674 if (const auto *Record = dyn_cast<RecordDecl>(Decl)) { 4675 assert(Record->isFirstDecl() && "struct/union has previous declaration"); 4676 assert(!NeedsInjectedClassNameType(Record)); 4677 return getRecordType(Record); 4678 } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) { 4679 assert(Enum->isFirstDecl() && "enum has previous declaration"); 4680 return getEnumType(Enum); 4681 } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) { 4682 return getUnresolvedUsingType(Using); 4683 } else 4684 llvm_unreachable("TypeDecl without a type?"); 4685 4686 return QualType(Decl->TypeForDecl, 0); 4687 } 4688 4689 /// getTypedefType - Return the unique reference to the type for the 4690 /// specified typedef name decl. 4691 QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl, 4692 QualType Underlying) const { 4693 if (!Decl->TypeForDecl) { 4694 if (Underlying.isNull()) 4695 Underlying = Decl->getUnderlyingType(); 4696 auto *NewType = new (*this, TypeAlignment) TypedefType( 4697 Type::Typedef, Decl, QualType(), getCanonicalType(Underlying)); 4698 Decl->TypeForDecl = NewType; 4699 Types.push_back(NewType); 4700 return QualType(NewType, 0); 4701 } 4702 if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying) 4703 return QualType(Decl->TypeForDecl, 0); 4704 assert(hasSameType(Decl->getUnderlyingType(), Underlying)); 4705 4706 llvm::FoldingSetNodeID ID; 4707 TypedefType::Profile(ID, Decl, Underlying); 4708 4709 void *InsertPos = nullptr; 4710 if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) { 4711 assert(!T->typeMatchesDecl() && 4712 "non-divergent case should be handled with TypeDecl"); 4713 return QualType(T, 0); 4714 } 4715 4716 void *Mem = 4717 Allocate(TypedefType::totalSizeToAlloc<QualType>(true), TypeAlignment); 4718 auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying, 4719 getCanonicalType(Underlying)); 4720 TypedefTypes.InsertNode(NewType, InsertPos); 4721 Types.push_back(NewType); 4722 return QualType(NewType, 0); 4723 } 4724 4725 QualType ASTContext::getUsingType(const UsingShadowDecl *Found, 4726 QualType Underlying) const { 4727 llvm::FoldingSetNodeID ID; 4728 UsingType::Profile(ID, Found, Underlying); 4729 4730 void *InsertPos = nullptr; 4731 if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos)) 4732 return QualType(T, 0); 4733 4734 const Type *TypeForDecl = 4735 cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl(); 4736 4737 assert(!Underlying.hasLocalQualifiers()); 4738 QualType Canon = Underlying->getCanonicalTypeInternal(); 4739 assert(TypeForDecl->getCanonicalTypeInternal() == Canon); 4740 4741 if (Underlying.getTypePtr() == TypeForDecl) 4742 Underlying = QualType(); 4743 void *Mem = 4744 Allocate(UsingType::totalSizeToAlloc<QualType>(!Underlying.isNull()), 4745 TypeAlignment); 4746 UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon); 4747 Types.push_back(NewType); 4748 UsingTypes.InsertNode(NewType, InsertPos); 4749 return QualType(NewType, 0); 4750 } 4751 4752 QualType ASTContext::getRecordType(const RecordDecl *Decl) const { 4753 if 
(Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4754 4755 if (const RecordDecl *PrevDecl = Decl->getPreviousDecl()) 4756 if (PrevDecl->TypeForDecl) 4757 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4758 4759 auto *newType = new (*this, TypeAlignment) RecordType(Decl); 4760 Decl->TypeForDecl = newType; 4761 Types.push_back(newType); 4762 return QualType(newType, 0); 4763 } 4764 4765 QualType ASTContext::getEnumType(const EnumDecl *Decl) const { 4766 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4767 4768 if (const EnumDecl *PrevDecl = Decl->getPreviousDecl()) 4769 if (PrevDecl->TypeForDecl) 4770 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4771 4772 auto *newType = new (*this, TypeAlignment) EnumType(Decl); 4773 Decl->TypeForDecl = newType; 4774 Types.push_back(newType); 4775 return QualType(newType, 0); 4776 } 4777 4778 QualType ASTContext::getUnresolvedUsingType( 4779 const UnresolvedUsingTypenameDecl *Decl) const { 4780 if (Decl->TypeForDecl) 4781 return QualType(Decl->TypeForDecl, 0); 4782 4783 if (const UnresolvedUsingTypenameDecl *CanonicalDecl = 4784 Decl->getCanonicalDecl()) 4785 if (CanonicalDecl->TypeForDecl) 4786 return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0); 4787 4788 Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Decl); 4789 Decl->TypeForDecl = newType; 4790 Types.push_back(newType); 4791 return QualType(newType, 0); 4792 } 4793 4794 QualType ASTContext::getAttributedType(attr::Kind attrKind, 4795 QualType modifiedType, 4796 QualType equivalentType) const { 4797 llvm::FoldingSetNodeID id; 4798 AttributedType::Profile(id, attrKind, modifiedType, equivalentType); 4799 4800 void *insertPos = nullptr; 4801 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos); 4802 if (type) return QualType(type, 0); 4803 4804 QualType canon = getCanonicalType(equivalentType); 4805 type = new (*this, TypeAlignment) 4806 AttributedType(canon, attrKind, modifiedType, equivalentType); 4807 4808 Types.push_back(type); 4809 AttributedTypes.InsertNode(type, insertPos); 4810 4811 return QualType(type, 0); 4812 } 4813 4814 QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr, 4815 QualType Wrapped) { 4816 llvm::FoldingSetNodeID ID; 4817 BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr); 4818 4819 void *InsertPos = nullptr; 4820 BTFTagAttributedType *Ty = 4821 BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos); 4822 if (Ty) 4823 return QualType(Ty, 0); 4824 4825 QualType Canon = getCanonicalType(Wrapped); 4826 Ty = new (*this, TypeAlignment) BTFTagAttributedType(Canon, Wrapped, BTFAttr); 4827 4828 Types.push_back(Ty); 4829 BTFTagAttributedTypes.InsertNode(Ty, InsertPos); 4830 4831 return QualType(Ty, 0); 4832 } 4833 4834 /// Retrieve a substitution-result type. 
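/// For illustration (hedged example with hypothetical names): when
/// instantiating
/// ```
/// template <typename T> T identity(T v) { return v; }
/// int x = identity(42);   // T is replaced by 'int'
/// ```
/// a use of T in the instantiated signature is represented by a
/// SubstTemplateTypeParmType whose replacement type is 'int' and whose Index
/// records which template parameter was substituted.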
4835 QualType ASTContext::getSubstTemplateTypeParmType( 4836 QualType Replacement, Decl *AssociatedDecl, unsigned Index, 4837 std::optional<unsigned> PackIndex) const { 4838 llvm::FoldingSetNodeID ID; 4839 SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index, 4840 PackIndex); 4841 void *InsertPos = nullptr; 4842 SubstTemplateTypeParmType *SubstParm = 4843 SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); 4844 4845 if (!SubstParm) { 4846 void *Mem = Allocate(SubstTemplateTypeParmType::totalSizeToAlloc<QualType>( 4847 !Replacement.isCanonical()), 4848 TypeAlignment); 4849 SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl, 4850 Index, PackIndex); 4851 Types.push_back(SubstParm); 4852 SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos); 4853 } 4854 4855 return QualType(SubstParm, 0); 4856 } 4857 4858 /// Retrieve a 4859 QualType 4860 ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl, 4861 unsigned Index, bool Final, 4862 const TemplateArgument &ArgPack) { 4863 #ifndef NDEBUG 4864 for (const auto &P : ArgPack.pack_elements()) 4865 assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type"); 4866 #endif 4867 4868 llvm::FoldingSetNodeID ID; 4869 SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final, 4870 ArgPack); 4871 void *InsertPos = nullptr; 4872 if (SubstTemplateTypeParmPackType *SubstParm = 4873 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos)) 4874 return QualType(SubstParm, 0); 4875 4876 QualType Canon; 4877 { 4878 TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack); 4879 if (!AssociatedDecl->isCanonicalDecl() || 4880 !CanonArgPack.structurallyEquals(ArgPack)) { 4881 Canon = getSubstTemplateTypeParmPackType( 4882 AssociatedDecl->getCanonicalDecl(), Index, Final, CanonArgPack); 4883 [[maybe_unused]] const auto *Nothing = 4884 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos); 4885 assert(!Nothing); 4886 } 4887 } 4888 4889 auto *SubstParm = new (*this, TypeAlignment) SubstTemplateTypeParmPackType( 4890 Canon, AssociatedDecl, Index, Final, ArgPack); 4891 Types.push_back(SubstParm); 4892 SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos); 4893 return QualType(SubstParm, 0); 4894 } 4895 4896 /// Retrieve the template type parameter type for a template 4897 /// parameter or parameter pack with the given depth, index, and (optionally) 4898 /// name. 
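/// For illustration (hypothetical declarations):
/// ```
/// template <typename T>            // T: depth 0, index 0
/// struct Outer {
///   template <typename U, int N>   // U: depth 1, index 0
///   void f();                      // (N is a non-type parameter at index 1)
/// };
/// ```
/// A pack such as 'typename ...Ts' is additionally marked via ParameterPack.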
4899 QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
4900                                              bool ParameterPack,
4901                                              TemplateTypeParmDecl *TTPDecl) const {
4902   llvm::FoldingSetNodeID ID;
4903   TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
4904   void *InsertPos = nullptr;
4905   TemplateTypeParmType *TypeParm
4906     = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4907
4908   if (TypeParm)
4909     return QualType(TypeParm, 0);
4910
4911   if (TTPDecl) {
4912     QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
4913     TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);
4914
4915     TemplateTypeParmType *TypeCheck
4916       = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4917     assert(!TypeCheck && "Template type parameter canonical type broken");
4918     (void)TypeCheck;
4919   } else
4920     TypeParm = new (*this, TypeAlignment)
4921       TemplateTypeParmType(Depth, Index, ParameterPack);
4922
4923   Types.push_back(TypeParm);
4924   TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
4925
4926   return QualType(TypeParm, 0);
4927 }
4928
4929 TypeSourceInfo *
4930 ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
4931                                               SourceLocation NameLoc,
4932                                               const TemplateArgumentListInfo &Args,
4933                                               QualType Underlying) const {
4934   assert(!Name.getAsDependentTemplateName() &&
4935          "No dependent template names here!");
4936   QualType TST =
4937       getTemplateSpecializationType(Name, Args.arguments(), Underlying);
4938
4939   TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
4940   TemplateSpecializationTypeLoc TL =
4941       DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
4942   TL.setTemplateKeywordLoc(SourceLocation());
4943   TL.setTemplateNameLoc(NameLoc);
4944   TL.setLAngleLoc(Args.getLAngleLoc());
4945   TL.setRAngleLoc(Args.getRAngleLoc());
4946   for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
4947     TL.setArgLocInfo(i, Args[i].getLocInfo());
4948   return DI;
4949 }
4950
4951 QualType
4952 ASTContext::getTemplateSpecializationType(TemplateName Template,
4953                                           ArrayRef<TemplateArgumentLoc> Args,
4954                                           QualType Underlying) const {
4955   assert(!Template.getAsDependentTemplateName() &&
4956          "No dependent template names here!");
4957
4958   SmallVector<TemplateArgument, 4> ArgVec;
4959   ArgVec.reserve(Args.size());
4960   for (const TemplateArgumentLoc &Arg : Args)
4961     ArgVec.push_back(Arg.getArgument());
4962
4963   return getTemplateSpecializationType(Template, ArgVec, Underlying);
4964 }
4965
4966 #ifndef NDEBUG
4967 static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
4968   for (const TemplateArgument &Arg : Args)
4969     if (Arg.isPackExpansion())
4970       return true;
4971
4972   return false;
4973 }
4974 #endif
4975
4976 QualType
4977 ASTContext::getTemplateSpecializationType(TemplateName Template,
4978                                           ArrayRef<TemplateArgument> Args,
4979                                           QualType Underlying) const {
4980   assert(!Template.getAsDependentTemplateName() &&
4981          "No dependent template names here!");
4982   // Look through qualified template names.
4983   if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4984     Template = QTN->getUnderlyingTemplate();
4985
4986   const auto *TD = Template.getAsTemplateDecl();
4987   bool IsTypeAlias = TD && TD->isTypeAlias();
4988   QualType CanonType;
4989   if (!Underlying.isNull())
4990     CanonType = getCanonicalType(Underlying);
4991   else {
4992     // We can get here with an alias template when the specialization contains
4993     // a pack expansion that does not match up with a parameter pack.
4994 assert((!IsTypeAlias || hasAnyPackExpansions(Args)) && 4995 "Caller must compute aliased type"); 4996 IsTypeAlias = false; 4997 CanonType = getCanonicalTemplateSpecializationType(Template, Args); 4998 } 4999 5000 // Allocate the (non-canonical) template specialization type, but don't 5001 // try to unique it: these types typically have location information that 5002 // we don't unique and don't want to lose. 5003 void *Mem = Allocate(sizeof(TemplateSpecializationType) + 5004 sizeof(TemplateArgument) * Args.size() + 5005 (IsTypeAlias? sizeof(QualType) : 0), 5006 TypeAlignment); 5007 auto *Spec 5008 = new (Mem) TemplateSpecializationType(Template, Args, CanonType, 5009 IsTypeAlias ? Underlying : QualType()); 5010 5011 Types.push_back(Spec); 5012 return QualType(Spec, 0); 5013 } 5014 5015 QualType ASTContext::getCanonicalTemplateSpecializationType( 5016 TemplateName Template, ArrayRef<TemplateArgument> Args) const { 5017 assert(!Template.getAsDependentTemplateName() && 5018 "No dependent template names here!"); 5019 5020 // Look through qualified template names. 5021 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 5022 Template = TemplateName(QTN->getUnderlyingTemplate()); 5023 5024 // Build the canonical template specialization type. 5025 TemplateName CanonTemplate = getCanonicalTemplateName(Template); 5026 bool AnyNonCanonArgs = false; 5027 auto CanonArgs = 5028 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); 5029 5030 // Determine whether this canonical template specialization type already 5031 // exists. 5032 llvm::FoldingSetNodeID ID; 5033 TemplateSpecializationType::Profile(ID, CanonTemplate, 5034 CanonArgs, *this); 5035 5036 void *InsertPos = nullptr; 5037 TemplateSpecializationType *Spec 5038 = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5039 5040 if (!Spec) { 5041 // Allocate a new canonical template specialization type. 
5042 void *Mem = Allocate((sizeof(TemplateSpecializationType) + 5043 sizeof(TemplateArgument) * CanonArgs.size()), 5044 TypeAlignment); 5045 Spec = new (Mem) TemplateSpecializationType(CanonTemplate, 5046 CanonArgs, 5047 QualType(), QualType()); 5048 Types.push_back(Spec); 5049 TemplateSpecializationTypes.InsertNode(Spec, InsertPos); 5050 } 5051 5052 assert(Spec->isDependentType() && 5053 "Non-dependent template-id type must have a canonical type"); 5054 return QualType(Spec, 0); 5055 } 5056 5057 QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword, 5058 NestedNameSpecifier *NNS, 5059 QualType NamedType, 5060 TagDecl *OwnedTagDecl) const { 5061 llvm::FoldingSetNodeID ID; 5062 ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl); 5063 5064 void *InsertPos = nullptr; 5065 ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 5066 if (T) 5067 return QualType(T, 0); 5068 5069 QualType Canon = NamedType; 5070 if (!Canon.isCanonical()) { 5071 Canon = getCanonicalType(NamedType); 5072 ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 5073 assert(!CheckT && "Elaborated canonical type broken"); 5074 (void)CheckT; 5075 } 5076 5077 void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl), 5078 TypeAlignment); 5079 T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl); 5080 5081 Types.push_back(T); 5082 ElaboratedTypes.InsertNode(T, InsertPos); 5083 return QualType(T, 0); 5084 } 5085 5086 QualType 5087 ASTContext::getParenType(QualType InnerType) const { 5088 llvm::FoldingSetNodeID ID; 5089 ParenType::Profile(ID, InnerType); 5090 5091 void *InsertPos = nullptr; 5092 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5093 if (T) 5094 return QualType(T, 0); 5095 5096 QualType Canon = InnerType; 5097 if (!Canon.isCanonical()) { 5098 Canon = getCanonicalType(InnerType); 5099 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5100 assert(!CheckT && "Paren canonical type broken"); 5101 (void)CheckT; 5102 } 5103 5104 T = new (*this, TypeAlignment) ParenType(InnerType, Canon); 5105 Types.push_back(T); 5106 ParenTypes.InsertNode(T, InsertPos); 5107 return QualType(T, 0); 5108 } 5109 5110 QualType 5111 ASTContext::getMacroQualifiedType(QualType UnderlyingTy, 5112 const IdentifierInfo *MacroII) const { 5113 QualType Canon = UnderlyingTy; 5114 if (!Canon.isCanonical()) 5115 Canon = getCanonicalType(UnderlyingTy); 5116 5117 auto *newType = new (*this, TypeAlignment) 5118 MacroQualifiedType(UnderlyingTy, Canon, MacroII); 5119 Types.push_back(newType); 5120 return QualType(newType, 0); 5121 } 5122 5123 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, 5124 NestedNameSpecifier *NNS, 5125 const IdentifierInfo *Name, 5126 QualType Canon) const { 5127 if (Canon.isNull()) { 5128 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5129 if (CanonNNS != NNS) 5130 Canon = getDependentNameType(Keyword, CanonNNS, Name); 5131 } 5132 5133 llvm::FoldingSetNodeID ID; 5134 DependentNameType::Profile(ID, Keyword, NNS, Name); 5135 5136 void *InsertPos = nullptr; 5137 DependentNameType *T 5138 = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); 5139 if (T) 5140 return QualType(T, 0); 5141 5142 T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon); 5143 Types.push_back(T); 5144 DependentNameTypes.InsertNode(T, InsertPos); 5145 return QualType(T, 0); 5146 } 5147 5148 QualType 
ASTContext::getDependentTemplateSpecializationType( 5149 ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, 5150 const IdentifierInfo *Name, ArrayRef<TemplateArgumentLoc> Args) const { 5151 // TODO: avoid this copy 5152 SmallVector<TemplateArgument, 16> ArgCopy; 5153 for (unsigned I = 0, E = Args.size(); I != E; ++I) 5154 ArgCopy.push_back(Args[I].getArgument()); 5155 return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy); 5156 } 5157 5158 QualType 5159 ASTContext::getDependentTemplateSpecializationType( 5160 ElaboratedTypeKeyword Keyword, 5161 NestedNameSpecifier *NNS, 5162 const IdentifierInfo *Name, 5163 ArrayRef<TemplateArgument> Args) const { 5164 assert((!NNS || NNS->isDependent()) && 5165 "nested-name-specifier must be dependent"); 5166 5167 llvm::FoldingSetNodeID ID; 5168 DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS, 5169 Name, Args); 5170 5171 void *InsertPos = nullptr; 5172 DependentTemplateSpecializationType *T 5173 = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5174 if (T) 5175 return QualType(T, 0); 5176 5177 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5178 5179 ElaboratedTypeKeyword CanonKeyword = Keyword; 5180 if (Keyword == ETK_None) CanonKeyword = ETK_Typename; 5181 5182 bool AnyNonCanonArgs = false; 5183 auto CanonArgs = 5184 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); 5185 5186 QualType Canon; 5187 if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) { 5188 Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS, 5189 Name, 5190 CanonArgs); 5191 5192 // Find the insert position again. 5193 [[maybe_unused]] auto *Nothing = 5194 DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5195 assert(!Nothing && "canonical type broken"); 5196 } 5197 5198 void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) + 5199 sizeof(TemplateArgument) * Args.size()), 5200 TypeAlignment); 5201 T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS, 5202 Name, Args, Canon); 5203 Types.push_back(T); 5204 DependentTemplateSpecializationTypes.InsertNode(T, InsertPos); 5205 return QualType(T, 0); 5206 } 5207 5208 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) { 5209 TemplateArgument Arg; 5210 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) { 5211 QualType ArgType = getTypeDeclType(TTP); 5212 if (TTP->isParameterPack()) 5213 ArgType = getPackExpansionType(ArgType, std::nullopt); 5214 5215 Arg = TemplateArgument(ArgType); 5216 } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) { 5217 QualType T = 5218 NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this); 5219 // For class NTTPs, ensure we include the 'const' so the type matches that 5220 // of a real template argument. 5221 // FIXME: It would be more faithful to model this as something like an 5222 // lvalue-to-rvalue conversion applied to a const-qualified lvalue. 
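    // Illustrative sketch (hypothetical 'SomeLiteralClass'): for a C++20
    // class-type NTTP such as
    //   template <SomeLiteralClass V> struct Holder;
    // the template parameter object is const, so the injected argument's type
    // becomes 'const SomeLiteralClass' to match a real argument.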
5223 if (T->isRecordType()) 5224 T.addConst(); 5225 Expr *E = new (*this) DeclRefExpr( 5226 *this, NTTP, /*RefersToEnclosingVariableOrCapture*/ false, T, 5227 Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation()); 5228 5229 if (NTTP->isParameterPack()) 5230 E = new (*this) 5231 PackExpansionExpr(DependentTy, E, NTTP->getLocation(), std::nullopt); 5232 Arg = TemplateArgument(E); 5233 } else { 5234 auto *TTP = cast<TemplateTemplateParmDecl>(Param); 5235 if (TTP->isParameterPack()) 5236 Arg = TemplateArgument(TemplateName(TTP), std::optional<unsigned>()); 5237 else 5238 Arg = TemplateArgument(TemplateName(TTP)); 5239 } 5240 5241 if (Param->isTemplateParameterPack()) 5242 Arg = TemplateArgument::CreatePackCopy(*this, Arg); 5243 5244 return Arg; 5245 } 5246 5247 void 5248 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params, 5249 SmallVectorImpl<TemplateArgument> &Args) { 5250 Args.reserve(Args.size() + Params->size()); 5251 5252 for (NamedDecl *Param : *Params) 5253 Args.push_back(getInjectedTemplateArg(Param)); 5254 } 5255 5256 QualType ASTContext::getPackExpansionType(QualType Pattern, 5257 std::optional<unsigned> NumExpansions, 5258 bool ExpectPackInType) { 5259 assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) && 5260 "Pack expansions must expand one or more parameter packs"); 5261 5262 llvm::FoldingSetNodeID ID; 5263 PackExpansionType::Profile(ID, Pattern, NumExpansions); 5264 5265 void *InsertPos = nullptr; 5266 PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5267 if (T) 5268 return QualType(T, 0); 5269 5270 QualType Canon; 5271 if (!Pattern.isCanonical()) { 5272 Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions, 5273 /*ExpectPackInType=*/false); 5274 5275 // Find the insert position again, in case we inserted an element into 5276 // PackExpansionTypes and invalidated our insert position. 5277 PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5278 } 5279 5280 T = new (*this, TypeAlignment) 5281 PackExpansionType(Pattern, Canon, NumExpansions); 5282 Types.push_back(T); 5283 PackExpansionTypes.InsertNode(T, InsertPos); 5284 return QualType(T, 0); 5285 } 5286 5287 /// CmpProtocolNames - Comparison predicate for sorting protocols 5288 /// alphabetically. 5289 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS, 5290 ObjCProtocolDecl *const *RHS) { 5291 return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName()); 5292 } 5293 5294 static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) { 5295 if (Protocols.empty()) return true; 5296 5297 if (Protocols[0]->getCanonicalDecl() != Protocols[0]) 5298 return false; 5299 5300 for (unsigned i = 1; i != Protocols.size(); ++i) 5301 if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 || 5302 Protocols[i]->getCanonicalDecl() != Protocols[i]) 5303 return false; 5304 return true; 5305 } 5306 5307 static void 5308 SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) { 5309 // Sort protocols, keyed by name. 5310 llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames); 5311 5312 // Canonicalize. 5313 for (ObjCProtocolDecl *&P : Protocols) 5314 P = P->getCanonicalDecl(); 5315 5316 // Remove duplicates. 
5317 auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end()); 5318 Protocols.erase(ProtocolsEnd, Protocols.end()); 5319 } 5320 5321 QualType ASTContext::getObjCObjectType(QualType BaseType, 5322 ObjCProtocolDecl * const *Protocols, 5323 unsigned NumProtocols) const { 5324 return getObjCObjectType(BaseType, {}, 5325 llvm::ArrayRef(Protocols, NumProtocols), 5326 /*isKindOf=*/false); 5327 } 5328 5329 QualType ASTContext::getObjCObjectType( 5330 QualType baseType, 5331 ArrayRef<QualType> typeArgs, 5332 ArrayRef<ObjCProtocolDecl *> protocols, 5333 bool isKindOf) const { 5334 // If the base type is an interface and there aren't any protocols or 5335 // type arguments to add, then the interface type will do just fine. 5336 if (typeArgs.empty() && protocols.empty() && !isKindOf && 5337 isa<ObjCInterfaceType>(baseType)) 5338 return baseType; 5339 5340 // Look in the folding set for an existing type. 5341 llvm::FoldingSetNodeID ID; 5342 ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf); 5343 void *InsertPos = nullptr; 5344 if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos)) 5345 return QualType(QT, 0); 5346 5347 // Determine the type arguments to be used for canonicalization, 5348 // which may be explicitly specified here or written on the base 5349 // type. 5350 ArrayRef<QualType> effectiveTypeArgs = typeArgs; 5351 if (effectiveTypeArgs.empty()) { 5352 if (const auto *baseObject = baseType->getAs<ObjCObjectType>()) 5353 effectiveTypeArgs = baseObject->getTypeArgs(); 5354 } 5355 5356 // Build the canonical type, which has the canonical base type and a 5357 // sorted-and-uniqued list of protocols and the type arguments 5358 // canonicalized. 5359 QualType canonical; 5360 bool typeArgsAreCanonical = llvm::all_of( 5361 effectiveTypeArgs, [&](QualType type) { return type.isCanonical(); }); 5362 bool protocolsSorted = areSortedAndUniqued(protocols); 5363 if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) { 5364 // Determine the canonical type arguments. 5365 ArrayRef<QualType> canonTypeArgs; 5366 SmallVector<QualType, 4> canonTypeArgsVec; 5367 if (!typeArgsAreCanonical) { 5368 canonTypeArgsVec.reserve(effectiveTypeArgs.size()); 5369 for (auto typeArg : effectiveTypeArgs) 5370 canonTypeArgsVec.push_back(getCanonicalType(typeArg)); 5371 canonTypeArgs = canonTypeArgsVec; 5372 } else { 5373 canonTypeArgs = effectiveTypeArgs; 5374 } 5375 5376 ArrayRef<ObjCProtocolDecl *> canonProtocols; 5377 SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec; 5378 if (!protocolsSorted) { 5379 canonProtocolsVec.append(protocols.begin(), protocols.end()); 5380 SortAndUniqueProtocols(canonProtocolsVec); 5381 canonProtocols = canonProtocolsVec; 5382 } else { 5383 canonProtocols = protocols; 5384 } 5385 5386 canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs, 5387 canonProtocols, isKindOf); 5388 5389 // Regenerate InsertPos. 5390 ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos); 5391 } 5392 5393 unsigned size = sizeof(ObjCObjectTypeImpl); 5394 size += typeArgs.size() * sizeof(QualType); 5395 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5396 void *mem = Allocate(size, TypeAlignment); 5397 auto *T = 5398 new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols, 5399 isKindOf); 5400 5401 Types.push_back(T); 5402 ObjCObjectTypes.InsertNode(T, InsertPos); 5403 return QualType(T, 0); 5404 } 5405 5406 /// Apply Objective-C protocol qualifiers to the given type. 
5407 /// If this is for the canonical type of a type parameter, we can apply 5408 /// protocol qualifiers on the ObjCObjectPointerType. 5409 QualType 5410 ASTContext::applyObjCProtocolQualifiers(QualType type, 5411 ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError, 5412 bool allowOnPointerType) const { 5413 hasError = false; 5414 5415 if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) { 5416 return getObjCTypeParamType(objT->getDecl(), protocols); 5417 } 5418 5419 // Apply protocol qualifiers to ObjCObjectPointerType. 5420 if (allowOnPointerType) { 5421 if (const auto *objPtr = 5422 dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) { 5423 const ObjCObjectType *objT = objPtr->getObjectType(); 5424 // Merge protocol lists and construct ObjCObjectType. 5425 SmallVector<ObjCProtocolDecl*, 8> protocolsVec; 5426 protocolsVec.append(objT->qual_begin(), 5427 objT->qual_end()); 5428 protocolsVec.append(protocols.begin(), protocols.end()); 5429 ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec; 5430 type = getObjCObjectType( 5431 objT->getBaseType(), 5432 objT->getTypeArgsAsWritten(), 5433 protocols, 5434 objT->isKindOfTypeAsWritten()); 5435 return getObjCObjectPointerType(type); 5436 } 5437 } 5438 5439 // Apply protocol qualifiers to ObjCObjectType. 5440 if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){ 5441 // FIXME: Check for protocols to which the class type is already 5442 // known to conform. 5443 5444 return getObjCObjectType(objT->getBaseType(), 5445 objT->getTypeArgsAsWritten(), 5446 protocols, 5447 objT->isKindOfTypeAsWritten()); 5448 } 5449 5450 // If the canonical type is ObjCObjectType, ... 5451 if (type->isObjCObjectType()) { 5452 // Silently overwrite any existing protocol qualifiers. 5453 // TODO: determine whether that's the right thing to do. 5454 5455 // FIXME: Check for protocols to which the class type is already 5456 // known to conform. 5457 return getObjCObjectType(type, {}, protocols, false); 5458 } 5459 5460 // id<protocol-list> 5461 if (type->isObjCIdType()) { 5462 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5463 type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols, 5464 objPtr->isKindOfType()); 5465 return getObjCObjectPointerType(type); 5466 } 5467 5468 // Class<protocol-list> 5469 if (type->isObjCClassType()) { 5470 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5471 type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols, 5472 objPtr->isKindOfType()); 5473 return getObjCObjectPointerType(type); 5474 } 5475 5476 hasError = true; 5477 return type; 5478 } 5479 5480 QualType 5481 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl, 5482 ArrayRef<ObjCProtocolDecl *> protocols) const { 5483 // Look in the folding set for an existing type. 5484 llvm::FoldingSetNodeID ID; 5485 ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols); 5486 void *InsertPos = nullptr; 5487 if (ObjCTypeParamType *TypeParam = 5488 ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos)) 5489 return QualType(TypeParam, 0); 5490 5491 // We canonicalize to the underlying type. 5492 QualType Canonical = getCanonicalType(Decl->getUnderlyingType()); 5493 if (!protocols.empty()) { 5494 // Apply the protocol qualifers. 
5495 bool hasError; 5496 Canonical = getCanonicalType(applyObjCProtocolQualifiers( 5497 Canonical, protocols, hasError, true /*allowOnPointerType*/)); 5498 assert(!hasError && "Error when apply protocol qualifier to bound type"); 5499 } 5500 5501 unsigned size = sizeof(ObjCTypeParamType); 5502 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5503 void *mem = Allocate(size, TypeAlignment); 5504 auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols); 5505 5506 Types.push_back(newType); 5507 ObjCTypeParamTypes.InsertNode(newType, InsertPos); 5508 return QualType(newType, 0); 5509 } 5510 5511 void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig, 5512 ObjCTypeParamDecl *New) const { 5513 New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType())); 5514 // Update TypeForDecl after updating TypeSourceInfo. 5515 auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl()); 5516 SmallVector<ObjCProtocolDecl *, 8> protocols; 5517 protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end()); 5518 QualType UpdatedTy = getObjCTypeParamType(New, protocols); 5519 New->setTypeForDecl(UpdatedTy.getTypePtr()); 5520 } 5521 5522 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's 5523 /// protocol list adopt all protocols in QT's qualified-id protocol 5524 /// list. 5525 bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT, 5526 ObjCInterfaceDecl *IC) { 5527 if (!QT->isObjCQualifiedIdType()) 5528 return false; 5529 5530 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) { 5531 // If both the right and left sides have qualifiers. 5532 for (auto *Proto : OPT->quals()) { 5533 if (!IC->ClassImplementsProtocol(Proto, false)) 5534 return false; 5535 } 5536 return true; 5537 } 5538 return false; 5539 } 5540 5541 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in 5542 /// QT's qualified-id protocol list adopt all protocols in IDecl's list 5543 /// of protocols. 5544 bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT, 5545 ObjCInterfaceDecl *IDecl) { 5546 if (!QT->isObjCQualifiedIdType()) 5547 return false; 5548 const auto *OPT = QT->getAs<ObjCObjectPointerType>(); 5549 if (!OPT) 5550 return false; 5551 if (!IDecl->hasDefinition()) 5552 return false; 5553 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols; 5554 CollectInheritedProtocols(IDecl, InheritedProtocols); 5555 if (InheritedProtocols.empty()) 5556 return false; 5557 // Check that if every protocol in list of id<plist> conforms to a protocol 5558 // of IDecl's, then bridge casting is ok. 5559 bool Conforms = false; 5560 for (auto *Proto : OPT->quals()) { 5561 Conforms = false; 5562 for (auto *PI : InheritedProtocols) { 5563 if (ProtocolCompatibleWithProtocol(Proto, PI)) { 5564 Conforms = true; 5565 break; 5566 } 5567 } 5568 if (!Conforms) 5569 break; 5570 } 5571 if (Conforms) 5572 return true; 5573 5574 for (auto *PI : InheritedProtocols) { 5575 // If both the right and left sides have qualifiers. 5576 bool Adopts = false; 5577 for (auto *Proto : OPT->quals()) { 5578 // return 'true' if 'PI' is in the inheritance hierarchy of Proto 5579 if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto))) 5580 break; 5581 } 5582 if (!Adopts) 5583 return false; 5584 } 5585 return true; 5586 } 5587 5588 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for 5589 /// the given object type. 
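/// For illustration, given Objective-C source such as
/// ```
/// NSString<NSCopying> *s;   // example protocol-qualified object type
/// ```
/// the pointee 'NSString<NSCopying>' is an ObjCObjectType, and the declared
/// type of 's' is the ObjCObjectPointerType returned here.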
5590 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const { 5591 llvm::FoldingSetNodeID ID; 5592 ObjCObjectPointerType::Profile(ID, ObjectT); 5593 5594 void *InsertPos = nullptr; 5595 if (ObjCObjectPointerType *QT = 5596 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 5597 return QualType(QT, 0); 5598 5599 // Find the canonical object type. 5600 QualType Canonical; 5601 if (!ObjectT.isCanonical()) { 5602 Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT)); 5603 5604 // Regenerate InsertPos. 5605 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 5606 } 5607 5608 // No match. 5609 void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment); 5610 auto *QType = 5611 new (Mem) ObjCObjectPointerType(Canonical, ObjectT); 5612 5613 Types.push_back(QType); 5614 ObjCObjectPointerTypes.InsertNode(QType, InsertPos); 5615 return QualType(QType, 0); 5616 } 5617 5618 /// getObjCInterfaceType - Return the unique reference to the type for the 5619 /// specified ObjC interface decl. The list of protocols is optional. 5620 QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl, 5621 ObjCInterfaceDecl *PrevDecl) const { 5622 if (Decl->TypeForDecl) 5623 return QualType(Decl->TypeForDecl, 0); 5624 5625 if (PrevDecl) { 5626 assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl"); 5627 Decl->TypeForDecl = PrevDecl->TypeForDecl; 5628 return QualType(PrevDecl->TypeForDecl, 0); 5629 } 5630 5631 // Prefer the definition, if there is one. 5632 if (const ObjCInterfaceDecl *Def = Decl->getDefinition()) 5633 Decl = Def; 5634 5635 void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment); 5636 auto *T = new (Mem) ObjCInterfaceType(Decl); 5637 Decl->TypeForDecl = T; 5638 Types.push_back(T); 5639 return QualType(T, 0); 5640 } 5641 5642 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique 5643 /// TypeOfExprType AST's (since expression's are never shared). For example, 5644 /// multiple declarations that refer to "typeof(x)" all contain different 5645 /// DeclRefExpr's. This doesn't effect the type checker, since it operates 5646 /// on canonical type's (which are always unique). 5647 QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const { 5648 TypeOfExprType *toe; 5649 if (tofExpr->isTypeDependent()) { 5650 llvm::FoldingSetNodeID ID; 5651 DependentTypeOfExprType::Profile(ID, *this, tofExpr, 5652 Kind == TypeOfKind::Unqualified); 5653 5654 void *InsertPos = nullptr; 5655 DependentTypeOfExprType *Canon = 5656 DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos); 5657 if (Canon) { 5658 // We already have a "canonical" version of an identical, dependent 5659 // typeof(expr) type. Use that as our canonical type. 5660 toe = new (*this, TypeAlignment) 5661 TypeOfExprType(tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0)); 5662 } else { 5663 // Build a new, canonical typeof(expr) type. 5664 Canon = new (*this, TypeAlignment) 5665 DependentTypeOfExprType(*this, tofExpr, Kind); 5666 DependentTypeOfExprTypes.InsertNode(Canon, InsertPos); 5667 toe = Canon; 5668 } 5669 } else { 5670 QualType Canonical = getCanonicalType(tofExpr->getType()); 5671 toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Kind, Canonical); 5672 } 5673 Types.push_back(toe); 5674 return QualType(toe, 0); 5675 } 5676 5677 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique 5678 /// TypeOfType nodes. The only motivation to unique these nodes would be 5679 /// memory savings. 
Since typeof(t) is fairly uncommon, space shouldn't be 5680 /// an issue. This doesn't affect the type checker, since it operates 5681 /// on canonical types (which are always unique). 5682 QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const { 5683 QualType Canonical = getCanonicalType(tofType); 5684 auto *tot = 5685 new (*this, TypeAlignment) TypeOfType(tofType, Canonical, Kind); 5686 Types.push_back(tot); 5687 return QualType(tot, 0); 5688 } 5689 5690 /// getReferenceQualifiedType - Given an expr, will return the type for 5691 /// that expression, as in [dcl.type.simple]p4 but without taking id-expressions 5692 /// and class member access into account. 5693 QualType ASTContext::getReferenceQualifiedType(const Expr *E) const { 5694 // C++11 [dcl.type.simple]p4: 5695 // [...] 5696 QualType T = E->getType(); 5697 switch (E->getValueKind()) { 5698 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the 5699 // type of e; 5700 case VK_XValue: 5701 return getRValueReferenceType(T); 5702 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the 5703 // type of e; 5704 case VK_LValue: 5705 return getLValueReferenceType(T); 5706 // - otherwise, decltype(e) is the type of e. 5707 case VK_PRValue: 5708 return T; 5709 } 5710 llvm_unreachable("Unknown value kind"); 5711 } 5712 5713 /// Unlike many "get<Type>" functions, we don't unique DecltypeType 5714 /// nodes. This would never be helpful, since each such type has its own 5715 /// expression, and would not give a significant memory saving, since there 5716 /// is an Expr tree under each such type. 5717 QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const { 5718 DecltypeType *dt; 5719 5720 // C++11 [temp.type]p2: 5721 // If an expression e involves a template parameter, decltype(e) denotes a 5722 // unique dependent type. Two such decltype-specifiers refer to the same 5723 // type only if their expressions are equivalent (14.5.6.1). 5724 if (e->isInstantiationDependent()) { 5725 llvm::FoldingSetNodeID ID; 5726 DependentDecltypeType::Profile(ID, *this, e); 5727 5728 void *InsertPos = nullptr; 5729 DependentDecltypeType *Canon 5730 = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos); 5731 if (!Canon) { 5732 // Build a new, canonical decltype(expr) type. 5733 Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e); 5734 DependentDecltypeTypes.InsertNode(Canon, InsertPos); 5735 } 5736 dt = new (*this, TypeAlignment) 5737 DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0)); 5738 } else { 5739 dt = new (*this, TypeAlignment) 5740 DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType)); 5741 } 5742 Types.push_back(dt); 5743 return QualType(dt, 0); 5744 } 5745 5746 /// getUnaryTransformationType - We don't unique these, since the memory 5747 /// savings are minimal and these are rare. 5748 QualType ASTContext::getUnaryTransformType(QualType BaseType, 5749 QualType UnderlyingType, 5750 UnaryTransformType::UTTKind Kind) 5751 const { 5752 UnaryTransformType *ut = nullptr; 5753 5754 if (BaseType->isDependentType()) { 5755 // Look in the folding set for an existing type. 5756 llvm::FoldingSetNodeID ID; 5757 DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind); 5758 5759 void *InsertPos = nullptr; 5760 DependentUnaryTransformType *Canon 5761 = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos); 5762 5763 if (!Canon) { 5764 // Build a new, canonical __underlying_type(type) type. 
5765 Canon = new (*this, TypeAlignment) 5766 DependentUnaryTransformType(*this, getCanonicalType(BaseType), 5767 Kind); 5768 DependentUnaryTransformTypes.InsertNode(Canon, InsertPos); 5769 } 5770 ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, 5771 QualType(), Kind, 5772 QualType(Canon, 0)); 5773 } else { 5774 QualType CanonType = getCanonicalType(UnderlyingType); 5775 ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, 5776 UnderlyingType, Kind, 5777 CanonType); 5778 } 5779 Types.push_back(ut); 5780 return QualType(ut, 0); 5781 } 5782 5783 QualType ASTContext::getAutoTypeInternal( 5784 QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent, 5785 bool IsPack, ConceptDecl *TypeConstraintConcept, 5786 ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const { 5787 if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && 5788 !TypeConstraintConcept && !IsDependent) 5789 return getAutoDeductType(); 5790 5791 // Look in the folding set for an existing type. 5792 void *InsertPos = nullptr; 5793 llvm::FoldingSetNodeID ID; 5794 AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent, 5795 TypeConstraintConcept, TypeConstraintArgs); 5796 if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos)) 5797 return QualType(AT, 0); 5798 5799 QualType Canon; 5800 if (!IsCanon) { 5801 if (!DeducedType.isNull()) { 5802 Canon = DeducedType.getCanonicalType(); 5803 } else if (TypeConstraintConcept) { 5804 bool AnyNonCanonArgs = false; 5805 ConceptDecl *CanonicalConcept = TypeConstraintConcept->getCanonicalDecl(); 5806 auto CanonicalConceptArgs = ::getCanonicalTemplateArguments( 5807 *this, TypeConstraintArgs, AnyNonCanonArgs); 5808 if (CanonicalConcept != TypeConstraintConcept || AnyNonCanonArgs) { 5809 Canon = 5810 getAutoTypeInternal(QualType(), Keyword, IsDependent, IsPack, 5811 CanonicalConcept, CanonicalConceptArgs, true); 5812 // Find the insert position again. 5813 [[maybe_unused]] auto *Nothing = 5814 AutoTypes.FindNodeOrInsertPos(ID, InsertPos); 5815 assert(!Nothing && "canonical type broken"); 5816 } 5817 } 5818 } 5819 5820 void *Mem = Allocate(sizeof(AutoType) + 5821 sizeof(TemplateArgument) * TypeConstraintArgs.size(), 5822 TypeAlignment); 5823 auto *AT = new (Mem) AutoType( 5824 DeducedType, Keyword, 5825 (IsDependent ? TypeDependence::DependentInstantiation 5826 : TypeDependence::None) | 5827 (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None), 5828 Canon, TypeConstraintConcept, TypeConstraintArgs); 5829 Types.push_back(AT); 5830 AutoTypes.InsertNode(AT, InsertPos); 5831 return QualType(AT, 0); 5832 } 5833 5834 /// getAutoType - Return the uniqued reference to the 'auto' type which has been 5835 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the 5836 /// canonical deduced-but-dependent 'auto' type. 
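/// For illustration:
/// ```
/// auto i = 42;             // AutoTypeKeyword::Auto, deduced to 'int'
/// decltype(auto) r = (i);  // AutoTypeKeyword::DecltypeAuto, deduced to 'int&'
/// ```
/// A constrained placeholder such as 'std::integral auto' additionally carries
/// the named concept and its written template arguments.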
5837 QualType 5838 ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword, 5839 bool IsDependent, bool IsPack, 5840 ConceptDecl *TypeConstraintConcept, 5841 ArrayRef<TemplateArgument> TypeConstraintArgs) const { 5842 assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack"); 5843 assert((!IsDependent || DeducedType.isNull()) && 5844 "A dependent auto should be undeduced"); 5845 return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack, 5846 TypeConstraintConcept, TypeConstraintArgs); 5847 } 5848 5849 QualType ASTContext::getUnconstrainedType(QualType T) const { 5850 QualType CanonT = T.getCanonicalType(); 5851 5852 // Remove a type-constraint from a top-level auto or decltype(auto). 5853 if (auto *AT = CanonT->getAs<AutoType>()) { 5854 if (!AT->isConstrained()) 5855 return T; 5856 return getQualifiedType(getAutoType(QualType(), AT->getKeyword(), false, 5857 AT->containsUnexpandedParameterPack()), 5858 T.getQualifiers()); 5859 } 5860 5861 // FIXME: We only support constrained auto at the top level in the type of a 5862 // non-type template parameter at the moment. Once we lift that restriction, 5863 // we'll need to recursively build types containing auto here. 5864 assert(!CanonT->getContainedAutoType() || 5865 !CanonT->getContainedAutoType()->isConstrained()); 5866 return T; 5867 } 5868 5869 /// Return the uniqued reference to the deduced template specialization type 5870 /// which has been deduced to the given type, or to the canonical undeduced 5871 /// such type, or the canonical deduced-but-dependent such type. 5872 QualType ASTContext::getDeducedTemplateSpecializationType( 5873 TemplateName Template, QualType DeducedType, bool IsDependent) const { 5874 // Look in the folding set for an existing type. 5875 void *InsertPos = nullptr; 5876 llvm::FoldingSetNodeID ID; 5877 DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType, 5878 IsDependent); 5879 if (DeducedTemplateSpecializationType *DTST = 5880 DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos)) 5881 return QualType(DTST, 0); 5882 5883 auto *DTST = new (*this, TypeAlignment) 5884 DeducedTemplateSpecializationType(Template, DeducedType, IsDependent); 5885 llvm::FoldingSetNodeID TempID; 5886 DTST->Profile(TempID); 5887 assert(ID == TempID && "ID does not match"); 5888 Types.push_back(DTST); 5889 DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos); 5890 return QualType(DTST, 0); 5891 } 5892 5893 /// getAtomicType - Return the uniqued reference to the atomic type for 5894 /// the given value type. 5895 QualType ASTContext::getAtomicType(QualType T) const { 5896 // Unique pointers, to guarantee there is only one pointer of a particular 5897 // structure. 5898 llvm::FoldingSetNodeID ID; 5899 AtomicType::Profile(ID, T); 5900 5901 void *InsertPos = nullptr; 5902 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos)) 5903 return QualType(AT, 0); 5904 5905 // If the atomic value type isn't canonical, this won't be a canonical type 5906 // either, so fill in the canonical type field. 5907 QualType Canonical; 5908 if (!T.isCanonical()) { 5909 Canonical = getAtomicType(getCanonicalType(T)); 5910 5911 // Get the new insert position for the node we care about. 
5912 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos); 5913 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 5914 } 5915 auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical); 5916 Types.push_back(New); 5917 AtomicTypes.InsertNode(New, InsertPos); 5918 return QualType(New, 0); 5919 } 5920 5921 /// getAutoDeductType - Get type pattern for deducing against 'auto'. 5922 QualType ASTContext::getAutoDeductType() const { 5923 if (AutoDeductTy.isNull()) 5924 AutoDeductTy = QualType(new (*this, TypeAlignment) 5925 AutoType(QualType(), AutoTypeKeyword::Auto, 5926 TypeDependence::None, QualType(), 5927 /*concept*/ nullptr, /*args*/ {}), 5928 0); 5929 return AutoDeductTy; 5930 } 5931 5932 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'. 5933 QualType ASTContext::getAutoRRefDeductType() const { 5934 if (AutoRRefDeductTy.isNull()) 5935 AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType()); 5936 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern"); 5937 return AutoRRefDeductTy; 5938 } 5939 5940 /// getTagDeclType - Return the unique reference to the type for the 5941 /// specified TagDecl (struct/union/class/enum) decl. 5942 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const { 5943 assert(Decl); 5944 // FIXME: What is the design on getTagDeclType when it requires casting 5945 // away const? mutable? 5946 return getTypeDeclType(const_cast<TagDecl*>(Decl)); 5947 } 5948 5949 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result 5950 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and 5951 /// needs to agree with the definition in <stddef.h>. 5952 CanQualType ASTContext::getSizeType() const { 5953 return getFromTargetType(Target->getSizeType()); 5954 } 5955 5956 /// Return the unique signed counterpart of the integer type 5957 /// corresponding to size_t. 5958 CanQualType ASTContext::getSignedSizeType() const { 5959 return getFromTargetType(Target->getSignedSizeType()); 5960 } 5961 5962 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5). 5963 CanQualType ASTContext::getIntMaxType() const { 5964 return getFromTargetType(Target->getIntMaxType()); 5965 } 5966 5967 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5). 5968 CanQualType ASTContext::getUIntMaxType() const { 5969 return getFromTargetType(Target->getUIntMaxType()); 5970 } 5971 5972 /// getSignedWCharType - Return the type of "signed wchar_t". 5973 /// Used when in C++, as a GCC extension. 5974 QualType ASTContext::getSignedWCharType() const { 5975 // FIXME: derive from "Target" ? 5976 return WCharTy; 5977 } 5978 5979 /// getUnsignedWCharType - Return the type of "unsigned wchar_t". 5980 /// Used when in C++, as a GCC extension. 5981 QualType ASTContext::getUnsignedWCharType() const { 5982 // FIXME: derive from "Target" ? 5983 return UnsignedIntTy; 5984 } 5985 5986 QualType ASTContext::getIntPtrType() const { 5987 return getFromTargetType(Target->getIntPtrType()); 5988 } 5989 5990 QualType ASTContext::getUIntPtrType() const { 5991 return getCorrespondingUnsignedType(getIntPtrType()); 5992 } 5993 5994 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) 5995 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). 
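/// Illustrative (hypothetical) usage, assuming a valid ASTContext 'Ctx':
/// ```
/// QualType DiffTy = Ctx.getPointerDiffType();  // e.g. 'long' on LP64 targets
/// QualType SizeTy = Ctx.getSizeType();         // e.g. 'unsigned long' on LP64
/// ```
/// Both are derived from TargetInfo rather than hard-coded, so they track the
/// selected target's ABI.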
5996 QualType ASTContext::getPointerDiffType() const { 5997 return getFromTargetType(Target->getPtrDiffType(LangAS::Default)); 5998 } 5999 6000 /// Return the unique unsigned counterpart of "ptrdiff_t" 6001 /// integer type. The standard (C11 7.21.6.1p7) refers to this type 6002 /// in the definition of %tu format specifier. 6003 QualType ASTContext::getUnsignedPointerDiffType() const { 6004 return getFromTargetType(Target->getUnsignedPtrDiffType(LangAS::Default)); 6005 } 6006 6007 /// Return the unique type for "pid_t" defined in 6008 /// <sys/types.h>. We need this to compute the correct type for vfork(). 6009 QualType ASTContext::getProcessIDType() const { 6010 return getFromTargetType(Target->getProcessIDType()); 6011 } 6012 6013 //===----------------------------------------------------------------------===// 6014 // Type Operators 6015 //===----------------------------------------------------------------------===// 6016 6017 CanQualType ASTContext::getCanonicalParamType(QualType T) const { 6018 // Push qualifiers into arrays, and then discard any remaining 6019 // qualifiers. 6020 T = getCanonicalType(T); 6021 T = getVariableArrayDecayedType(T); 6022 const Type *Ty = T.getTypePtr(); 6023 QualType Result; 6024 if (isa<ArrayType>(Ty)) { 6025 Result = getArrayDecayedType(QualType(Ty,0)); 6026 } else if (isa<FunctionType>(Ty)) { 6027 Result = getPointerType(QualType(Ty, 0)); 6028 } else { 6029 Result = QualType(Ty, 0); 6030 } 6031 6032 return CanQualType::CreateUnsafe(Result); 6033 } 6034 6035 QualType ASTContext::getUnqualifiedArrayType(QualType type, 6036 Qualifiers &quals) { 6037 SplitQualType splitType = type.getSplitUnqualifiedType(); 6038 6039 // FIXME: getSplitUnqualifiedType() actually walks all the way to 6040 // the unqualified desugared type and then drops it on the floor. 6041 // We then have to strip that sugar back off with 6042 // getUnqualifiedDesugaredType(), which is silly. 6043 const auto *AT = 6044 dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType()); 6045 6046 // If we don't have an array, just use the results in splitType. 6047 if (!AT) { 6048 quals = splitType.Quals; 6049 return QualType(splitType.Ty, 0); 6050 } 6051 6052 // Otherwise, recurse on the array's element type. 6053 QualType elementType = AT->getElementType(); 6054 QualType unqualElementType = getUnqualifiedArrayType(elementType, quals); 6055 6056 // If that didn't change the element type, AT has no qualifiers, so we 6057 // can just use the results in splitType. 6058 if (elementType == unqualElementType) { 6059 assert(quals.empty()); // from the recursive call 6060 quals = splitType.Quals; 6061 return QualType(splitType.Ty, 0); 6062 } 6063 6064 // Otherwise, add in the qualifiers from the outermost type, then 6065 // build the type back up. 
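  // Illustrative example (hypothetical type): for 'const int[3]', the element
  // type 'const int' is unqualified to 'int', 'const' is accumulated into
  // 'quals', and the array type is rebuilt below as 'int[3]'.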
6066 quals.addConsistentQualifiers(splitType.Quals); 6067 6068 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) { 6069 return getConstantArrayType(unqualElementType, CAT->getSize(), 6070 CAT->getSizeExpr(), CAT->getSizeModifier(), 0); 6071 } 6072 6073 if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) { 6074 return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0); 6075 } 6076 6077 if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) { 6078 return getVariableArrayType(unqualElementType, 6079 VAT->getSizeExpr(), 6080 VAT->getSizeModifier(), 6081 VAT->getIndexTypeCVRQualifiers(), 6082 VAT->getBracketsRange()); 6083 } 6084 6085 const auto *DSAT = cast<DependentSizedArrayType>(AT); 6086 return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(), 6087 DSAT->getSizeModifier(), 0, 6088 SourceRange()); 6089 } 6090 6091 /// Attempt to unwrap two types that may both be array types with the same bound 6092 /// (or both be array types of unknown bound) for the purpose of comparing the 6093 /// cv-decomposition of two types per C++ [conv.qual]. 6094 /// 6095 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 6096 /// C++20 [conv.qual], if permitted by the current language mode. 6097 void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2, 6098 bool AllowPiMismatch) { 6099 while (true) { 6100 auto *AT1 = getAsArrayType(T1); 6101 if (!AT1) 6102 return; 6103 6104 auto *AT2 = getAsArrayType(T2); 6105 if (!AT2) 6106 return; 6107 6108 // If we don't have two array types with the same constant bound nor two 6109 // incomplete array types, we've unwrapped everything we can. 6110 // C++20 also permits one type to be a constant array type and the other 6111 // to be an incomplete array type. 6112 // FIXME: Consider also unwrapping array of unknown bound and VLA. 6113 if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) { 6114 auto *CAT2 = dyn_cast<ConstantArrayType>(AT2); 6115 if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) || 6116 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6117 isa<IncompleteArrayType>(AT2)))) 6118 return; 6119 } else if (isa<IncompleteArrayType>(AT1)) { 6120 if (!(isa<IncompleteArrayType>(AT2) || 6121 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6122 isa<ConstantArrayType>(AT2)))) 6123 return; 6124 } else { 6125 return; 6126 } 6127 6128 T1 = AT1->getElementType(); 6129 T2 = AT2->getElementType(); 6130 } 6131 } 6132 6133 /// Attempt to unwrap two types that may be similar (C++ [conv.qual]). 6134 /// 6135 /// If T1 and T2 are both pointer types of the same kind, or both array types 6136 /// with the same bound, unwraps layers from T1 and T2 until a pointer type is 6137 /// unwrapped. Top-level qualifiers on T1 and T2 are ignored. 6138 /// 6139 /// This function will typically be called in a loop that successively 6140 /// "unwraps" pointer and pointer-to-member types to compare them at each 6141 /// level. 6142 /// 6143 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 6144 /// C++20 [conv.qual], if permitted by the current language mode. 6145 /// 6146 /// \return \c true if a pointer type was unwrapped, \c false if we reached a 6147 /// pair of types that can't be unwrapped further. 
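/// For illustration, starting from T1 = 'const int **' and T2 = 'int **':
/// ```
/// call 1: T1 = 'const int *', T2 = 'int *'   (returns true)
/// call 2: T1 = 'const int',   T2 = 'int'     (returns true)
/// call 3: nothing left to unwrap             (returns false)
/// ```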
6148 bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2, 6149 bool AllowPiMismatch) { 6150 UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch); 6151 6152 const auto *T1PtrType = T1->getAs<PointerType>(); 6153 const auto *T2PtrType = T2->getAs<PointerType>(); 6154 if (T1PtrType && T2PtrType) { 6155 T1 = T1PtrType->getPointeeType(); 6156 T2 = T2PtrType->getPointeeType(); 6157 return true; 6158 } 6159 6160 const auto *T1MPType = T1->getAs<MemberPointerType>(); 6161 const auto *T2MPType = T2->getAs<MemberPointerType>(); 6162 if (T1MPType && T2MPType && 6163 hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0), 6164 QualType(T2MPType->getClass(), 0))) { 6165 T1 = T1MPType->getPointeeType(); 6166 T2 = T2MPType->getPointeeType(); 6167 return true; 6168 } 6169 6170 if (getLangOpts().ObjC) { 6171 const auto *T1OPType = T1->getAs<ObjCObjectPointerType>(); 6172 const auto *T2OPType = T2->getAs<ObjCObjectPointerType>(); 6173 if (T1OPType && T2OPType) { 6174 T1 = T1OPType->getPointeeType(); 6175 T2 = T2OPType->getPointeeType(); 6176 return true; 6177 } 6178 } 6179 6180 // FIXME: Block pointers, too? 6181 6182 return false; 6183 } 6184 6185 bool ASTContext::hasSimilarType(QualType T1, QualType T2) { 6186 while (true) { 6187 Qualifiers Quals; 6188 T1 = getUnqualifiedArrayType(T1, Quals); 6189 T2 = getUnqualifiedArrayType(T2, Quals); 6190 if (hasSameType(T1, T2)) 6191 return true; 6192 if (!UnwrapSimilarTypes(T1, T2)) 6193 return false; 6194 } 6195 } 6196 6197 bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) { 6198 while (true) { 6199 Qualifiers Quals1, Quals2; 6200 T1 = getUnqualifiedArrayType(T1, Quals1); 6201 T2 = getUnqualifiedArrayType(T2, Quals2); 6202 6203 Quals1.removeCVRQualifiers(); 6204 Quals2.removeCVRQualifiers(); 6205 if (Quals1 != Quals2) 6206 return false; 6207 6208 if (hasSameType(T1, T2)) 6209 return true; 6210 6211 if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false)) 6212 return false; 6213 } 6214 } 6215 6216 DeclarationNameInfo 6217 ASTContext::getNameForTemplate(TemplateName Name, 6218 SourceLocation NameLoc) const { 6219 switch (Name.getKind()) { 6220 case TemplateName::QualifiedTemplate: 6221 case TemplateName::Template: 6222 // DNInfo work in progress: CHECKME: what about DNLoc? 6223 return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(), 6224 NameLoc); 6225 6226 case TemplateName::OverloadedTemplate: { 6227 OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate(); 6228 // DNInfo work in progress: CHECKME: what about DNLoc? 6229 return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc); 6230 } 6231 6232 case TemplateName::AssumedTemplate: { 6233 AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName(); 6234 return DeclarationNameInfo(Storage->getDeclName(), NameLoc); 6235 } 6236 6237 case TemplateName::DependentTemplate: { 6238 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6239 DeclarationName DName; 6240 if (DTN->isIdentifier()) { 6241 DName = DeclarationNames.getIdentifier(DTN->getIdentifier()); 6242 return DeclarationNameInfo(DName, NameLoc); 6243 } else { 6244 DName = DeclarationNames.getCXXOperatorName(DTN->getOperator()); 6245 // DNInfo work in progress: FIXME: source locations? 
6246 DeclarationNameLoc DNLoc = 6247 DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange()); 6248 return DeclarationNameInfo(DName, NameLoc, DNLoc); 6249 } 6250 } 6251 6252 case TemplateName::SubstTemplateTemplateParm: { 6253 SubstTemplateTemplateParmStorage *subst 6254 = Name.getAsSubstTemplateTemplateParm(); 6255 return DeclarationNameInfo(subst->getParameter()->getDeclName(), 6256 NameLoc); 6257 } 6258 6259 case TemplateName::SubstTemplateTemplateParmPack: { 6260 SubstTemplateTemplateParmPackStorage *subst 6261 = Name.getAsSubstTemplateTemplateParmPack(); 6262 return DeclarationNameInfo(subst->getParameterPack()->getDeclName(), 6263 NameLoc); 6264 } 6265 case TemplateName::UsingTemplate: 6266 return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(), 6267 NameLoc); 6268 } 6269 6270 llvm_unreachable("bad template name kind!"); 6271 } 6272 6273 TemplateName 6274 ASTContext::getCanonicalTemplateName(const TemplateName &Name) const { 6275 switch (Name.getKind()) { 6276 case TemplateName::UsingTemplate: 6277 case TemplateName::QualifiedTemplate: 6278 case TemplateName::Template: { 6279 TemplateDecl *Template = Name.getAsTemplateDecl(); 6280 if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template)) 6281 Template = getCanonicalTemplateTemplateParmDecl(TTP); 6282 6283 // The canonical template name is the canonical template declaration. 6284 return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl())); 6285 } 6286 6287 case TemplateName::OverloadedTemplate: 6288 case TemplateName::AssumedTemplate: 6289 llvm_unreachable("cannot canonicalize unresolved template"); 6290 6291 case TemplateName::DependentTemplate: { 6292 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6293 assert(DTN && "Non-dependent template names must refer to template decls."); 6294 return DTN->CanonicalTemplateName; 6295 } 6296 6297 case TemplateName::SubstTemplateTemplateParm: { 6298 SubstTemplateTemplateParmStorage *subst 6299 = Name.getAsSubstTemplateTemplateParm(); 6300 return getCanonicalTemplateName(subst->getReplacement()); 6301 } 6302 6303 case TemplateName::SubstTemplateTemplateParmPack: { 6304 SubstTemplateTemplateParmPackStorage *subst = 6305 Name.getAsSubstTemplateTemplateParmPack(); 6306 TemplateArgument canonArgPack = 6307 getCanonicalTemplateArgument(subst->getArgumentPack()); 6308 return getSubstTemplateTemplateParmPack( 6309 canonArgPack, subst->getAssociatedDecl()->getCanonicalDecl(), 6310 subst->getFinal(), subst->getIndex()); 6311 } 6312 } 6313 6314 llvm_unreachable("bad template name!"); 6315 } 6316 6317 bool ASTContext::hasSameTemplateName(const TemplateName &X, 6318 const TemplateName &Y) const { 6319 return getCanonicalTemplateName(X).getAsVoidPointer() == 6320 getCanonicalTemplateName(Y).getAsVoidPointer(); 6321 } 6322 6323 bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const { 6324 if (!XCE != !YCE) 6325 return false; 6326 6327 if (!XCE) 6328 return true; 6329 6330 llvm::FoldingSetNodeID XCEID, YCEID; 6331 XCE->Profile(XCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true); 6332 YCE->Profile(YCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true); 6333 return XCEID == YCEID; 6334 } 6335 6336 bool ASTContext::isSameTypeConstraint(const TypeConstraint *XTC, 6337 const TypeConstraint *YTC) const { 6338 if (!XTC != !YTC) 6339 return false; 6340 6341 if (!XTC) 6342 return true; 6343 6344 auto *NCX = XTC->getNamedConcept(); 6345 auto *NCY = YTC->getNamedConcept(); 6346 if (!NCX || !NCY || !isSameEntity(NCX, NCY)) 6347 return 
false;
6348 if (XTC->hasExplicitTemplateArgs() != YTC->hasExplicitTemplateArgs())
6349 return false;
6350 if (XTC->hasExplicitTemplateArgs())
6351 if (XTC->getTemplateArgsAsWritten()->NumTemplateArgs !=
6352 YTC->getTemplateArgsAsWritten()->NumTemplateArgs)
6353 return false;
6354
6355 // Compare slowly by profiling.
6356 //
6357 // We cannot simply compare the profiling results for the template
6358 // arguments here. Consider the following example in different modules:
6359 //
6360 // template <__integer_like _Tp, C<_Tp> Sentinel>
6361 // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const {
6362 // return __t;
6363 // }
6364 //
6365 // When we profile `C<_Tp>` in each module, the profile includes the
6366 // type of `_Tp`, and `_Tp` naturally refers to a different type in
6367 // each module. So we cannot compare the profiling results for the
6368 // template arguments directly; compare the immediately-declared
6369 // constraints instead.
6370 return isSameConstraintExpr(XTC->getImmediatelyDeclaredConstraint(),
6371 YTC->getImmediatelyDeclaredConstraint());
6372 }
6373
6374 bool ASTContext::isSameTemplateParameter(const NamedDecl *X,
6375 const NamedDecl *Y) const {
6376 if (X->getKind() != Y->getKind())
6377 return false;
6378
6379 if (auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) {
6380 auto *TY = cast<TemplateTypeParmDecl>(Y);
6381 if (TX->isParameterPack() != TY->isParameterPack())
6382 return false;
6383 if (TX->hasTypeConstraint() != TY->hasTypeConstraint())
6384 return false;
6385 return isSameTypeConstraint(TX->getTypeConstraint(),
6386 TY->getTypeConstraint());
6387 }
6388
6389 if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
6390 auto *TY = cast<NonTypeTemplateParmDecl>(Y);
6391 return TX->isParameterPack() == TY->isParameterPack() &&
6392 TX->getASTContext().hasSameType(TX->getType(), TY->getType()) &&
6393 isSameConstraintExpr(TX->getPlaceholderTypeConstraint(),
6394 TY->getPlaceholderTypeConstraint());
6395 }
6396
6397 auto *TX = cast<TemplateTemplateParmDecl>(X);
6398 auto *TY = cast<TemplateTemplateParmDecl>(Y);
6399 return TX->isParameterPack() == TY->isParameterPack() &&
6400 isSameTemplateParameterList(TX->getTemplateParameters(),
6401 TY->getTemplateParameters());
6402 }
6403
6404 bool ASTContext::isSameTemplateParameterList(
6405 const TemplateParameterList *X, const TemplateParameterList *Y) const {
6406 if (X->size() != Y->size())
6407 return false;
6408
6409 for (unsigned I = 0, N = X->size(); I != N; ++I)
6410 if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I)))
6411 return false;
6412
6413 return isSameConstraintExpr(X->getRequiresClause(), Y->getRequiresClause());
6414 }
6415
6416 bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X,
6417 const NamedDecl *Y) const {
6418 // If the type parameter isn't the same already, we don't need to check the
6419 // default argument further.
6420 if (!isSameTemplateParameter(X, Y)) 6421 return false; 6422 6423 if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(X)) { 6424 auto *TTPY = cast<TemplateTypeParmDecl>(Y); 6425 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument()) 6426 return false; 6427 6428 return hasSameType(TTPX->getDefaultArgument(), TTPY->getDefaultArgument()); 6429 } 6430 6431 if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(X)) { 6432 auto *NTTPY = cast<NonTypeTemplateParmDecl>(Y); 6433 if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument()) 6434 return false; 6435 6436 Expr *DefaultArgumentX = NTTPX->getDefaultArgument()->IgnoreImpCasts(); 6437 Expr *DefaultArgumentY = NTTPY->getDefaultArgument()->IgnoreImpCasts(); 6438 llvm::FoldingSetNodeID XID, YID; 6439 DefaultArgumentX->Profile(XID, *this, /*Canonical=*/true); 6440 DefaultArgumentY->Profile(YID, *this, /*Canonical=*/true); 6441 return XID == YID; 6442 } 6443 6444 auto *TTPX = cast<TemplateTemplateParmDecl>(X); 6445 auto *TTPY = cast<TemplateTemplateParmDecl>(Y); 6446 6447 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument()) 6448 return false; 6449 6450 const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument(); 6451 const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument(); 6452 return hasSameTemplateName(TAX.getAsTemplate(), TAY.getAsTemplate()); 6453 } 6454 6455 static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) { 6456 if (auto *NS = X->getAsNamespace()) 6457 return NS; 6458 if (auto *NAS = X->getAsNamespaceAlias()) 6459 return NAS->getNamespace(); 6460 return nullptr; 6461 } 6462 6463 static bool isSameQualifier(const NestedNameSpecifier *X, 6464 const NestedNameSpecifier *Y) { 6465 if (auto *NSX = getNamespace(X)) { 6466 auto *NSY = getNamespace(Y); 6467 if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl()) 6468 return false; 6469 } else if (X->getKind() != Y->getKind()) 6470 return false; 6471 6472 // FIXME: For namespaces and types, we're permitted to check that the entity 6473 // is named via the same tokens. We should probably do so. 6474 switch (X->getKind()) { 6475 case NestedNameSpecifier::Identifier: 6476 if (X->getAsIdentifier() != Y->getAsIdentifier()) 6477 return false; 6478 break; 6479 case NestedNameSpecifier::Namespace: 6480 case NestedNameSpecifier::NamespaceAlias: 6481 // We've already checked that we named the same namespace. 6482 break; 6483 case NestedNameSpecifier::TypeSpec: 6484 case NestedNameSpecifier::TypeSpecWithTemplate: 6485 if (X->getAsType()->getCanonicalTypeInternal() != 6486 Y->getAsType()->getCanonicalTypeInternal()) 6487 return false; 6488 break; 6489 case NestedNameSpecifier::Global: 6490 case NestedNameSpecifier::Super: 6491 return true; 6492 } 6493 6494 // Recurse into earlier portion of NNS, if any. 6495 auto *PX = X->getPrefix(); 6496 auto *PY = Y->getPrefix(); 6497 if (PX && PY) 6498 return isSameQualifier(PX, PY); 6499 return !PX && !PY; 6500 } 6501 6502 /// Determine whether the attributes we can overload on are identical for A and 6503 /// B. Will ignore any overloadable attrs represented in the type of A and B. 6504 static bool hasSameOverloadableAttrs(const FunctionDecl *A, 6505 const FunctionDecl *B) { 6506 // Note that pass_object_size attributes are represented in the function's 6507 // ExtParameterInfo, so we don't need to check them here. 
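// Illustrative example (hypothetical declarations): the profiling below is
// what tells apart overloads such as
//   void f(int n) __attribute__((enable_if(n > 0, "")));
//   void f(int n) __attribute__((enable_if(n < 0, "")));
// whose enable_if attributes differ only in their condition expressions.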
6508 6509 llvm::FoldingSetNodeID Cand1ID, Cand2ID; 6510 auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>(); 6511 auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>(); 6512 6513 for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) { 6514 std::optional<EnableIfAttr *> Cand1A = std::get<0>(Pair); 6515 std::optional<EnableIfAttr *> Cand2A = std::get<1>(Pair); 6516 6517 // Return false if the number of enable_if attributes is different. 6518 if (!Cand1A || !Cand2A) 6519 return false; 6520 6521 Cand1ID.clear(); 6522 Cand2ID.clear(); 6523 6524 (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true); 6525 (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true); 6526 6527 // Return false if any of the enable_if expressions of A and B are 6528 // different. 6529 if (Cand1ID != Cand2ID) 6530 return false; 6531 } 6532 return true; 6533 } 6534 6535 bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const { 6536 // Caution: this function is called by the AST reader during deserialization, 6537 // so it cannot rely on AST invariants being met. Non-trivial accessors 6538 // should be avoided, along with any traversal of redeclaration chains. 6539 6540 if (X == Y) 6541 return true; 6542 6543 if (X->getDeclName() != Y->getDeclName()) 6544 return false; 6545 6546 // Must be in the same context. 6547 // 6548 // Note that we can't use DeclContext::Equals here, because the DeclContexts 6549 // could be two different declarations of the same function. (We will fix the 6550 // semantic DC to refer to the primary definition after merging.) 6551 if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()), 6552 cast<Decl>(Y->getDeclContext()->getRedeclContext()))) 6553 return false; 6554 6555 // Two typedefs refer to the same entity if they have the same underlying 6556 // type. 6557 if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X)) 6558 if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y)) 6559 return hasSameType(TypedefX->getUnderlyingType(), 6560 TypedefY->getUnderlyingType()); 6561 6562 // Must have the same kind. 6563 if (X->getKind() != Y->getKind()) 6564 return false; 6565 6566 // Objective-C classes and protocols with the same name always match. 6567 if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X)) 6568 return true; 6569 6570 if (isa<ClassTemplateSpecializationDecl>(X)) { 6571 // No need to handle these here: we merge them when adding them to the 6572 // template. 6573 return false; 6574 } 6575 6576 // Compatible tags match. 6577 if (const auto *TagX = dyn_cast<TagDecl>(X)) { 6578 const auto *TagY = cast<TagDecl>(Y); 6579 return (TagX->getTagKind() == TagY->getTagKind()) || 6580 ((TagX->getTagKind() == TTK_Struct || 6581 TagX->getTagKind() == TTK_Class || 6582 TagX->getTagKind() == TTK_Interface) && 6583 (TagY->getTagKind() == TTK_Struct || 6584 TagY->getTagKind() == TTK_Class || 6585 TagY->getTagKind() == TTK_Interface)); 6586 } 6587 6588 // Functions with the same type and linkage match. 6589 // FIXME: This needs to cope with merging of prototyped/non-prototyped 6590 // functions, etc. 
6591 if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) {
6592 const auto *FuncY = cast<FunctionDecl>(Y);
6593 if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) {
6594 const auto *CtorY = cast<CXXConstructorDecl>(Y);
6595 if (CtorX->getInheritedConstructor() &&
6596 !isSameEntity(CtorX->getInheritedConstructor().getConstructor(),
6597 CtorY->getInheritedConstructor().getConstructor()))
6598 return false;
6599 }
6600
6601 if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
6602 return false;
6603
6604 // Multiversioned functions with different feature strings are represented
6605 // as separate declarations.
6606 if (FuncX->isMultiVersion()) {
6607 const auto *TAX = FuncX->getAttr<TargetAttr>();
6608 const auto *TAY = FuncY->getAttr<TargetAttr>();
6609 assert(TAX && TAY && "Multiversion Function without target attribute");
6610
6611 if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
6612 return false;
6613 }
6614
6615 // Per C++20 [temp.over.link]/4, friends in different classes are sometimes
6616 // not the same entity if they are constrained.
6617 if ((FuncX->isMemberLikeConstrainedFriend() ||
6618 FuncY->isMemberLikeConstrainedFriend()) &&
6619 !FuncX->getLexicalDeclContext()->Equals(
6620 FuncY->getLexicalDeclContext())) {
6621 return false;
6622 }
6623
6624 if (!isSameConstraintExpr(FuncX->getTrailingRequiresClause(),
6625 FuncY->getTrailingRequiresClause()))
6626 return false;
6627
6628 auto GetTypeAsWritten = [](const FunctionDecl *FD) {
6629 // Map to the first declaration that we've already merged into this one.
6630 // The TSI of redeclarations might not match (due to calling conventions
6631 // being inherited onto the type but not the TSI), but the TSI type of
6632 // the first declaration of the function should match across modules.
6633 FD = FD->getCanonicalDecl();
6634 return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
6635 : FD->getType();
6636 };
6637 QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
6638 if (!hasSameType(XT, YT)) {
6639 // We can get functions with different types on the redecl chain in C++17
6640 // if they have differing exception specifications and at least one of
6641 // the exception specs is unresolved.
6642 auto *XFPT = XT->getAs<FunctionProtoType>();
6643 auto *YFPT = YT->getAs<FunctionProtoType>();
6644 if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
6645 (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
6646 isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
6647 hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
6648 return true;
6649 return false;
6650 }
6651
6652 return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
6653 hasSameOverloadableAttrs(FuncX, FuncY);
6654 }
6655
6656 // Variables with the same type and linkage match.
6657 if (const auto *VarX = dyn_cast<VarDecl>(X)) {
6658 const auto *VarY = cast<VarDecl>(Y);
6659 if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
6660 // During deserialization, we might compare variables before we load
6661 // their types. Assume the types will end up being the same.
6662 if (VarX->getType().isNull() || VarY->getType().isNull())
6663 return true;
6664
6665 if (hasSameType(VarX->getType(), VarY->getType()))
6666 return true;
6667
6668 // We can get decls with different types on the redecl chain. Eg.
6669 // template <typename T> struct S { static T Var[]; }; // #1
6670 // template <typename T> T S<T>::Var[sizeof(T)]; // #2
6671 // This only happens when completing an incomplete array type.
In this case 6672 // when comparing #1 and #2 we should go through their element type. 6673 const ArrayType *VarXTy = getAsArrayType(VarX->getType()); 6674 const ArrayType *VarYTy = getAsArrayType(VarY->getType()); 6675 if (!VarXTy || !VarYTy) 6676 return false; 6677 if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType()) 6678 return hasSameType(VarXTy->getElementType(), VarYTy->getElementType()); 6679 } 6680 return false; 6681 } 6682 6683 // Namespaces with the same name and inlinedness match. 6684 if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) { 6685 const auto *NamespaceY = cast<NamespaceDecl>(Y); 6686 return NamespaceX->isInline() == NamespaceY->isInline(); 6687 } 6688 6689 // Identical template names and kinds match if their template parameter lists 6690 // and patterns match. 6691 if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) { 6692 const auto *TemplateY = cast<TemplateDecl>(Y); 6693 6694 // ConceptDecl wouldn't be the same if their constraint expression differs. 6695 if (const auto *ConceptX = dyn_cast<ConceptDecl>(X)) { 6696 const auto *ConceptY = cast<ConceptDecl>(Y); 6697 if (!isSameConstraintExpr(ConceptX->getConstraintExpr(), 6698 ConceptY->getConstraintExpr())) 6699 return false; 6700 } 6701 6702 return isSameEntity(TemplateX->getTemplatedDecl(), 6703 TemplateY->getTemplatedDecl()) && 6704 isSameTemplateParameterList(TemplateX->getTemplateParameters(), 6705 TemplateY->getTemplateParameters()); 6706 } 6707 6708 // Fields with the same name and the same type match. 6709 if (const auto *FDX = dyn_cast<FieldDecl>(X)) { 6710 const auto *FDY = cast<FieldDecl>(Y); 6711 // FIXME: Also check the bitwidth is odr-equivalent, if any. 6712 return hasSameType(FDX->getType(), FDY->getType()); 6713 } 6714 6715 // Indirect fields with the same target field match. 6716 if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) { 6717 const auto *IFDY = cast<IndirectFieldDecl>(Y); 6718 return IFDX->getAnonField()->getCanonicalDecl() == 6719 IFDY->getAnonField()->getCanonicalDecl(); 6720 } 6721 6722 // Enumerators with the same name match. 6723 if (isa<EnumConstantDecl>(X)) 6724 // FIXME: Also check the value is odr-equivalent. 6725 return true; 6726 6727 // Using shadow declarations with the same target match. 6728 if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) { 6729 const auto *USY = cast<UsingShadowDecl>(Y); 6730 return USX->getTargetDecl() == USY->getTargetDecl(); 6731 } 6732 6733 // Using declarations with the same qualifier match. (We already know that 6734 // the name matches.) 6735 if (const auto *UX = dyn_cast<UsingDecl>(X)) { 6736 const auto *UY = cast<UsingDecl>(Y); 6737 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6738 UX->hasTypename() == UY->hasTypename() && 6739 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6740 } 6741 if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) { 6742 const auto *UY = cast<UnresolvedUsingValueDecl>(Y); 6743 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6744 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6745 } 6746 if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) { 6747 return isSameQualifier( 6748 UX->getQualifier(), 6749 cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier()); 6750 } 6751 6752 // Using-pack declarations are only created by instantiation, and match if 6753 // they're instantiated from matching UnresolvedUsing...Decls. 
6754 if (const auto *UX = dyn_cast<UsingPackDecl>(X)) { 6755 return declaresSameEntity( 6756 UX->getInstantiatedFromUsingDecl(), 6757 cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl()); 6758 } 6759 6760 // Namespace alias definitions with the same target match. 6761 if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) { 6762 const auto *NAY = cast<NamespaceAliasDecl>(Y); 6763 return NAX->getNamespace()->Equals(NAY->getNamespace()); 6764 } 6765 6766 return false; 6767 } 6768 6769 TemplateArgument 6770 ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const { 6771 switch (Arg.getKind()) { 6772 case TemplateArgument::Null: 6773 return Arg; 6774 6775 case TemplateArgument::Expression: 6776 return Arg; 6777 6778 case TemplateArgument::Declaration: { 6779 auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl()); 6780 return TemplateArgument(D, getCanonicalType(Arg.getParamTypeForDecl()), 6781 Arg.getIsDefaulted()); 6782 } 6783 6784 case TemplateArgument::NullPtr: 6785 return TemplateArgument(getCanonicalType(Arg.getNullPtrType()), 6786 /*isNullPtr*/ true, Arg.getIsDefaulted()); 6787 6788 case TemplateArgument::Template: 6789 return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()), 6790 Arg.getIsDefaulted()); 6791 6792 case TemplateArgument::TemplateExpansion: 6793 return TemplateArgument( 6794 getCanonicalTemplateName(Arg.getAsTemplateOrTemplatePattern()), 6795 Arg.getNumTemplateExpansions(), Arg.getIsDefaulted()); 6796 6797 case TemplateArgument::Integral: 6798 return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType())); 6799 6800 case TemplateArgument::Type: 6801 return TemplateArgument(getCanonicalType(Arg.getAsType()), 6802 /*isNullPtr*/ false, Arg.getIsDefaulted()); 6803 6804 case TemplateArgument::Pack: { 6805 bool AnyNonCanonArgs = false; 6806 auto CanonArgs = ::getCanonicalTemplateArguments( 6807 *this, Arg.pack_elements(), AnyNonCanonArgs); 6808 if (!AnyNonCanonArgs) 6809 return Arg; 6810 return TemplateArgument::CreatePackCopy(const_cast<ASTContext &>(*this), 6811 CanonArgs); 6812 } 6813 } 6814 6815 // Silence GCC warning 6816 llvm_unreachable("Unhandled template argument kind"); 6817 } 6818 6819 NestedNameSpecifier * 6820 ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const { 6821 if (!NNS) 6822 return nullptr; 6823 6824 switch (NNS->getKind()) { 6825 case NestedNameSpecifier::Identifier: 6826 // Canonicalize the prefix but keep the identifier the same. 6827 return NestedNameSpecifier::Create(*this, 6828 getCanonicalNestedNameSpecifier(NNS->getPrefix()), 6829 NNS->getAsIdentifier()); 6830 6831 case NestedNameSpecifier::Namespace: 6832 // A namespace is canonical; build a nested-name-specifier with 6833 // this namespace and no prefix. 6834 return NestedNameSpecifier::Create(*this, nullptr, 6835 NNS->getAsNamespace()->getOriginalNamespace()); 6836 6837 case NestedNameSpecifier::NamespaceAlias: 6838 // A namespace is canonical; build a nested-name-specifier with 6839 // this namespace and no prefix. 6840 return NestedNameSpecifier::Create(*this, nullptr, 6841 NNS->getAsNamespaceAlias()->getNamespace() 6842 ->getOriginalNamespace()); 6843 6844 // The difference between TypeSpec and TypeSpecWithTemplate is that the 6845 // latter will have the 'template' keyword when printed. 
6846 case NestedNameSpecifier::TypeSpec:
6847 case NestedNameSpecifier::TypeSpecWithTemplate: {
6848 const Type *T = getCanonicalType(NNS->getAsType());
6849
6850 // If we have some kind of dependent-named type (e.g., "typename T::type"),
6851 // break it apart into its prefix and identifier, then reconstitute those
6852 // as the canonical nested-name-specifier. This is required to canonicalize
6853 // a dependent nested-name-specifier involving typedefs of dependent-name
6854 // types, e.g.,
6855 // typedef typename T::type T1;
6856 // typedef typename T1::type T2;
6857 if (const auto *DNT = T->getAs<DependentNameType>())
6858 return NestedNameSpecifier::Create(
6859 *this, DNT->getQualifier(),
6860 const_cast<IdentifierInfo *>(DNT->getIdentifier()));
6861 if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>())
6862 return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true,
6863 const_cast<Type *>(T));
6864
6865 // TODO: Set 'Template' parameter to true for other template types.
6866 return NestedNameSpecifier::Create(*this, nullptr, false,
6867 const_cast<Type *>(T));
6868 }
6869
6870 case NestedNameSpecifier::Global:
6871 case NestedNameSpecifier::Super:
6872 // The global specifier and __super specifier are canonical and unique.
6873 return NNS;
6874 }
6875
6876 llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
6877 }
6878
6879 const ArrayType *ASTContext::getAsArrayType(QualType T) const {
6880 // Handle the non-qualified case efficiently.
6881 if (!T.hasLocalQualifiers()) {
6882 // Handle the common positive case fast.
6883 if (const auto *AT = dyn_cast<ArrayType>(T))
6884 return AT;
6885 }
6886
6887 // Handle the common negative case fast.
6888 if (!isa<ArrayType>(T.getCanonicalType()))
6889 return nullptr;
6890
6891 // Apply any qualifiers from the array type to the element type. This
6892 // implements C99 6.7.3p8: "If the specification of an array type includes
6893 // any type qualifiers, the element type is so qualified, not the array type."
6894
6895 // If we get here, we either have type qualifiers on the type, or we have
6896 // sugar such as a typedef in the way. If we have type qualifiers on the type
6897 // we must propagate them down into the element type.
6898
6899 SplitQualType split = T.getSplitDesugaredType();
6900 Qualifiers qs = split.Quals;
6901
6902 // If we have a simple case, just return now.
6903 const auto *ATy = dyn_cast<ArrayType>(split.Ty);
6904 if (!ATy || qs.empty())
6905 return ATy;
6906
6907 // Otherwise, we have an array and we have qualifiers on it. Push the
6908 // qualifiers into the array element type and return a new array type.
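// For illustration (hypothetical typedefs): given
//   typedef int A[4];
//   typedef const A CA;
// getAsArrayType(CA) returns an array of 'const int' rather than a
// const-qualified array type, per the C99 rule quoted above.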
6909 QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs); 6910 6911 if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy)) 6912 return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(), 6913 CAT->getSizeExpr(), 6914 CAT->getSizeModifier(), 6915 CAT->getIndexTypeCVRQualifiers())); 6916 if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy)) 6917 return cast<ArrayType>(getIncompleteArrayType(NewEltTy, 6918 IAT->getSizeModifier(), 6919 IAT->getIndexTypeCVRQualifiers())); 6920 6921 if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy)) 6922 return cast<ArrayType>( 6923 getDependentSizedArrayType(NewEltTy, 6924 DSAT->getSizeExpr(), 6925 DSAT->getSizeModifier(), 6926 DSAT->getIndexTypeCVRQualifiers(), 6927 DSAT->getBracketsRange())); 6928 6929 const auto *VAT = cast<VariableArrayType>(ATy); 6930 return cast<ArrayType>(getVariableArrayType(NewEltTy, 6931 VAT->getSizeExpr(), 6932 VAT->getSizeModifier(), 6933 VAT->getIndexTypeCVRQualifiers(), 6934 VAT->getBracketsRange())); 6935 } 6936 6937 QualType ASTContext::getAdjustedParameterType(QualType T) const { 6938 if (T->isArrayType() || T->isFunctionType()) 6939 return getDecayedType(T); 6940 return T; 6941 } 6942 6943 QualType ASTContext::getSignatureParameterType(QualType T) const { 6944 T = getVariableArrayDecayedType(T); 6945 T = getAdjustedParameterType(T); 6946 return T.getUnqualifiedType(); 6947 } 6948 6949 QualType ASTContext::getExceptionObjectType(QualType T) const { 6950 // C++ [except.throw]p3: 6951 // A throw-expression initializes a temporary object, called the exception 6952 // object, the type of which is determined by removing any top-level 6953 // cv-qualifiers from the static type of the operand of throw and adjusting 6954 // the type from "array of T" or "function returning T" to "pointer to T" 6955 // or "pointer to function returning T", [...] 6956 T = getVariableArrayDecayedType(T); 6957 if (T->isArrayType() || T->isFunctionType()) 6958 T = getDecayedType(T); 6959 return T.getUnqualifiedType(); 6960 } 6961 6962 /// getArrayDecayedType - Return the properly qualified result of decaying the 6963 /// specified array type to a pointer. This operation is non-trivial when 6964 /// handling typedefs etc. The canonical type of "T" must be an array type, 6965 /// this returns a pointer to a properly qualified element of the array. 6966 /// 6967 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3. 6968 QualType ASTContext::getArrayDecayedType(QualType Ty) const { 6969 // Get the element type with 'getAsArrayType' so that we don't lose any 6970 // typedefs in the element type of the array. This also handles propagation 6971 // of type qualifiers from the array type into the element type if present 6972 // (C99 6.7.3p8). 
6973 const ArrayType *PrettyArrayType = getAsArrayType(Ty); 6974 assert(PrettyArrayType && "Not an array type!"); 6975 6976 QualType PtrTy = getPointerType(PrettyArrayType->getElementType()); 6977 6978 // int x[restrict 4] -> int *restrict 6979 QualType Result = getQualifiedType(PtrTy, 6980 PrettyArrayType->getIndexTypeQualifiers()); 6981 6982 // int x[_Nullable] -> int * _Nullable 6983 if (auto Nullability = Ty->getNullability()) { 6984 Result = const_cast<ASTContext *>(this)->getAttributedType( 6985 AttributedType::getNullabilityAttrKind(*Nullability), Result, Result); 6986 } 6987 return Result; 6988 } 6989 6990 QualType ASTContext::getBaseElementType(const ArrayType *array) const { 6991 return getBaseElementType(array->getElementType()); 6992 } 6993 6994 QualType ASTContext::getBaseElementType(QualType type) const { 6995 Qualifiers qs; 6996 while (true) { 6997 SplitQualType split = type.getSplitDesugaredType(); 6998 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe(); 6999 if (!array) break; 7000 7001 type = array->getElementType(); 7002 qs.addConsistentQualifiers(split.Quals); 7003 } 7004 7005 return getQualifiedType(type, qs); 7006 } 7007 7008 /// getConstantArrayElementCount - Returns number of constant array elements. 7009 uint64_t 7010 ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { 7011 uint64_t ElementCount = 1; 7012 do { 7013 ElementCount *= CA->getSize().getZExtValue(); 7014 CA = dyn_cast_or_null<ConstantArrayType>( 7015 CA->getElementType()->getAsArrayTypeUnsafe()); 7016 } while (CA); 7017 return ElementCount; 7018 } 7019 7020 uint64_t ASTContext::getArrayInitLoopExprElementCount( 7021 const ArrayInitLoopExpr *AILE) const { 7022 if (!AILE) 7023 return 0; 7024 7025 uint64_t ElementCount = 1; 7026 7027 do { 7028 ElementCount *= AILE->getArraySize().getZExtValue(); 7029 AILE = dyn_cast<ArrayInitLoopExpr>(AILE->getSubExpr()); 7030 } while (AILE); 7031 7032 return ElementCount; 7033 } 7034 7035 /// getFloatingRank - Return a relative rank for floating point types. 7036 /// This routine will assert if passed a built-in type that isn't a float. 7037 static FloatingRank getFloatingRank(QualType T) { 7038 if (const auto *CT = T->getAs<ComplexType>()) 7039 return getFloatingRank(CT->getElementType()); 7040 7041 switch (T->castAs<BuiltinType>()->getKind()) { 7042 default: llvm_unreachable("getFloatingRank(): not a floating type"); 7043 case BuiltinType::Float16: return Float16Rank; 7044 case BuiltinType::Half: return HalfRank; 7045 case BuiltinType::Float: return FloatRank; 7046 case BuiltinType::Double: return DoubleRank; 7047 case BuiltinType::LongDouble: return LongDoubleRank; 7048 case BuiltinType::Float128: return Float128Rank; 7049 case BuiltinType::BFloat16: return BFloat16Rank; 7050 case BuiltinType::Ibm128: return Ibm128Rank; 7051 } 7052 } 7053 7054 /// getFloatingTypeOrder - Compare the rank of the two specified floating 7055 /// point types, ignoring the domain of the type (i.e. 'double' == 7056 /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If 7057 /// LHS < RHS, return -1. 
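/// For illustration: 'double' vs. 'float' yields 1, 'float' vs.
/// '_Complex float' yields 0 (the complex domain is ignored), and
/// 'float' vs. 'double' yields -1.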
7058 int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const { 7059 FloatingRank LHSR = getFloatingRank(LHS); 7060 FloatingRank RHSR = getFloatingRank(RHS); 7061 7062 if (LHSR == RHSR) 7063 return 0; 7064 if (LHSR > RHSR) 7065 return 1; 7066 return -1; 7067 } 7068 7069 int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const { 7070 if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS)) 7071 return 0; 7072 return getFloatingTypeOrder(LHS, RHS); 7073 } 7074 7075 /// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This 7076 /// routine will assert if passed a built-in type that isn't an integer or enum, 7077 /// or if it is not canonicalized. 7078 unsigned ASTContext::getIntegerRank(const Type *T) const { 7079 assert(T->isCanonicalUnqualified() && "T should be canonicalized"); 7080 7081 // Results in this 'losing' to any type of the same size, but winning if 7082 // larger. 7083 if (const auto *EIT = dyn_cast<BitIntType>(T)) 7084 return 0 + (EIT->getNumBits() << 3); 7085 7086 switch (cast<BuiltinType>(T)->getKind()) { 7087 default: llvm_unreachable("getIntegerRank(): not a built-in integer"); 7088 case BuiltinType::Bool: 7089 return 1 + (getIntWidth(BoolTy) << 3); 7090 case BuiltinType::Char_S: 7091 case BuiltinType::Char_U: 7092 case BuiltinType::SChar: 7093 case BuiltinType::UChar: 7094 return 2 + (getIntWidth(CharTy) << 3); 7095 case BuiltinType::Short: 7096 case BuiltinType::UShort: 7097 return 3 + (getIntWidth(ShortTy) << 3); 7098 case BuiltinType::Int: 7099 case BuiltinType::UInt: 7100 return 4 + (getIntWidth(IntTy) << 3); 7101 case BuiltinType::Long: 7102 case BuiltinType::ULong: 7103 return 5 + (getIntWidth(LongTy) << 3); 7104 case BuiltinType::LongLong: 7105 case BuiltinType::ULongLong: 7106 return 6 + (getIntWidth(LongLongTy) << 3); 7107 case BuiltinType::Int128: 7108 case BuiltinType::UInt128: 7109 return 7 + (getIntWidth(Int128Ty) << 3); 7110 7111 // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of 7112 // their underlying types" [c++20 conv.rank] 7113 case BuiltinType::Char8: 7114 return getIntegerRank(UnsignedCharTy.getTypePtr()); 7115 case BuiltinType::Char16: 7116 return getIntegerRank( 7117 getFromTargetType(Target->getChar16Type()).getTypePtr()); 7118 case BuiltinType::Char32: 7119 return getIntegerRank( 7120 getFromTargetType(Target->getChar32Type()).getTypePtr()); 7121 case BuiltinType::WChar_S: 7122 case BuiltinType::WChar_U: 7123 return getIntegerRank( 7124 getFromTargetType(Target->getWCharType()).getTypePtr()); 7125 } 7126 } 7127 7128 /// Whether this is a promotable bitfield reference according 7129 /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions). 7130 /// 7131 /// \returns the type this bit-field will promote to, or NULL if no 7132 /// promotion occurs. 7133 QualType ASTContext::isPromotableBitField(Expr *E) const { 7134 if (E->isTypeDependent() || E->isValueDependent()) 7135 return {}; 7136 7137 // C++ [conv.prom]p5: 7138 // If the bit-field has an enumerated type, it is treated as any other 7139 // value of that type for promotion purposes. 7140 if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType()) 7141 return {}; 7142 7143 // FIXME: We should not do this unless E->refersToBitField() is true. This 7144 // matters in C where getSourceBitField() will find bit-fields for various 7145 // cases where the source expression is not a bit-field designator. 7146 7147 FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields? 
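// Illustrative example (hypothetical struct): for
//   struct S { unsigned bf : 7; } s;
// an operand such as 's.bf' resolves to a bit-field here, and because its
// 7-bit width is narrower than 'int', the checks below promote it to 'int'.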
7148 if (!Field) 7149 return {}; 7150 7151 QualType FT = Field->getType(); 7152 7153 uint64_t BitWidth = Field->getBitWidthValue(*this); 7154 uint64_t IntSize = getTypeSize(IntTy); 7155 // C++ [conv.prom]p5: 7156 // A prvalue for an integral bit-field can be converted to a prvalue of type 7157 // int if int can represent all the values of the bit-field; otherwise, it 7158 // can be converted to unsigned int if unsigned int can represent all the 7159 // values of the bit-field. If the bit-field is larger yet, no integral 7160 // promotion applies to it. 7161 // C11 6.3.1.1/2: 7162 // [For a bit-field of type _Bool, int, signed int, or unsigned int:] 7163 // If an int can represent all values of the original type (as restricted by 7164 // the width, for a bit-field), the value is converted to an int; otherwise, 7165 // it is converted to an unsigned int. 7166 // 7167 // FIXME: C does not permit promotion of a 'long : 3' bitfield to int. 7168 // We perform that promotion here to match GCC and C++. 7169 // FIXME: C does not permit promotion of an enum bit-field whose rank is 7170 // greater than that of 'int'. We perform that promotion to match GCC. 7171 if (BitWidth < IntSize) 7172 return IntTy; 7173 7174 if (BitWidth == IntSize) 7175 return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy; 7176 7177 // Bit-fields wider than int are not subject to promotions, and therefore act 7178 // like the base type. GCC has some weird bugs in this area that we 7179 // deliberately do not follow (GCC follows a pre-standard resolution to 7180 // C's DR315 which treats bit-width as being part of the type, and this leaks 7181 // into their semantics in some cases). 7182 return {}; 7183 } 7184 7185 /// getPromotedIntegerType - Returns the type that Promotable will 7186 /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable 7187 /// integer type. 7188 QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { 7189 assert(!Promotable.isNull()); 7190 assert(isPromotableIntegerType(Promotable)); 7191 if (const auto *ET = Promotable->getAs<EnumType>()) 7192 return ET->getDecl()->getPromotionType(); 7193 7194 if (const auto *BT = Promotable->getAs<BuiltinType>()) { 7195 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t 7196 // (3.9.1) can be converted to a prvalue of the first of the following 7197 // types that can represent all the values of its underlying type: 7198 // int, unsigned int, long int, unsigned long int, long long int, or 7199 // unsigned long long int [...] 7200 // FIXME: Is there some better way to compute this? 7201 if (BT->getKind() == BuiltinType::WChar_S || 7202 BT->getKind() == BuiltinType::WChar_U || 7203 BT->getKind() == BuiltinType::Char8 || 7204 BT->getKind() == BuiltinType::Char16 || 7205 BT->getKind() == BuiltinType::Char32) { 7206 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S; 7207 uint64_t FromSize = getTypeSize(BT); 7208 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy, 7209 LongLongTy, UnsignedLongLongTy }; 7210 for (const auto &PT : PromoteTypes) { 7211 uint64_t ToSize = getTypeSize(PT); 7212 if (FromSize < ToSize || 7213 (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType())) 7214 return PT; 7215 } 7216 llvm_unreachable("char type should fit into long long"); 7217 } 7218 } 7219 7220 // At this point, we should have a signed or unsigned integer type. 
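// Illustrative results (assuming a 32-bit 'int'): 'unsigned char' and
// 'unsigned short' promote to 'int'; only when a promotable unsigned type is
// as wide as 'int' (e.g. a 16-bit 'short' with a 16-bit 'int') does this
// return 'unsigned int'.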
7221 if (Promotable->isSignedIntegerType())
7222 return IntTy;
7223 uint64_t PromotableSize = getIntWidth(Promotable);
7224 uint64_t IntSize = getIntWidth(IntTy);
7225 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
7226 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
7227 }
7228
7229 /// Recurses in pointer/array types until it finds an objc retainable
7230 /// type and returns its ownership.
7231 Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
7232 while (!T.isNull()) {
7233 if (T.getObjCLifetime() != Qualifiers::OCL_None)
7234 return T.getObjCLifetime();
7235 if (T->isArrayType())
7236 T = getBaseElementType(T);
7237 else if (const auto *PT = T->getAs<PointerType>())
7238 T = PT->getPointeeType();
7239 else if (const auto *RT = T->getAs<ReferenceType>())
7240 T = RT->getPointeeType();
7241 else
7242 break;
7243 }
7244
7245 return Qualifiers::OCL_None;
7246 }
7247
7248 static const Type *getIntegerTypeForEnum(const EnumType *ET) {
7249 // Incomplete enum types are not treated as integer types.
7250 // FIXME: In C++, enum types are never integer types.
7251 if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
7252 return ET->getDecl()->getIntegerType().getTypePtr();
7253 return nullptr;
7254 }
7255
7256 /// getIntegerTypeOrder - Returns the highest ranked integer type:
7257 /// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
7258 /// LHS < RHS, return -1.
7259 int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
7260 const Type *LHSC = getCanonicalType(LHS).getTypePtr();
7261 const Type *RHSC = getCanonicalType(RHS).getTypePtr();
7262
7263 // Unwrap enums to their underlying type.
7264 if (const auto *ET = dyn_cast<EnumType>(LHSC))
7265 LHSC = getIntegerTypeForEnum(ET);
7266 if (const auto *ET = dyn_cast<EnumType>(RHSC))
7267 RHSC = getIntegerTypeForEnum(ET);
7268
7269 if (LHSC == RHSC) return 0;
7270
7271 bool LHSUnsigned = LHSC->isUnsignedIntegerType();
7272 bool RHSUnsigned = RHSC->isUnsignedIntegerType();
7273
7274 unsigned LHSRank = getIntegerRank(LHSC);
7275 unsigned RHSRank = getIntegerRank(RHSC);
7276
7277 if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned.
7278 if (LHSRank == RHSRank) return 0;
7279 return LHSRank > RHSRank ? 1 : -1;
7280 }
7281
7282 // Otherwise, the LHS is signed and the RHS is unsigned or vice versa.
7283 if (LHSUnsigned) {
7284 // If the unsigned [LHS] type is larger, return it.
7285 if (LHSRank >= RHSRank)
7286 return 1;
7287
7288 // If the signed type can represent all values of the unsigned type, it
7289 // wins. Because we are dealing with 2's complement and types that are
7290 // powers of two larger than each other, this is always safe.
7291 return -1;
7292 }
7293
7294 // If the unsigned [RHS] type is larger, return it.
7295 if (RHSRank >= LHSRank)
7296 return -1;
7297
7298 // If the signed type can represent all values of the unsigned type, it
7299 // wins. Because we are dealing with 2's complement and types that are
7300 // powers of two larger than each other, this is always safe.
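// For example (illustrative, on typical 64-bit targets): comparing
// 'long long' against 'unsigned int' reaches this point and returns 1,
// because the wider signed type can represent every 'unsigned int' value.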
7301 return 1;
7302 }
7303
7304 TypedefDecl *ASTContext::getCFConstantStringDecl() const {
7305 if (CFConstantStringTypeDecl)
7306 return CFConstantStringTypeDecl;
7307
7308 assert(!CFConstantStringTagDecl &&
7309 "tag and typedef should be initialized together");
7310 CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag");
7311 CFConstantStringTagDecl->startDefinition();
7312
7313 struct {
7314 QualType Type;
7315 const char *Name;
7316 } Fields[5];
7317 unsigned Count = 0;
7318
7319 /// Objective-C ABI
7320 ///
7321 /// typedef struct __NSConstantString_tag {
7322 /// const int *isa;
7323 /// int flags;
7324 /// const char *str;
7325 /// long length;
7326 /// } __NSConstantString;
7327 ///
7328 /// Swift ABI (4.1, 4.2)
7329 ///
7330 /// typedef struct __NSConstantString_tag {
7331 /// uintptr_t _cfisa;
7332 /// uintptr_t _swift_rc;
7333 /// _Atomic(uint64_t) _cfinfoa;
7334 /// const char *_ptr;
7335 /// uint32_t _length;
7336 /// } __NSConstantString;
7337 ///
7338 /// Swift ABI (5.0)
7339 ///
7340 /// typedef struct __NSConstantString_tag {
7341 /// uintptr_t _cfisa;
7342 /// uintptr_t _swift_rc;
7343 /// _Atomic(uint64_t) _cfinfoa;
7344 /// const char *_ptr;
7345 /// uintptr_t _length;
7346 /// } __NSConstantString;
7347
7348 const auto CFRuntime = getLangOpts().CFRuntime;
7349 if (static_cast<unsigned>(CFRuntime) <
7350 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) {
7351 Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" };
7352 Fields[Count++] = { IntTy, "flags" };
7353 Fields[Count++] = { getPointerType(CharTy.withConst()), "str" };
7354 Fields[Count++] = { LongTy, "length" };
7355 } else {
7356 Fields[Count++] = { getUIntPtrType(), "_cfisa" };
7357 Fields[Count++] = { getUIntPtrType(), "_swift_rc" };
7358 Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_cfinfoa" };
7359 Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" };
7360 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
7361 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
7362 Fields[Count++] = { IntTy, "_length" };
7363 else
7364 Fields[Count++] = { getUIntPtrType(), "_length" };
7365 }
7366
7367 // Create fields
7368 for (unsigned i = 0; i < Count; ++i) {
7369 FieldDecl *Field =
7370 FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(),
7371 SourceLocation(), &Idents.get(Fields[i].Name),
7372 Fields[i].Type, /*TInfo=*/nullptr,
7373 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
7374 Field->setAccess(AS_public);
7375 CFConstantStringTagDecl->addDecl(Field);
7376 }
7377
7378 CFConstantStringTagDecl->completeDefinition();
7379 // This type is designed to be compatible with NSConstantString, but cannot
7380 // use the same name, since NSConstantString is an interface.
7381 auto tagType = getTagDeclType(CFConstantStringTagDecl);
7382 CFConstantStringTypeDecl =
7383 buildImplicitTypedef(tagType, "__NSConstantString");
7384
7385 return CFConstantStringTypeDecl;
7386 }
7387
7388 RecordDecl *ASTContext::getCFConstantStringTagDecl() const {
7389 if (!CFConstantStringTagDecl)
7390 getCFConstantStringDecl(); // Build the tag and the typedef.
7391 return CFConstantStringTagDecl;
7392 }
7393
7394 // getCFConstantStringType - Return the type used for constant CFStrings.
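// (Roughly speaking, this is the implicit '__NSConstantString' struct type
// that backs constant CFString/NSString literals during IR generation.)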
7395 QualType ASTContext::getCFConstantStringType() const { 7396 return getTypedefType(getCFConstantStringDecl()); 7397 } 7398 7399 QualType ASTContext::getObjCSuperType() const { 7400 if (ObjCSuperType.isNull()) { 7401 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super"); 7402 getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl); 7403 ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl); 7404 } 7405 return ObjCSuperType; 7406 } 7407 7408 void ASTContext::setCFConstantStringType(QualType T) { 7409 const auto *TD = T->castAs<TypedefType>(); 7410 CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl()); 7411 const auto *TagType = 7412 CFConstantStringTypeDecl->getUnderlyingType()->castAs<RecordType>(); 7413 CFConstantStringTagDecl = TagType->getDecl(); 7414 } 7415 7416 QualType ASTContext::getBlockDescriptorType() const { 7417 if (BlockDescriptorType) 7418 return getTagDeclType(BlockDescriptorType); 7419 7420 RecordDecl *RD; 7421 // FIXME: Needs the FlagAppleBlock bit. 7422 RD = buildImplicitRecord("__block_descriptor"); 7423 RD->startDefinition(); 7424 7425 QualType FieldTypes[] = { 7426 UnsignedLongTy, 7427 UnsignedLongTy, 7428 }; 7429 7430 static const char *const FieldNames[] = { 7431 "reserved", 7432 "Size" 7433 }; 7434 7435 for (size_t i = 0; i < 2; ++i) { 7436 FieldDecl *Field = FieldDecl::Create( 7437 *this, RD, SourceLocation(), SourceLocation(), 7438 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7439 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 7440 Field->setAccess(AS_public); 7441 RD->addDecl(Field); 7442 } 7443 7444 RD->completeDefinition(); 7445 7446 BlockDescriptorType = RD; 7447 7448 return getTagDeclType(BlockDescriptorType); 7449 } 7450 7451 QualType ASTContext::getBlockDescriptorExtendedType() const { 7452 if (BlockDescriptorExtendedType) 7453 return getTagDeclType(BlockDescriptorExtendedType); 7454 7455 RecordDecl *RD; 7456 // FIXME: Needs the FlagAppleBlock bit. 
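// The record built below corresponds roughly to the blocks runtime's
// extended descriptor (illustrative; the two helpers are modelled simply as
// 'void **' fields here):
//   struct __block_descriptor_withcopydispose {
//     unsigned long reserved;
//     unsigned long Size;
//     void **CopyFuncPtr;
//     void **DestroyFuncPtr;
//   };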
7457 RD = buildImplicitRecord("__block_descriptor_withcopydispose"); 7458 RD->startDefinition(); 7459 7460 QualType FieldTypes[] = { 7461 UnsignedLongTy, 7462 UnsignedLongTy, 7463 getPointerType(VoidPtrTy), 7464 getPointerType(VoidPtrTy) 7465 }; 7466 7467 static const char *const FieldNames[] = { 7468 "reserved", 7469 "Size", 7470 "CopyFuncPtr", 7471 "DestroyFuncPtr" 7472 }; 7473 7474 for (size_t i = 0; i < 4; ++i) { 7475 FieldDecl *Field = FieldDecl::Create( 7476 *this, RD, SourceLocation(), SourceLocation(), 7477 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7478 /*BitWidth=*/nullptr, 7479 /*Mutable=*/false, ICIS_NoInit); 7480 Field->setAccess(AS_public); 7481 RD->addDecl(Field); 7482 } 7483 7484 RD->completeDefinition(); 7485 7486 BlockDescriptorExtendedType = RD; 7487 return getTagDeclType(BlockDescriptorExtendedType); 7488 } 7489 7490 OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const { 7491 const auto *BT = dyn_cast<BuiltinType>(T); 7492 7493 if (!BT) { 7494 if (isa<PipeType>(T)) 7495 return OCLTK_Pipe; 7496 7497 return OCLTK_Default; 7498 } 7499 7500 switch (BT->getKind()) { 7501 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 7502 case BuiltinType::Id: \ 7503 return OCLTK_Image; 7504 #include "clang/Basic/OpenCLImageTypes.def" 7505 7506 case BuiltinType::OCLClkEvent: 7507 return OCLTK_ClkEvent; 7508 7509 case BuiltinType::OCLEvent: 7510 return OCLTK_Event; 7511 7512 case BuiltinType::OCLQueue: 7513 return OCLTK_Queue; 7514 7515 case BuiltinType::OCLReserveID: 7516 return OCLTK_ReserveID; 7517 7518 case BuiltinType::OCLSampler: 7519 return OCLTK_Sampler; 7520 7521 default: 7522 return OCLTK_Default; 7523 } 7524 } 7525 7526 LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const { 7527 return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 7528 } 7529 7530 /// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty" 7531 /// requires copy/dispose. Note that this must match the logic 7532 /// in buildByrefHelpers. 7533 bool ASTContext::BlockRequiresCopying(QualType Ty, 7534 const VarDecl *D) { 7535 if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) { 7536 const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr(); 7537 if (!copyExpr && record->hasTrivialDestructor()) return false; 7538 7539 return true; 7540 } 7541 7542 // The block needs copy/destroy helpers if Ty is non-trivial to destructively 7543 // move or destroy. 7544 if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType()) 7545 return true; 7546 7547 if (!Ty->isObjCRetainableType()) return false; 7548 7549 Qualifiers qs = Ty.getQualifiers(); 7550 7551 // If we have lifetime, that dominates. 7552 if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) { 7553 switch (lifetime) { 7554 case Qualifiers::OCL_None: llvm_unreachable("impossible"); 7555 7556 // These are just bits as far as the runtime is concerned. 7557 case Qualifiers::OCL_ExplicitNone: 7558 case Qualifiers::OCL_Autoreleasing: 7559 return false; 7560 7561 // These cases should have been taken care of when checking the type's 7562 // non-triviality. 
7563 case Qualifiers::OCL_Weak: 7564 case Qualifiers::OCL_Strong: 7565 llvm_unreachable("impossible"); 7566 } 7567 llvm_unreachable("fell out of lifetime switch!"); 7568 } 7569 return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) || 7570 Ty->isObjCObjectPointerType()); 7571 } 7572 7573 bool ASTContext::getByrefLifetime(QualType Ty, 7574 Qualifiers::ObjCLifetime &LifeTime, 7575 bool &HasByrefExtendedLayout) const { 7576 if (!getLangOpts().ObjC || 7577 getLangOpts().getGC() != LangOptions::NonGC) 7578 return false; 7579 7580 HasByrefExtendedLayout = false; 7581 if (Ty->isRecordType()) { 7582 HasByrefExtendedLayout = true; 7583 LifeTime = Qualifiers::OCL_None; 7584 } else if ((LifeTime = Ty.getObjCLifetime())) { 7585 // Honor the ARC qualifiers. 7586 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) { 7587 // The MRR rule. 7588 LifeTime = Qualifiers::OCL_ExplicitNone; 7589 } else { 7590 LifeTime = Qualifiers::OCL_None; 7591 } 7592 return true; 7593 } 7594 7595 CanQualType ASTContext::getNSUIntegerType() const { 7596 assert(Target && "Expected target to be initialized"); 7597 const llvm::Triple &T = Target->getTriple(); 7598 // Windows is LLP64 rather than LP64 7599 if (T.isOSWindows() && T.isArch64Bit()) 7600 return UnsignedLongLongTy; 7601 return UnsignedLongTy; 7602 } 7603 7604 CanQualType ASTContext::getNSIntegerType() const { 7605 assert(Target && "Expected target to be initialized"); 7606 const llvm::Triple &T = Target->getTriple(); 7607 // Windows is LLP64 rather than LP64 7608 if (T.isOSWindows() && T.isArch64Bit()) 7609 return LongLongTy; 7610 return LongTy; 7611 } 7612 7613 TypedefDecl *ASTContext::getObjCInstanceTypeDecl() { 7614 if (!ObjCInstanceTypeDecl) 7615 ObjCInstanceTypeDecl = 7616 buildImplicitTypedef(getObjCIdType(), "instancetype"); 7617 return ObjCInstanceTypeDecl; 7618 } 7619 7620 // This returns true if a type has been typedefed to BOOL: 7621 // typedef <type> BOOL; 7622 static bool isTypeTypedefedAsBOOL(QualType T) { 7623 if (const auto *TT = dyn_cast<TypedefType>(T)) 7624 if (IdentifierInfo *II = TT->getDecl()->getIdentifier()) 7625 return II->isStr("BOOL"); 7626 7627 return false; 7628 } 7629 7630 /// getObjCEncodingTypeSize returns size of type for objective-c encoding 7631 /// purpose. 7632 CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { 7633 if (!type->isIncompleteArrayType() && type->isIncompleteType()) 7634 return CharUnits::Zero(); 7635 7636 CharUnits sz = getTypeSizeInChars(type); 7637 7638 // Make all integer and enum types at least as large as an int 7639 if (sz.isPositive() && type->isIntegralOrEnumerationType()) 7640 sz = std::max(sz, getTypeSizeInChars(IntTy)); 7641 // Treat arrays as pointers, since that's how they're passed in. 7642 else if (type->isArrayType()) 7643 sz = getTypeSizeInChars(VoidPtrTy); 7644 return sz; 7645 } 7646 7647 bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const { 7648 return getTargetInfo().getCXXABI().isMicrosoft() && 7649 VD->isStaticDataMember() && 7650 VD->getType()->isIntegralOrEnumerationType() && 7651 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit(); 7652 } 7653 7654 ASTContext::InlineVariableDefinitionKind 7655 ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const { 7656 if (!VD->isInline()) 7657 return InlineVariableDefinitionKind::None; 7658 7659 // In almost all cases, it's a weak definition. 
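// Illustrative cases (hypothetical declarations):
//   inline int a;                              // Weak
//   struct S { static constexpr int b = 0; };  // implicitly inline member;
//   constexpr int S::b;                        // this file-scope redeclaration
//                                              // makes the definition Strong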
7660 auto *First = VD->getFirstDecl(); 7661 if (First->isInlineSpecified() || !First->isStaticDataMember()) 7662 return InlineVariableDefinitionKind::Weak; 7663 7664 // If there's a file-context declaration in this translation unit, it's a 7665 // non-discardable definition. 7666 for (auto *D : VD->redecls()) 7667 if (D->getLexicalDeclContext()->isFileContext() && 7668 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr())) 7669 return InlineVariableDefinitionKind::Strong; 7670 7671 // If we've not seen one yet, we don't know. 7672 return InlineVariableDefinitionKind::WeakUnknown; 7673 } 7674 7675 static std::string charUnitsToString(const CharUnits &CU) { 7676 return llvm::itostr(CU.getQuantity()); 7677 } 7678 7679 /// getObjCEncodingForBlock - Return the encoded type for this block 7680 /// declaration. 7681 std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const { 7682 std::string S; 7683 7684 const BlockDecl *Decl = Expr->getBlockDecl(); 7685 QualType BlockTy = 7686 Expr->getType()->castAs<BlockPointerType>()->getPointeeType(); 7687 QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType(); 7688 // Encode result type. 7689 if (getLangOpts().EncodeExtendedBlockSig) 7690 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, BlockReturnTy, S, 7691 true /*Extended*/); 7692 else 7693 getObjCEncodingForType(BlockReturnTy, S); 7694 // Compute size of all parameters. 7695 // Start with computing size of a pointer in number of bytes. 7696 // FIXME: There might(should) be a better way of doing this computation! 7697 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7698 CharUnits ParmOffset = PtrSize; 7699 for (auto *PI : Decl->parameters()) { 7700 QualType PType = PI->getType(); 7701 CharUnits sz = getObjCEncodingTypeSize(PType); 7702 if (sz.isZero()) 7703 continue; 7704 assert(sz.isPositive() && "BlockExpr - Incomplete param type"); 7705 ParmOffset += sz; 7706 } 7707 // Size of the argument frame 7708 S += charUnitsToString(ParmOffset); 7709 // Block pointer and offset. 7710 S += "@?0"; 7711 7712 // Argument types. 7713 ParmOffset = PtrSize; 7714 for (auto *PVDecl : Decl->parameters()) { 7715 QualType PType = PVDecl->getOriginalType(); 7716 if (const auto *AT = 7717 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7718 // Use array's original type only if it has known number of 7719 // elements. 7720 if (!isa<ConstantArrayType>(AT)) 7721 PType = PVDecl->getType(); 7722 } else if (PType->isFunctionType()) 7723 PType = PVDecl->getType(); 7724 if (getLangOpts().EncodeExtendedBlockSig) 7725 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType, 7726 S, true /*Extended*/); 7727 else 7728 getObjCEncodingForType(PType, S); 7729 S += charUnitsToString(ParmOffset); 7730 ParmOffset += getObjCEncodingTypeSize(PType); 7731 } 7732 7733 return S; 7734 } 7735 7736 std::string 7737 ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const { 7738 std::string S; 7739 // Encode result type. 7740 getObjCEncodingForType(Decl->getReturnType(), S); 7741 CharUnits ParmOffset; 7742 // Compute size of all parameters. 7743 for (auto *PI : Decl->parameters()) { 7744 QualType PType = PI->getType(); 7745 CharUnits sz = getObjCEncodingTypeSize(PType); 7746 if (sz.isZero()) 7747 continue; 7748 7749 assert(sz.isPositive() && 7750 "getObjCEncodingForFunctionDecl - Incomplete param type"); 7751 ParmOffset += sz; 7752 } 7753 S += charUnitsToString(ParmOffset); 7754 ParmOffset = CharUnits::Zero(); 7755 7756 // Argument types. 
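// Illustrative result (assuming a 4-byte 'int' and 8-byte pointers): for
//   int f(int a, char *p);
// the string built here is "i12i0*4" -- return type, total parameter size,
// then each parameter's encoding followed by its byte offset.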
7757 for (auto *PVDecl : Decl->parameters()) { 7758 QualType PType = PVDecl->getOriginalType(); 7759 if (const auto *AT = 7760 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7761 // Use array's original type only if it has known number of 7762 // elements. 7763 if (!isa<ConstantArrayType>(AT)) 7764 PType = PVDecl->getType(); 7765 } else if (PType->isFunctionType()) 7766 PType = PVDecl->getType(); 7767 getObjCEncodingForType(PType, S); 7768 S += charUnitsToString(ParmOffset); 7769 ParmOffset += getObjCEncodingTypeSize(PType); 7770 } 7771 7772 return S; 7773 } 7774 7775 /// getObjCEncodingForMethodParameter - Return the encoded type for a single 7776 /// method parameter or return type. If Extended, include class names and 7777 /// block object types. 7778 void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, 7779 QualType T, std::string& S, 7780 bool Extended) const { 7781 // Encode type qualifier, 'in', 'inout', etc. for the parameter. 7782 getObjCEncodingForTypeQualifier(QT, S); 7783 // Encode parameter type. 7784 ObjCEncOptions Options = ObjCEncOptions() 7785 .setExpandPointedToStructures() 7786 .setExpandStructures() 7787 .setIsOutermostType(); 7788 if (Extended) 7789 Options.setEncodeBlockParameters().setEncodeClassNames(); 7790 getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr); 7791 } 7792 7793 /// getObjCEncodingForMethodDecl - Return the encoded type for this method 7794 /// declaration. 7795 std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, 7796 bool Extended) const { 7797 // FIXME: This is not very efficient. 7798 // Encode return type. 7799 std::string S; 7800 getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(), 7801 Decl->getReturnType(), S, Extended); 7802 // Compute size of all parameters. 7803 // Start with computing size of a pointer in number of bytes. 7804 // FIXME: There might(should) be a better way of doing this computation! 7805 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7806 // The first two arguments (self and _cmd) are pointers; account for 7807 // their size. 7808 CharUnits ParmOffset = 2 * PtrSize; 7809 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7810 E = Decl->sel_param_end(); PI != E; ++PI) { 7811 QualType PType = (*PI)->getType(); 7812 CharUnits sz = getObjCEncodingTypeSize(PType); 7813 if (sz.isZero()) 7814 continue; 7815 7816 assert(sz.isPositive() && 7817 "getObjCEncodingForMethodDecl - Incomplete param type"); 7818 ParmOffset += sz; 7819 } 7820 S += charUnitsToString(ParmOffset); 7821 S += "@0:"; 7822 S += charUnitsToString(PtrSize); 7823 7824 // Argument types. 7825 ParmOffset = 2 * PtrSize; 7826 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7827 E = Decl->sel_param_end(); PI != E; ++PI) { 7828 const ParmVarDecl *PVDecl = *PI; 7829 QualType PType = PVDecl->getOriginalType(); 7830 if (const auto *AT = 7831 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7832 // Use array's original type only if it has known number of 7833 // elements. 
7834 if (!isa<ConstantArrayType>(AT)) 7835 PType = PVDecl->getType(); 7836 } else if (PType->isFunctionType()) 7837 PType = PVDecl->getType(); 7838 getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(), 7839 PType, S, Extended); 7840 S += charUnitsToString(ParmOffset); 7841 ParmOffset += getObjCEncodingTypeSize(PType); 7842 } 7843 7844 return S; 7845 } 7846 7847 ObjCPropertyImplDecl * 7848 ASTContext::getObjCPropertyImplDeclForPropertyDecl( 7849 const ObjCPropertyDecl *PD, 7850 const Decl *Container) const { 7851 if (!Container) 7852 return nullptr; 7853 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) { 7854 for (auto *PID : CID->property_impls()) 7855 if (PID->getPropertyDecl() == PD) 7856 return PID; 7857 } else { 7858 const auto *OID = cast<ObjCImplementationDecl>(Container); 7859 for (auto *PID : OID->property_impls()) 7860 if (PID->getPropertyDecl() == PD) 7861 return PID; 7862 } 7863 return nullptr; 7864 } 7865 7866 /// getObjCEncodingForPropertyDecl - Return the encoded type for this 7867 /// property declaration. If non-NULL, Container must be either an 7868 /// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be 7869 /// NULL when getting encodings for protocol properties. 7870 /// Property attributes are stored as a comma-delimited C string. The simple 7871 /// attributes readonly and bycopy are encoded as single characters. The 7872 /// parametrized attributes, getter=name, setter=name, and ivar=name, are 7873 /// encoded as single characters, followed by an identifier. Property types 7874 /// are also encoded as a parametrized attribute. The characters used to encode 7875 /// these attributes are defined by the following enumeration: 7876 /// @code 7877 /// enum PropertyAttributes { 7878 /// kPropertyReadOnly = 'R', // property is read-only. 7879 /// kPropertyBycopy = 'C', // property is a copy of the value last assigned 7880 /// kPropertyByref = '&', // property is a reference to the value last assigned 7881 /// kPropertyDynamic = 'D', // property is dynamic 7882 /// kPropertyGetter = 'G', // followed by getter selector name 7883 /// kPropertySetter = 'S', // followed by setter selector name 7884 /// kPropertyInstanceVariable = 'V' // followed by instance variable name 7885 /// kPropertyType = 'T' // followed by old-style type encoding. 7886 /// kPropertyWeak = 'W' // 'weak' property 7887 /// kPropertyStrong = 'P' // property GC'able 7888 /// kPropertyNonAtomic = 'N' // property non-atomic 7889 /// }; 7890 /// @endcode 7891 std::string 7892 ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, 7893 const Decl *Container) const { 7894 // Collect information from the property implementation decl(s). 7895 bool Dynamic = false; 7896 ObjCPropertyImplDecl *SynthesizePID = nullptr; 7897 7898 if (ObjCPropertyImplDecl *PropertyImpDecl = 7899 getObjCPropertyImplDeclForPropertyDecl(PD, Container)) { 7900 if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) 7901 Dynamic = true; 7902 else 7903 SynthesizePID = PropertyImpDecl; 7904 } 7905 7906 // FIXME: This is not very efficient. 7907 std::string S = "T"; 7908 7909 // Encode result type. 7910 // GCC has some special rules regarding encoding of properties which 7911 // closely resembles encoding of ivars. 
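// Illustrative result (hypothetical property): for
//   @property (nonatomic, copy) NSString *name;
// synthesized onto an ivar '_name', the string built here is typically
//   T@"NSString",C,N,V_name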
7912 getObjCEncodingForPropertyType(PD->getType(), S); 7913 7914 if (PD->isReadOnly()) { 7915 S += ",R"; 7916 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy) 7917 S += ",C"; 7918 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain) 7919 S += ",&"; 7920 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak) 7921 S += ",W"; 7922 } else { 7923 switch (PD->getSetterKind()) { 7924 case ObjCPropertyDecl::Assign: break; 7925 case ObjCPropertyDecl::Copy: S += ",C"; break; 7926 case ObjCPropertyDecl::Retain: S += ",&"; break; 7927 case ObjCPropertyDecl::Weak: S += ",W"; break; 7928 } 7929 } 7930 7931 // It really isn't clear at all what this means, since properties 7932 // are "dynamic by default". 7933 if (Dynamic) 7934 S += ",D"; 7935 7936 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic) 7937 S += ",N"; 7938 7939 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) { 7940 S += ",G"; 7941 S += PD->getGetterName().getAsString(); 7942 } 7943 7944 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) { 7945 S += ",S"; 7946 S += PD->getSetterName().getAsString(); 7947 } 7948 7949 if (SynthesizePID) { 7950 const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl(); 7951 S += ",V"; 7952 S += OID->getNameAsString(); 7953 } 7954 7955 // FIXME: OBJCGC: weak & strong 7956 return S; 7957 } 7958 7959 /// getLegacyIntegralTypeEncoding - 7960 /// Another legacy compatibility encoding: 32-bit longs are encoded as 7961 /// 'l' or 'L' , but not always. For typedefs, we need to use 7962 /// 'i' or 'I' instead if encoding a struct field, or a pointer! 7963 void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { 7964 if (PointeeTy->getAs<TypedefType>()) { 7965 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) { 7966 if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) 7967 PointeeTy = UnsignedIntTy; 7968 else 7969 if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32) 7970 PointeeTy = IntTy; 7971 } 7972 } 7973 } 7974 7975 void ASTContext::getObjCEncodingForType(QualType T, std::string& S, 7976 const FieldDecl *Field, 7977 QualType *NotEncodedT) const { 7978 // We follow the behavior of gcc, expanding structures which are 7979 // directly pointed to, and expanding embedded structures. Note that 7980 // these rules are sufficient to prevent recursive encoding of the 7981 // same type. 7982 getObjCEncodingForTypeImpl(T, S, 7983 ObjCEncOptions() 7984 .setExpandPointedToStructures() 7985 .setExpandStructures() 7986 .setIsOutermostType(), 7987 Field, NotEncodedT); 7988 } 7989 7990 void ASTContext::getObjCEncodingForPropertyType(QualType T, 7991 std::string& S) const { 7992 // Encode result type. 7993 // GCC has some special rules regarding encoding of properties which 7994 // closely resembles encoding of ivars. 
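// (Illustrative: this appends "i" for an 'int' property and @"NSArray" for an
// 'NSArray *' property; class names are included because EncodingProperty is
// set, and the leading 'T' is added by the caller.)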
7995 getObjCEncodingForTypeImpl(T, S, 7996 ObjCEncOptions() 7997 .setExpandPointedToStructures() 7998 .setExpandStructures() 7999 .setIsOutermostType() 8000 .setEncodingProperty(), 8001 /*Field=*/nullptr); 8002 } 8003 8004 static char getObjCEncodingForPrimitiveType(const ASTContext *C, 8005 const BuiltinType *BT) { 8006 BuiltinType::Kind kind = BT->getKind(); 8007 switch (kind) { 8008 case BuiltinType::Void: return 'v'; 8009 case BuiltinType::Bool: return 'B'; 8010 case BuiltinType::Char8: 8011 case BuiltinType::Char_U: 8012 case BuiltinType::UChar: return 'C'; 8013 case BuiltinType::Char16: 8014 case BuiltinType::UShort: return 'S'; 8015 case BuiltinType::Char32: 8016 case BuiltinType::UInt: return 'I'; 8017 case BuiltinType::ULong: 8018 return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q'; 8019 case BuiltinType::UInt128: return 'T'; 8020 case BuiltinType::ULongLong: return 'Q'; 8021 case BuiltinType::Char_S: 8022 case BuiltinType::SChar: return 'c'; 8023 case BuiltinType::Short: return 's'; 8024 case BuiltinType::WChar_S: 8025 case BuiltinType::WChar_U: 8026 case BuiltinType::Int: return 'i'; 8027 case BuiltinType::Long: 8028 return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q'; 8029 case BuiltinType::LongLong: return 'q'; 8030 case BuiltinType::Int128: return 't'; 8031 case BuiltinType::Float: return 'f'; 8032 case BuiltinType::Double: return 'd'; 8033 case BuiltinType::LongDouble: return 'D'; 8034 case BuiltinType::NullPtr: return '*'; // like char* 8035 8036 case BuiltinType::BFloat16: 8037 case BuiltinType::Float16: 8038 case BuiltinType::Float128: 8039 case BuiltinType::Ibm128: 8040 case BuiltinType::Half: 8041 case BuiltinType::ShortAccum: 8042 case BuiltinType::Accum: 8043 case BuiltinType::LongAccum: 8044 case BuiltinType::UShortAccum: 8045 case BuiltinType::UAccum: 8046 case BuiltinType::ULongAccum: 8047 case BuiltinType::ShortFract: 8048 case BuiltinType::Fract: 8049 case BuiltinType::LongFract: 8050 case BuiltinType::UShortFract: 8051 case BuiltinType::UFract: 8052 case BuiltinType::ULongFract: 8053 case BuiltinType::SatShortAccum: 8054 case BuiltinType::SatAccum: 8055 case BuiltinType::SatLongAccum: 8056 case BuiltinType::SatUShortAccum: 8057 case BuiltinType::SatUAccum: 8058 case BuiltinType::SatULongAccum: 8059 case BuiltinType::SatShortFract: 8060 case BuiltinType::SatFract: 8061 case BuiltinType::SatLongFract: 8062 case BuiltinType::SatUShortFract: 8063 case BuiltinType::SatUFract: 8064 case BuiltinType::SatULongFract: 8065 // FIXME: potentially need @encodes for these! 8066 return ' '; 8067 8068 #define SVE_TYPE(Name, Id, SingletonId) \ 8069 case BuiltinType::Id: 8070 #include "clang/Basic/AArch64SVEACLETypes.def" 8071 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 8072 #include "clang/Basic/RISCVVTypes.def" 8073 #define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 8074 #include "clang/Basic/WebAssemblyReferenceTypes.def" 8075 { 8076 DiagnosticsEngine &Diags = C->getDiagnostics(); 8077 unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, 8078 "cannot yet @encode type %0"); 8079 Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy()); 8080 return ' '; 8081 } 8082 8083 case BuiltinType::ObjCId: 8084 case BuiltinType::ObjCClass: 8085 case BuiltinType::ObjCSel: 8086 llvm_unreachable("@encoding ObjC primitive type"); 8087 8088 // OpenCL and placeholder types don't need @encodings. 
8089 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 8090 case BuiltinType::Id: 8091 #include "clang/Basic/OpenCLImageTypes.def" 8092 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 8093 case BuiltinType::Id: 8094 #include "clang/Basic/OpenCLExtensionTypes.def" 8095 case BuiltinType::OCLEvent: 8096 case BuiltinType::OCLClkEvent: 8097 case BuiltinType::OCLQueue: 8098 case BuiltinType::OCLReserveID: 8099 case BuiltinType::OCLSampler: 8100 case BuiltinType::Dependent: 8101 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 8102 case BuiltinType::Id: 8103 #include "clang/Basic/PPCTypes.def" 8104 #define BUILTIN_TYPE(KIND, ID) 8105 #define PLACEHOLDER_TYPE(KIND, ID) \ 8106 case BuiltinType::KIND: 8107 #include "clang/AST/BuiltinTypes.def" 8108 llvm_unreachable("invalid builtin type for @encode"); 8109 } 8110 llvm_unreachable("invalid BuiltinType::Kind value"); 8111 } 8112 8113 static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) { 8114 EnumDecl *Enum = ET->getDecl(); 8115 8116 // The encoding of an non-fixed enum type is always 'i', regardless of size. 8117 if (!Enum->isFixed()) 8118 return 'i'; 8119 8120 // The encoding of a fixed enum type matches its fixed underlying type. 8121 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>(); 8122 return getObjCEncodingForPrimitiveType(C, BT); 8123 } 8124 8125 static void EncodeBitField(const ASTContext *Ctx, std::string& S, 8126 QualType T, const FieldDecl *FD) { 8127 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl"); 8128 S += 'b'; 8129 // The NeXT runtime encodes bit fields as b followed by the number of bits. 8130 // The GNU runtime requires more information; bitfields are encoded as b, 8131 // then the offset (in bits) of the first element, then the type of the 8132 // bitfield, then the size in bits. For example, in this structure: 8133 // 8134 // struct 8135 // { 8136 // int integer; 8137 // int flags:2; 8138 // }; 8139 // On a 32-bit system, the encoding for flags would be b2 for the NeXT 8140 // runtime, but b32i2 for the GNU runtime. The reason for this extra 8141 // information is not especially sensible, but we're stuck with it for 8142 // compatibility with GCC, although providing it breaks anything that 8143 // actually uses runtime introspection and wants to work on both runtimes... 8144 if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) { 8145 uint64_t Offset; 8146 8147 if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) { 8148 Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr, 8149 IVD); 8150 } else { 8151 const RecordDecl *RD = FD->getParent(); 8152 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD); 8153 Offset = RL.getFieldOffset(FD->getFieldIndex()); 8154 } 8155 8156 S += llvm::utostr(Offset); 8157 8158 if (const auto *ET = T->getAs<EnumType>()) 8159 S += ObjCEncodingForEnumType(Ctx, ET); 8160 else { 8161 const auto *BT = T->castAs<BuiltinType>(); 8162 S += getObjCEncodingForPrimitiveType(Ctx, BT); 8163 } 8164 } 8165 S += llvm::utostr(FD->getBitWidthValue(*Ctx)); 8166 } 8167 8168 // Helper function for determining whether the encoded type string would include 8169 // a template specialization type. 
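// (Illustrative: it returns true for 'std::vector<int>' itself and, when
// VisitBasesAndFields is set, for any class whose bases or fields involve such
// a specialization; the caller below uses this to emit just "^v" for pointers
// to those classes unless EncodeCXXClassTemplateSpec is enabled.)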
8170 static bool hasTemplateSpecializationInEncodedString(const Type *T, 8171 bool VisitBasesAndFields) { 8172 T = T->getBaseElementTypeUnsafe(); 8173 8174 if (auto *PT = T->getAs<PointerType>()) 8175 return hasTemplateSpecializationInEncodedString( 8176 PT->getPointeeType().getTypePtr(), false); 8177 8178 auto *CXXRD = T->getAsCXXRecordDecl(); 8179 8180 if (!CXXRD) 8181 return false; 8182 8183 if (isa<ClassTemplateSpecializationDecl>(CXXRD)) 8184 return true; 8185 8186 if (!CXXRD->hasDefinition() || !VisitBasesAndFields) 8187 return false; 8188 8189 for (const auto &B : CXXRD->bases()) 8190 if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(), 8191 true)) 8192 return true; 8193 8194 for (auto *FD : CXXRD->fields()) 8195 if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(), 8196 true)) 8197 return true; 8198 8199 return false; 8200 } 8201 8202 // FIXME: Use SmallString for accumulating string. 8203 void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, 8204 const ObjCEncOptions Options, 8205 const FieldDecl *FD, 8206 QualType *NotEncodedT) const { 8207 CanQualType CT = getCanonicalType(T); 8208 switch (CT->getTypeClass()) { 8209 case Type::Builtin: 8210 case Type::Enum: 8211 if (FD && FD->isBitField()) 8212 return EncodeBitField(this, S, T, FD); 8213 if (const auto *BT = dyn_cast<BuiltinType>(CT)) 8214 S += getObjCEncodingForPrimitiveType(this, BT); 8215 else 8216 S += ObjCEncodingForEnumType(this, cast<EnumType>(CT)); 8217 return; 8218 8219 case Type::Complex: 8220 S += 'j'; 8221 getObjCEncodingForTypeImpl(T->castAs<ComplexType>()->getElementType(), S, 8222 ObjCEncOptions(), 8223 /*Field=*/nullptr); 8224 return; 8225 8226 case Type::Atomic: 8227 S += 'A'; 8228 getObjCEncodingForTypeImpl(T->castAs<AtomicType>()->getValueType(), S, 8229 ObjCEncOptions(), 8230 /*Field=*/nullptr); 8231 return; 8232 8233 // encoding for pointer or reference types. 8234 case Type::Pointer: 8235 case Type::LValueReference: 8236 case Type::RValueReference: { 8237 QualType PointeeTy; 8238 if (isa<PointerType>(CT)) { 8239 const auto *PT = T->castAs<PointerType>(); 8240 if (PT->isObjCSelType()) { 8241 S += ':'; 8242 return; 8243 } 8244 PointeeTy = PT->getPointeeType(); 8245 } else { 8246 PointeeTy = T->castAs<ReferenceType>()->getPointeeType(); 8247 } 8248 8249 bool isReadOnly = false; 8250 // For historical/compatibility reasons, the read-only qualifier of the 8251 // pointee gets emitted _before_ the '^'. The read-only qualifier of 8252 // the pointer itself gets ignored, _unless_ we are looking at a typedef! 8253 // Also, do not emit the 'r' for anything but the outermost type! 8254 if (T->getAs<TypedefType>()) { 8255 if (Options.IsOutermostType() && T.isConstQualified()) { 8256 isReadOnly = true; 8257 S += 'r'; 8258 } 8259 } else if (Options.IsOutermostType()) { 8260 QualType P = PointeeTy; 8261 while (auto PT = P->getAs<PointerType>()) 8262 P = PT->getPointeeType(); 8263 if (P.isConstQualified()) { 8264 isReadOnly = true; 8265 S += 'r'; 8266 } 8267 } 8268 if (isReadOnly) { 8269 // Another legacy compatibility encoding. Some ObjC qualifier and type 8270 // combinations need to be rearranged. 8271 // Rewrite "in const" from "nr" to "rn" 8272 if (StringRef(S).endswith("nr")) 8273 S.replace(S.end()-2, S.end(), "rn"); 8274 } 8275 8276 if (PointeeTy->isCharType()) { 8277 // char pointer types should be encoded as '*' unless it is a 8278 // type that has been typedef'd to 'BOOL'. 
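// (Illustrative: @encode(char *) is "*", while a 'BOOL *', on targets where
// BOOL is a typedef for signed char, takes the generic path below and is
// encoded as "^c".)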
8279 if (!isTypeTypedefedAsBOOL(PointeeTy)) { 8280 S += '*'; 8281 return; 8282 } 8283 } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) { 8284 // GCC binary compat: Need to convert "struct objc_class *" to "#". 8285 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) { 8286 S += '#'; 8287 return; 8288 } 8289 // GCC binary compat: Need to convert "struct objc_object *" to "@". 8290 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) { 8291 S += '@'; 8292 return; 8293 } 8294 // If the encoded string for the class includes template names, just emit 8295 // "^v" for pointers to the class. 8296 if (getLangOpts().CPlusPlus && 8297 (!getLangOpts().EncodeCXXClassTemplateSpec && 8298 hasTemplateSpecializationInEncodedString( 8299 RTy, Options.ExpandPointedToStructures()))) { 8300 S += "^v"; 8301 return; 8302 } 8303 // fall through... 8304 } 8305 S += '^'; 8306 getLegacyIntegralTypeEncoding(PointeeTy); 8307 8308 ObjCEncOptions NewOptions; 8309 if (Options.ExpandPointedToStructures()) 8310 NewOptions.setExpandStructures(); 8311 getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions, 8312 /*Field=*/nullptr, NotEncodedT); 8313 return; 8314 } 8315 8316 case Type::ConstantArray: 8317 case Type::IncompleteArray: 8318 case Type::VariableArray: { 8319 const auto *AT = cast<ArrayType>(CT); 8320 8321 if (isa<IncompleteArrayType>(AT) && !Options.IsStructField()) { 8322 // Incomplete arrays are encoded as a pointer to the array element. 8323 S += '^'; 8324 8325 getObjCEncodingForTypeImpl( 8326 AT->getElementType(), S, 8327 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD); 8328 } else { 8329 S += '['; 8330 8331 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) 8332 S += llvm::utostr(CAT->getSize().getZExtValue()); 8333 else { 8334 //Variable length arrays are encoded as a regular array with 0 elements. 8335 assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) && 8336 "Unknown array type!"); 8337 S += '0'; 8338 } 8339 8340 getObjCEncodingForTypeImpl( 8341 AT->getElementType(), S, 8342 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD, 8343 NotEncodedT); 8344 S += ']'; 8345 } 8346 return; 8347 } 8348 8349 case Type::FunctionNoProto: 8350 case Type::FunctionProto: 8351 S += '?'; 8352 return; 8353 8354 case Type::Record: { 8355 RecordDecl *RDecl = cast<RecordType>(CT)->getDecl(); 8356 S += RDecl->isUnion() ? '(' : '{'; 8357 // Anonymous structures print as '?' 8358 if (const IdentifierInfo *II = RDecl->getIdentifier()) { 8359 S += II->getName(); 8360 if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) { 8361 const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); 8362 llvm::raw_string_ostream OS(S); 8363 printTemplateArgumentList(OS, TemplateArgs.asArray(), 8364 getPrintingPolicy()); 8365 } 8366 } else { 8367 S += '?'; 8368 } 8369 if (Options.ExpandStructures()) { 8370 S += '='; 8371 if (!RDecl->isUnion()) { 8372 getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT); 8373 } else { 8374 for (const auto *Field : RDecl->fields()) { 8375 if (FD) { 8376 S += '"'; 8377 S += Field->getNameAsString(); 8378 S += '"'; 8379 } 8380 8381 // Special case bit-fields. 
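// (A 2-bit field, for instance, contributes "b2" on the NeXT runtime; see
// EncodeBitField above for the richer GNU-runtime form.)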
8382 if (Field->isBitField()) { 8383 getObjCEncodingForTypeImpl(Field->getType(), S, 8384 ObjCEncOptions().setExpandStructures(), 8385 Field); 8386 } else { 8387 QualType qt = Field->getType(); 8388 getLegacyIntegralTypeEncoding(qt); 8389 getObjCEncodingForTypeImpl( 8390 qt, S, 8391 ObjCEncOptions().setExpandStructures().setIsStructField(), FD, 8392 NotEncodedT); 8393 } 8394 } 8395 } 8396 } 8397 S += RDecl->isUnion() ? ')' : '}'; 8398 return; 8399 } 8400 8401 case Type::BlockPointer: { 8402 const auto *BT = T->castAs<BlockPointerType>(); 8403 S += "@?"; // Unlike a pointer-to-function, which is "^?". 8404 if (Options.EncodeBlockParameters()) { 8405 const auto *FT = BT->getPointeeType()->castAs<FunctionType>(); 8406 8407 S += '<'; 8408 // Block return type 8409 getObjCEncodingForTypeImpl(FT->getReturnType(), S, 8410 Options.forComponentType(), FD, NotEncodedT); 8411 // Block self 8412 S += "@?"; 8413 // Block parameters 8414 if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) { 8415 for (const auto &I : FPT->param_types()) 8416 getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD, 8417 NotEncodedT); 8418 } 8419 S += '>'; 8420 } 8421 return; 8422 } 8423 8424 case Type::ObjCObject: { 8425 // hack to match legacy encoding of *id and *Class 8426 QualType Ty = getObjCObjectPointerType(CT); 8427 if (Ty->isObjCIdType()) { 8428 S += "{objc_object=}"; 8429 return; 8430 } 8431 else if (Ty->isObjCClassType()) { 8432 S += "{objc_class=}"; 8433 return; 8434 } 8435 // TODO: Double check to make sure this intentionally falls through. 8436 [[fallthrough]]; 8437 } 8438 8439 case Type::ObjCInterface: { 8440 // Ignore protocol qualifiers when mangling at this level. 8441 // @encode(class_name) 8442 ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface(); 8443 S += '{'; 8444 S += OI->getObjCRuntimeNameAsString(); 8445 if (Options.ExpandStructures()) { 8446 S += '='; 8447 SmallVector<const ObjCIvarDecl*, 32> Ivars; 8448 DeepCollectObjCIvars(OI, true, Ivars); 8449 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { 8450 const FieldDecl *Field = Ivars[i]; 8451 if (Field->isBitField()) 8452 getObjCEncodingForTypeImpl(Field->getType(), S, 8453 ObjCEncOptions().setExpandStructures(), 8454 Field); 8455 else 8456 getObjCEncodingForTypeImpl(Field->getType(), S, 8457 ObjCEncOptions().setExpandStructures(), FD, 8458 NotEncodedT); 8459 } 8460 } 8461 S += '}'; 8462 return; 8463 } 8464 8465 case Type::ObjCObjectPointer: { 8466 const auto *OPT = T->castAs<ObjCObjectPointerType>(); 8467 if (OPT->isObjCIdType()) { 8468 S += '@'; 8469 return; 8470 } 8471 8472 if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) { 8473 // FIXME: Consider if we need to output qualifiers for 'Class<p>'. 8474 // Since this is a binary compatibility issue, need to consult with 8475 // runtime folks. Fortunately, this is a *very* obscure construct. 8476 S += '#'; 8477 return; 8478 } 8479 8480 if (OPT->isObjCQualifiedIdType()) { 8481 getObjCEncodingForTypeImpl( 8482 getObjCIdType(), S, 8483 Options.keepingOnly(ObjCEncOptions() 8484 .setExpandPointedToStructures() 8485 .setExpandStructures()), 8486 FD); 8487 if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) { 8488 // Note that we do extended encoding of protocol qualifier list 8489 // Only when doing ivar or property encoding. 
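// (Illustrative: an ivar declared 'id<NSCopying> obj' is therefore encoded as
// @"<NSCopying>" rather than a plain '@'.)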
8490 S += '"'; 8491 for (const auto *I : OPT->quals()) { 8492 S += '<'; 8493 S += I->getObjCRuntimeNameAsString(); 8494 S += '>'; 8495 } 8496 S += '"'; 8497 } 8498 return; 8499 } 8500 8501 S += '@'; 8502 if (OPT->getInterfaceDecl() && 8503 (FD || Options.EncodingProperty() || Options.EncodeClassNames())) { 8504 S += '"'; 8505 S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString(); 8506 for (const auto *I : OPT->quals()) { 8507 S += '<'; 8508 S += I->getObjCRuntimeNameAsString(); 8509 S += '>'; 8510 } 8511 S += '"'; 8512 } 8513 return; 8514 } 8515 8516 // gcc just blithely ignores member pointers. 8517 // FIXME: we should do better than that. 'M' is available. 8518 case Type::MemberPointer: 8519 // This matches gcc's encoding, even though technically it is insufficient. 8520 //FIXME. We should do a better job than gcc. 8521 case Type::Vector: 8522 case Type::ExtVector: 8523 // Until we have a coherent encoding of these three types, issue warning. 8524 if (NotEncodedT) 8525 *NotEncodedT = T; 8526 return; 8527 8528 case Type::ConstantMatrix: 8529 if (NotEncodedT) 8530 *NotEncodedT = T; 8531 return; 8532 8533 case Type::BitInt: 8534 if (NotEncodedT) 8535 *NotEncodedT = T; 8536 return; 8537 8538 // We could see an undeduced auto type here during error recovery. 8539 // Just ignore it. 8540 case Type::Auto: 8541 case Type::DeducedTemplateSpecialization: 8542 return; 8543 8544 case Type::Pipe: 8545 #define ABSTRACT_TYPE(KIND, BASE) 8546 #define TYPE(KIND, BASE) 8547 #define DEPENDENT_TYPE(KIND, BASE) \ 8548 case Type::KIND: 8549 #define NON_CANONICAL_TYPE(KIND, BASE) \ 8550 case Type::KIND: 8551 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \ 8552 case Type::KIND: 8553 #include "clang/AST/TypeNodes.inc" 8554 llvm_unreachable("@encode for dependent type!"); 8555 } 8556 llvm_unreachable("bad type kind!"); 8557 } 8558 8559 void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, 8560 std::string &S, 8561 const FieldDecl *FD, 8562 bool includeVBases, 8563 QualType *NotEncodedT) const { 8564 assert(RDecl && "Expected non-null RecordDecl"); 8565 assert(!RDecl->isUnion() && "Should not be called for unions"); 8566 if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl()) 8567 return; 8568 8569 const auto *CXXRec = dyn_cast<CXXRecordDecl>(RDecl); 8570 std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets; 8571 const ASTRecordLayout &layout = getASTRecordLayout(RDecl); 8572 8573 if (CXXRec) { 8574 for (const auto &BI : CXXRec->bases()) { 8575 if (!BI.isVirtual()) { 8576 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8577 if (base->isEmpty()) 8578 continue; 8579 uint64_t offs = toBits(layout.getBaseClassOffset(base)); 8580 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8581 std::make_pair(offs, base)); 8582 } 8583 } 8584 } 8585 8586 unsigned i = 0; 8587 for (FieldDecl *Field : RDecl->fields()) { 8588 if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this)) 8589 continue; 8590 uint64_t offs = layout.getFieldOffset(i); 8591 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8592 std::make_pair(offs, Field)); 8593 ++i; 8594 } 8595 8596 if (CXXRec && includeVBases) { 8597 for (const auto &BI : CXXRec->vbases()) { 8598 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8599 if (base->isEmpty()) 8600 continue; 8601 uint64_t offs = toBits(layout.getVBaseClassOffset(base)); 8602 if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) && 8603 FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end()) 8604 
FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(), 8605 std::make_pair(offs, base)); 8606 } 8607 } 8608 8609 CharUnits size; 8610 if (CXXRec) { 8611 size = includeVBases ? layout.getSize() : layout.getNonVirtualSize(); 8612 } else { 8613 size = layout.getSize(); 8614 } 8615 8616 #ifndef NDEBUG 8617 uint64_t CurOffs = 0; 8618 #endif 8619 std::multimap<uint64_t, NamedDecl *>::iterator 8620 CurLayObj = FieldOrBaseOffsets.begin(); 8621 8622 if (CXXRec && CXXRec->isDynamicClass() && 8623 (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) { 8624 if (FD) { 8625 S += "\"_vptr$"; 8626 std::string recname = CXXRec->getNameAsString(); 8627 if (recname.empty()) recname = "?"; 8628 S += recname; 8629 S += '"'; 8630 } 8631 S += "^^?"; 8632 #ifndef NDEBUG 8633 CurOffs += getTypeSize(VoidPtrTy); 8634 #endif 8635 } 8636 8637 if (!RDecl->hasFlexibleArrayMember()) { 8638 // Mark the end of the structure. 8639 uint64_t offs = toBits(size); 8640 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8641 std::make_pair(offs, nullptr)); 8642 } 8643 8644 for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) { 8645 #ifndef NDEBUG 8646 assert(CurOffs <= CurLayObj->first); 8647 if (CurOffs < CurLayObj->first) { 8648 uint64_t padding = CurLayObj->first - CurOffs; 8649 // FIXME: There doesn't seem to be a way to indicate in the encoding that 8650 // packing/alignment of members is different that normal, in which case 8651 // the encoding will be out-of-sync with the real layout. 8652 // If the runtime switches to just consider the size of types without 8653 // taking into account alignment, we could make padding explicit in the 8654 // encoding (e.g. using arrays of chars). The encoding strings would be 8655 // longer then though. 8656 CurOffs += padding; 8657 } 8658 #endif 8659 8660 NamedDecl *dcl = CurLayObj->second; 8661 if (!dcl) 8662 break; // reached end of structure. 8663 8664 if (auto *base = dyn_cast<CXXRecordDecl>(dcl)) { 8665 // We expand the bases without their virtual bases since those are going 8666 // in the initial structure. Note that this differs from gcc which 8667 // expands virtual bases each time one is encountered in the hierarchy, 8668 // making the encoding type bigger than it really is. 
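// (Illustrative: with 'struct V { int v; }; struct A : virtual V { int a; };
// struct D : A { int d; };', A is expanded here without V, and V's field is
// emitted once at its final offset in D.)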
8669 getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false, 8670 NotEncodedT); 8671 assert(!base->isEmpty()); 8672 #ifndef NDEBUG 8673 CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize()); 8674 #endif 8675 } else { 8676 const auto *field = cast<FieldDecl>(dcl); 8677 if (FD) { 8678 S += '"'; 8679 S += field->getNameAsString(); 8680 S += '"'; 8681 } 8682 8683 if (field->isBitField()) { 8684 EncodeBitField(this, S, field->getType(), field); 8685 #ifndef NDEBUG 8686 CurOffs += field->getBitWidthValue(*this); 8687 #endif 8688 } else { 8689 QualType qt = field->getType(); 8690 getLegacyIntegralTypeEncoding(qt); 8691 getObjCEncodingForTypeImpl( 8692 qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(), 8693 FD, NotEncodedT); 8694 #ifndef NDEBUG 8695 CurOffs += getTypeSize(field->getType()); 8696 #endif 8697 } 8698 } 8699 } 8700 } 8701 8702 void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, 8703 std::string& S) const { 8704 if (QT & Decl::OBJC_TQ_In) 8705 S += 'n'; 8706 if (QT & Decl::OBJC_TQ_Inout) 8707 S += 'N'; 8708 if (QT & Decl::OBJC_TQ_Out) 8709 S += 'o'; 8710 if (QT & Decl::OBJC_TQ_Bycopy) 8711 S += 'O'; 8712 if (QT & Decl::OBJC_TQ_Byref) 8713 S += 'R'; 8714 if (QT & Decl::OBJC_TQ_Oneway) 8715 S += 'V'; 8716 } 8717 8718 TypedefDecl *ASTContext::getObjCIdDecl() const { 8719 if (!ObjCIdDecl) { 8720 QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {}); 8721 T = getObjCObjectPointerType(T); 8722 ObjCIdDecl = buildImplicitTypedef(T, "id"); 8723 } 8724 return ObjCIdDecl; 8725 } 8726 8727 TypedefDecl *ASTContext::getObjCSelDecl() const { 8728 if (!ObjCSelDecl) { 8729 QualType T = getPointerType(ObjCBuiltinSelTy); 8730 ObjCSelDecl = buildImplicitTypedef(T, "SEL"); 8731 } 8732 return ObjCSelDecl; 8733 } 8734 8735 TypedefDecl *ASTContext::getObjCClassDecl() const { 8736 if (!ObjCClassDecl) { 8737 QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {}); 8738 T = getObjCObjectPointerType(T); 8739 ObjCClassDecl = buildImplicitTypedef(T, "Class"); 8740 } 8741 return ObjCClassDecl; 8742 } 8743 8744 ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const { 8745 if (!ObjCProtocolClassDecl) { 8746 ObjCProtocolClassDecl 8747 = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(), 8748 SourceLocation(), 8749 &Idents.get("Protocol"), 8750 /*typeParamList=*/nullptr, 8751 /*PrevDecl=*/nullptr, 8752 SourceLocation(), true); 8753 } 8754 8755 return ObjCProtocolClassDecl; 8756 } 8757 8758 //===----------------------------------------------------------------------===// 8759 // __builtin_va_list Construction Functions 8760 //===----------------------------------------------------------------------===// 8761 8762 static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context, 8763 StringRef Name) { 8764 // typedef char* __builtin[_ms]_va_list; 8765 QualType T = Context->getPointerType(Context->CharTy); 8766 return Context->buildImplicitTypedef(T, Name); 8767 } 8768 8769 static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) { 8770 return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list"); 8771 } 8772 8773 static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) { 8774 return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list"); 8775 } 8776 8777 static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { 8778 // typedef void* __builtin_va_list; 8779 QualType T = Context->getPointerType(Context->VoidTy); 8780 return Context->buildImplicitTypedef(T, 
"__builtin_va_list"); 8781 } 8782 8783 static TypedefDecl * 8784 CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { 8785 // struct __va_list 8786 RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list"); 8787 if (Context->getLangOpts().CPlusPlus) { 8788 // namespace std { struct __va_list { 8789 auto *NS = NamespaceDecl::Create( 8790 const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(), 8791 /*Inline=*/false, SourceLocation(), SourceLocation(), 8792 &Context->Idents.get("std"), 8793 /*PrevDecl=*/nullptr, /*Nested=*/false); 8794 NS->setImplicit(); 8795 VaListTagDecl->setDeclContext(NS); 8796 } 8797 8798 VaListTagDecl->startDefinition(); 8799 8800 const size_t NumFields = 5; 8801 QualType FieldTypes[NumFields]; 8802 const char *FieldNames[NumFields]; 8803 8804 // void *__stack; 8805 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 8806 FieldNames[0] = "__stack"; 8807 8808 // void *__gr_top; 8809 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 8810 FieldNames[1] = "__gr_top"; 8811 8812 // void *__vr_top; 8813 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8814 FieldNames[2] = "__vr_top"; 8815 8816 // int __gr_offs; 8817 FieldTypes[3] = Context->IntTy; 8818 FieldNames[3] = "__gr_offs"; 8819 8820 // int __vr_offs; 8821 FieldTypes[4] = Context->IntTy; 8822 FieldNames[4] = "__vr_offs"; 8823 8824 // Create fields 8825 for (unsigned i = 0; i < NumFields; ++i) { 8826 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8827 VaListTagDecl, 8828 SourceLocation(), 8829 SourceLocation(), 8830 &Context->Idents.get(FieldNames[i]), 8831 FieldTypes[i], /*TInfo=*/nullptr, 8832 /*BitWidth=*/nullptr, 8833 /*Mutable=*/false, 8834 ICIS_NoInit); 8835 Field->setAccess(AS_public); 8836 VaListTagDecl->addDecl(Field); 8837 } 8838 VaListTagDecl->completeDefinition(); 8839 Context->VaListTagDecl = VaListTagDecl; 8840 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8841 8842 // } __builtin_va_list; 8843 return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list"); 8844 } 8845 8846 static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { 8847 // typedef struct __va_list_tag { 8848 RecordDecl *VaListTagDecl; 8849 8850 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8851 VaListTagDecl->startDefinition(); 8852 8853 const size_t NumFields = 5; 8854 QualType FieldTypes[NumFields]; 8855 const char *FieldNames[NumFields]; 8856 8857 // unsigned char gpr; 8858 FieldTypes[0] = Context->UnsignedCharTy; 8859 FieldNames[0] = "gpr"; 8860 8861 // unsigned char fpr; 8862 FieldTypes[1] = Context->UnsignedCharTy; 8863 FieldNames[1] = "fpr"; 8864 8865 // unsigned short reserved; 8866 FieldTypes[2] = Context->UnsignedShortTy; 8867 FieldNames[2] = "reserved"; 8868 8869 // void* overflow_arg_area; 8870 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8871 FieldNames[3] = "overflow_arg_area"; 8872 8873 // void* reg_save_area; 8874 FieldTypes[4] = Context->getPointerType(Context->VoidTy); 8875 FieldNames[4] = "reg_save_area"; 8876 8877 // Create fields 8878 for (unsigned i = 0; i < NumFields; ++i) { 8879 FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl, 8880 SourceLocation(), 8881 SourceLocation(), 8882 &Context->Idents.get(FieldNames[i]), 8883 FieldTypes[i], /*TInfo=*/nullptr, 8884 /*BitWidth=*/nullptr, 8885 /*Mutable=*/false, 8886 ICIS_NoInit); 8887 Field->setAccess(AS_public); 8888 VaListTagDecl->addDecl(Field); 8889 } 8890 VaListTagDecl->completeDefinition(); 8891 
Context->VaListTagDecl = VaListTagDecl; 8892 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8893 8894 // } __va_list_tag; 8895 TypedefDecl *VaListTagTypedefDecl = 8896 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 8897 8898 QualType VaListTagTypedefType = 8899 Context->getTypedefType(VaListTagTypedefDecl); 8900 8901 // typedef __va_list_tag __builtin_va_list[1]; 8902 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8903 QualType VaListTagArrayType 8904 = Context->getConstantArrayType(VaListTagTypedefType, 8905 Size, nullptr, ArrayType::Normal, 0); 8906 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8907 } 8908 8909 static TypedefDecl * 8910 CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) { 8911 // struct __va_list_tag { 8912 RecordDecl *VaListTagDecl; 8913 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8914 VaListTagDecl->startDefinition(); 8915 8916 const size_t NumFields = 4; 8917 QualType FieldTypes[NumFields]; 8918 const char *FieldNames[NumFields]; 8919 8920 // unsigned gp_offset; 8921 FieldTypes[0] = Context->UnsignedIntTy; 8922 FieldNames[0] = "gp_offset"; 8923 8924 // unsigned fp_offset; 8925 FieldTypes[1] = Context->UnsignedIntTy; 8926 FieldNames[1] = "fp_offset"; 8927 8928 // void* overflow_arg_area; 8929 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8930 FieldNames[2] = "overflow_arg_area"; 8931 8932 // void* reg_save_area; 8933 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8934 FieldNames[3] = "reg_save_area"; 8935 8936 // Create fields 8937 for (unsigned i = 0; i < NumFields; ++i) { 8938 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8939 VaListTagDecl, 8940 SourceLocation(), 8941 SourceLocation(), 8942 &Context->Idents.get(FieldNames[i]), 8943 FieldTypes[i], /*TInfo=*/nullptr, 8944 /*BitWidth=*/nullptr, 8945 /*Mutable=*/false, 8946 ICIS_NoInit); 8947 Field->setAccess(AS_public); 8948 VaListTagDecl->addDecl(Field); 8949 } 8950 VaListTagDecl->completeDefinition(); 8951 Context->VaListTagDecl = VaListTagDecl; 8952 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8953 8954 // }; 8955 8956 // typedef struct __va_list_tag __builtin_va_list[1]; 8957 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8958 QualType VaListTagArrayType = Context->getConstantArrayType( 8959 VaListTagType, Size, nullptr, ArrayType::Normal, 0); 8960 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8961 } 8962 8963 static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { 8964 // typedef int __builtin_va_list[4]; 8965 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); 8966 QualType IntArrayType = Context->getConstantArrayType( 8967 Context->IntTy, Size, nullptr, ArrayType::Normal, 0); 8968 return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list"); 8969 } 8970 8971 static TypedefDecl * 8972 CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { 8973 // struct __va_list 8974 RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list"); 8975 if (Context->getLangOpts().CPlusPlus) { 8976 // namespace std { struct __va_list { 8977 NamespaceDecl *NS; 8978 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), 8979 Context->getTranslationUnitDecl(), 8980 /*Inline=*/false, SourceLocation(), 8981 SourceLocation(), &Context->Idents.get("std"), 8982 /*PrevDecl=*/nullptr, /*Nested=*/false); 8983 NS->setImplicit(); 8984 
VaListDecl->setDeclContext(NS); 8985 } 8986 8987 VaListDecl->startDefinition(); 8988 8989 // void * __ap; 8990 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8991 VaListDecl, 8992 SourceLocation(), 8993 SourceLocation(), 8994 &Context->Idents.get("__ap"), 8995 Context->getPointerType(Context->VoidTy), 8996 /*TInfo=*/nullptr, 8997 /*BitWidth=*/nullptr, 8998 /*Mutable=*/false, 8999 ICIS_NoInit); 9000 Field->setAccess(AS_public); 9001 VaListDecl->addDecl(Field); 9002 9003 // }; 9004 VaListDecl->completeDefinition(); 9005 Context->VaListTagDecl = VaListDecl; 9006 9007 // typedef struct __va_list __builtin_va_list; 9008 QualType T = Context->getRecordType(VaListDecl); 9009 return Context->buildImplicitTypedef(T, "__builtin_va_list"); 9010 } 9011 9012 static TypedefDecl * 9013 CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { 9014 // struct __va_list_tag { 9015 RecordDecl *VaListTagDecl; 9016 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 9017 VaListTagDecl->startDefinition(); 9018 9019 const size_t NumFields = 4; 9020 QualType FieldTypes[NumFields]; 9021 const char *FieldNames[NumFields]; 9022 9023 // long __gpr; 9024 FieldTypes[0] = Context->LongTy; 9025 FieldNames[0] = "__gpr"; 9026 9027 // long __fpr; 9028 FieldTypes[1] = Context->LongTy; 9029 FieldNames[1] = "__fpr"; 9030 9031 // void *__overflow_arg_area; 9032 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 9033 FieldNames[2] = "__overflow_arg_area"; 9034 9035 // void *__reg_save_area; 9036 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 9037 FieldNames[3] = "__reg_save_area"; 9038 9039 // Create fields 9040 for (unsigned i = 0; i < NumFields; ++i) { 9041 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 9042 VaListTagDecl, 9043 SourceLocation(), 9044 SourceLocation(), 9045 &Context->Idents.get(FieldNames[i]), 9046 FieldTypes[i], /*TInfo=*/nullptr, 9047 /*BitWidth=*/nullptr, 9048 /*Mutable=*/false, 9049 ICIS_NoInit); 9050 Field->setAccess(AS_public); 9051 VaListTagDecl->addDecl(Field); 9052 } 9053 VaListTagDecl->completeDefinition(); 9054 Context->VaListTagDecl = VaListTagDecl; 9055 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 9056 9057 // }; 9058 9059 // typedef __va_list_tag __builtin_va_list[1]; 9060 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 9061 QualType VaListTagArrayType = Context->getConstantArrayType( 9062 VaListTagType, Size, nullptr, ArrayType::Normal, 0); 9063 9064 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 9065 } 9066 9067 static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) { 9068 // typedef struct __va_list_tag { 9069 RecordDecl *VaListTagDecl; 9070 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 9071 VaListTagDecl->startDefinition(); 9072 9073 const size_t NumFields = 3; 9074 QualType FieldTypes[NumFields]; 9075 const char *FieldNames[NumFields]; 9076 9077 // void *CurrentSavedRegisterArea; 9078 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 9079 FieldNames[0] = "__current_saved_reg_area_pointer"; 9080 9081 // void *SavedRegAreaEnd; 9082 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 9083 FieldNames[1] = "__saved_reg_area_end_pointer"; 9084 9085 // void *OverflowArea; 9086 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 9087 FieldNames[2] = "__overflow_area_pointer"; 9088 9089 // Create fields 9090 for (unsigned i = 0; i < NumFields; ++i) { 9091 FieldDecl *Field = FieldDecl::Create( 
9092 const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(), 9093 SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], 9094 /*TInfo=*/nullptr, 9095 /*BitWidth=*/nullptr, 9096 /*Mutable=*/false, ICIS_NoInit); 9097 Field->setAccess(AS_public); 9098 VaListTagDecl->addDecl(Field); 9099 } 9100 VaListTagDecl->completeDefinition(); 9101 Context->VaListTagDecl = VaListTagDecl; 9102 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 9103 9104 // } __va_list_tag; 9105 TypedefDecl *VaListTagTypedefDecl = 9106 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 9107 9108 QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl); 9109 9110 // typedef __va_list_tag __builtin_va_list[1]; 9111 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 9112 QualType VaListTagArrayType = Context->getConstantArrayType( 9113 VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0); 9114 9115 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 9116 } 9117 9118 static TypedefDecl *CreateVaListDecl(const ASTContext *Context, 9119 TargetInfo::BuiltinVaListKind Kind) { 9120 switch (Kind) { 9121 case TargetInfo::CharPtrBuiltinVaList: 9122 return CreateCharPtrBuiltinVaListDecl(Context); 9123 case TargetInfo::VoidPtrBuiltinVaList: 9124 return CreateVoidPtrBuiltinVaListDecl(Context); 9125 case TargetInfo::AArch64ABIBuiltinVaList: 9126 return CreateAArch64ABIBuiltinVaListDecl(Context); 9127 case TargetInfo::PowerABIBuiltinVaList: 9128 return CreatePowerABIBuiltinVaListDecl(Context); 9129 case TargetInfo::X86_64ABIBuiltinVaList: 9130 return CreateX86_64ABIBuiltinVaListDecl(Context); 9131 case TargetInfo::PNaClABIBuiltinVaList: 9132 return CreatePNaClABIBuiltinVaListDecl(Context); 9133 case TargetInfo::AAPCSABIBuiltinVaList: 9134 return CreateAAPCSABIBuiltinVaListDecl(Context); 9135 case TargetInfo::SystemZBuiltinVaList: 9136 return CreateSystemZBuiltinVaListDecl(Context); 9137 case TargetInfo::HexagonBuiltinVaList: 9138 return CreateHexagonBuiltinVaListDecl(Context); 9139 } 9140 9141 llvm_unreachable("Unhandled __builtin_va_list type kind"); 9142 } 9143 9144 TypedefDecl *ASTContext::getBuiltinVaListDecl() const { 9145 if (!BuiltinVaListDecl) { 9146 BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind()); 9147 assert(BuiltinVaListDecl->isImplicit()); 9148 } 9149 9150 return BuiltinVaListDecl; 9151 } 9152 9153 Decl *ASTContext::getVaListTagDecl() const { 9154 // Force the creation of VaListTagDecl by building the __builtin_va_list 9155 // declaration. 9156 if (!VaListTagDecl) 9157 (void)getBuiltinVaListDecl(); 9158 9159 return VaListTagDecl; 9160 } 9161 9162 TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const { 9163 if (!BuiltinMSVaListDecl) 9164 BuiltinMSVaListDecl = CreateMSVaListDecl(this); 9165 9166 return BuiltinMSVaListDecl; 9167 } 9168 9169 bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const { 9170 // Allow redecl custom type checking builtin for HLSL. 
9171 if (LangOpts.HLSL && FD->getBuiltinID() != Builtin::NotBuiltin && 9172 BuiltinInfo.hasCustomTypechecking(FD->getBuiltinID())) 9173 return true; 9174 return BuiltinInfo.canBeRedeclared(FD->getBuiltinID()); 9175 } 9176 9177 void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) { 9178 assert(ObjCConstantStringType.isNull() && 9179 "'NSConstantString' type already set!"); 9180 9181 ObjCConstantStringType = getObjCInterfaceType(Decl); 9182 } 9183 9184 /// Retrieve the template name that corresponds to a non-empty 9185 /// lookup. 9186 TemplateName 9187 ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin, 9188 UnresolvedSetIterator End) const { 9189 unsigned size = End - Begin; 9190 assert(size > 1 && "set is not overloaded!"); 9191 9192 void *memory = Allocate(sizeof(OverloadedTemplateStorage) + 9193 size * sizeof(FunctionTemplateDecl*)); 9194 auto *OT = new (memory) OverloadedTemplateStorage(size); 9195 9196 NamedDecl **Storage = OT->getStorage(); 9197 for (UnresolvedSetIterator I = Begin; I != End; ++I) { 9198 NamedDecl *D = *I; 9199 assert(isa<FunctionTemplateDecl>(D) || 9200 isa<UnresolvedUsingValueDecl>(D) || 9201 (isa<UsingShadowDecl>(D) && 9202 isa<FunctionTemplateDecl>(D->getUnderlyingDecl()))); 9203 *Storage++ = D; 9204 } 9205 9206 return TemplateName(OT); 9207 } 9208 9209 /// Retrieve a template name representing an unqualified-id that has been 9210 /// assumed to name a template for ADL purposes. 9211 TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const { 9212 auto *OT = new (*this) AssumedTemplateStorage(Name); 9213 return TemplateName(OT); 9214 } 9215 9216 /// Retrieve the template name that represents a qualified 9217 /// template name such as \c std::vector. 9218 TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, 9219 bool TemplateKeyword, 9220 TemplateName Template) const { 9221 assert(NNS && "Missing nested-name-specifier in qualified template name"); 9222 9223 // FIXME: Canonicalization? 9224 llvm::FoldingSetNodeID ID; 9225 QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template); 9226 9227 void *InsertPos = nullptr; 9228 QualifiedTemplateName *QTN = 9229 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9230 if (!QTN) { 9231 QTN = new (*this, alignof(QualifiedTemplateName)) 9232 QualifiedTemplateName(NNS, TemplateKeyword, Template); 9233 QualifiedTemplateNames.InsertNode(QTN, InsertPos); 9234 } 9235 9236 return TemplateName(QTN); 9237 } 9238 9239 /// Retrieve the template name that represents a dependent 9240 /// template name such as \c MetaFun::template apply. 
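/// (Illustrative: this is the template-name part of a use such as
/// 'typename T::template apply<int>' written inside a template, where T is
/// still dependent.)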
9241 TemplateName 9242 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9243 const IdentifierInfo *Name) const { 9244 assert((!NNS || NNS->isDependent()) && 9245 "Nested name specifier must be dependent"); 9246 9247 llvm::FoldingSetNodeID ID; 9248 DependentTemplateName::Profile(ID, NNS, Name); 9249 9250 void *InsertPos = nullptr; 9251 DependentTemplateName *QTN = 9252 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9253 9254 if (QTN) 9255 return TemplateName(QTN); 9256 9257 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9258 if (CanonNNS == NNS) { 9259 QTN = new (*this, alignof(DependentTemplateName)) 9260 DependentTemplateName(NNS, Name); 9261 } else { 9262 TemplateName Canon = getDependentTemplateName(CanonNNS, Name); 9263 QTN = new (*this, alignof(DependentTemplateName)) 9264 DependentTemplateName(NNS, Name, Canon); 9265 DependentTemplateName *CheckQTN = 9266 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9267 assert(!CheckQTN && "Dependent type name canonicalization broken"); 9268 (void)CheckQTN; 9269 } 9270 9271 DependentTemplateNames.InsertNode(QTN, InsertPos); 9272 return TemplateName(QTN); 9273 } 9274 9275 /// Retrieve the template name that represents a dependent 9276 /// template name such as \c MetaFun::template operator+. 9277 TemplateName 9278 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9279 OverloadedOperatorKind Operator) const { 9280 assert((!NNS || NNS->isDependent()) && 9281 "Nested name specifier must be dependent"); 9282 9283 llvm::FoldingSetNodeID ID; 9284 DependentTemplateName::Profile(ID, NNS, Operator); 9285 9286 void *InsertPos = nullptr; 9287 DependentTemplateName *QTN 9288 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9289 9290 if (QTN) 9291 return TemplateName(QTN); 9292 9293 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9294 if (CanonNNS == NNS) { 9295 QTN = new (*this, alignof(DependentTemplateName)) 9296 DependentTemplateName(NNS, Operator); 9297 } else { 9298 TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); 9299 QTN = new (*this, alignof(DependentTemplateName)) 9300 DependentTemplateName(NNS, Operator, Canon); 9301 9302 DependentTemplateName *CheckQTN 9303 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9304 assert(!CheckQTN && "Dependent template name canonicalization broken"); 9305 (void)CheckQTN; 9306 } 9307 9308 DependentTemplateNames.InsertNode(QTN, InsertPos); 9309 return TemplateName(QTN); 9310 } 9311 9312 TemplateName ASTContext::getSubstTemplateTemplateParm( 9313 TemplateName Replacement, Decl *AssociatedDecl, unsigned Index, 9314 std::optional<unsigned> PackIndex) const { 9315 llvm::FoldingSetNodeID ID; 9316 SubstTemplateTemplateParmStorage::Profile(ID, Replacement, AssociatedDecl, 9317 Index, PackIndex); 9318 9319 void *insertPos = nullptr; 9320 SubstTemplateTemplateParmStorage *subst 9321 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); 9322 9323 if (!subst) { 9324 subst = new (*this) SubstTemplateTemplateParmStorage( 9325 Replacement, AssociatedDecl, Index, PackIndex); 9326 SubstTemplateTemplateParms.InsertNode(subst, insertPos); 9327 } 9328 9329 return TemplateName(subst); 9330 } 9331 9332 TemplateName 9333 ASTContext::getSubstTemplateTemplateParmPack(const TemplateArgument &ArgPack, 9334 Decl *AssociatedDecl, 9335 unsigned Index, bool Final) const { 9336 auto &Self = const_cast<ASTContext &>(*this); 9337 llvm::FoldingSetNodeID ID; 9338 
SubstTemplateTemplateParmPackStorage::Profile(ID, Self, ArgPack, 9339 AssociatedDecl, Index, Final); 9340 9341 void *InsertPos = nullptr; 9342 SubstTemplateTemplateParmPackStorage *Subst 9343 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos); 9344 9345 if (!Subst) { 9346 Subst = new (*this) SubstTemplateTemplateParmPackStorage( 9347 ArgPack.pack_elements(), AssociatedDecl, Index, Final); 9348 SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos); 9349 } 9350 9351 return TemplateName(Subst); 9352 } 9353 9354 /// getFromTargetType - Given one of the integer types provided by 9355 /// TargetInfo, produce the corresponding type. The unsigned @p Type 9356 /// is actually a value of type @c TargetInfo::IntType. 9357 CanQualType ASTContext::getFromTargetType(unsigned Type) const { 9358 switch (Type) { 9359 case TargetInfo::NoInt: return {}; 9360 case TargetInfo::SignedChar: return SignedCharTy; 9361 case TargetInfo::UnsignedChar: return UnsignedCharTy; 9362 case TargetInfo::SignedShort: return ShortTy; 9363 case TargetInfo::UnsignedShort: return UnsignedShortTy; 9364 case TargetInfo::SignedInt: return IntTy; 9365 case TargetInfo::UnsignedInt: return UnsignedIntTy; 9366 case TargetInfo::SignedLong: return LongTy; 9367 case TargetInfo::UnsignedLong: return UnsignedLongTy; 9368 case TargetInfo::SignedLongLong: return LongLongTy; 9369 case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy; 9370 } 9371 9372 llvm_unreachable("Unhandled TargetInfo::IntType value"); 9373 } 9374 9375 //===----------------------------------------------------------------------===// 9376 // Type Predicates. 9377 //===----------------------------------------------------------------------===// 9378 9379 /// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's 9380 /// garbage collection attribute. 9381 /// 9382 Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const { 9383 if (getLangOpts().getGC() == LangOptions::NonGC) 9384 return Qualifiers::GCNone; 9385 9386 assert(getLangOpts().ObjC); 9387 Qualifiers::GC GCAttrs = Ty.getObjCGCAttr(); 9388 9389 // Default behaviour under objective-C's gc is for ObjC pointers 9390 // (or pointers to them) be treated as though they were declared 9391 // as __strong. 9392 if (GCAttrs == Qualifiers::GCNone) { 9393 if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) 9394 return Qualifiers::Strong; 9395 else if (Ty->isPointerType()) 9396 return getObjCGCAttrKind(Ty->castAs<PointerType>()->getPointeeType()); 9397 } else { 9398 // It's not valid to set GC attributes on anything that isn't a 9399 // pointer. 9400 #ifndef NDEBUG 9401 QualType CT = Ty->getCanonicalTypeInternal(); 9402 while (const auto *AT = dyn_cast<ArrayType>(CT)) 9403 CT = AT->getElementType(); 9404 assert(CT->isAnyPointerType() || CT->isBlockPointerType()); 9405 #endif 9406 } 9407 return GCAttrs; 9408 } 9409 9410 //===----------------------------------------------------------------------===// 9411 // Type Compatibility Testing 9412 //===----------------------------------------------------------------------===// 9413 9414 /// areCompatVectorTypes - Return true if the two specified vector types are 9415 /// compatible. 
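/// Compatibility here means the canonical element types and element counts
/// match, e.g. two vector types declared via 'float __attribute__((vector_size(16)))'
/// (4 x float) are compatible.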
9416 static bool areCompatVectorTypes(const VectorType *LHS, 9417 const VectorType *RHS) { 9418 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9419 return LHS->getElementType() == RHS->getElementType() && 9420 LHS->getNumElements() == RHS->getNumElements(); 9421 } 9422 9423 /// areCompatMatrixTypes - Return true if the two specified matrix types are 9424 /// compatible. 9425 static bool areCompatMatrixTypes(const ConstantMatrixType *LHS, 9426 const ConstantMatrixType *RHS) { 9427 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9428 return LHS->getElementType() == RHS->getElementType() && 9429 LHS->getNumRows() == RHS->getNumRows() && 9430 LHS->getNumColumns() == RHS->getNumColumns(); 9431 } 9432 9433 bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, 9434 QualType SecondVec) { 9435 assert(FirstVec->isVectorType() && "FirstVec should be a vector type"); 9436 assert(SecondVec->isVectorType() && "SecondVec should be a vector type"); 9437 9438 if (hasSameUnqualifiedType(FirstVec, SecondVec)) 9439 return true; 9440 9441 // Treat Neon vector types and most AltiVec vector types as if they are the 9442 // equivalent GCC vector types. 9443 const auto *First = FirstVec->castAs<VectorType>(); 9444 const auto *Second = SecondVec->castAs<VectorType>(); 9445 if (First->getNumElements() == Second->getNumElements() && 9446 hasSameType(First->getElementType(), Second->getElementType()) && 9447 First->getVectorKind() != VectorType::AltiVecPixel && 9448 First->getVectorKind() != VectorType::AltiVecBool && 9449 Second->getVectorKind() != VectorType::AltiVecPixel && 9450 Second->getVectorKind() != VectorType::AltiVecBool && 9451 First->getVectorKind() != VectorType::SveFixedLengthDataVector && 9452 First->getVectorKind() != VectorType::SveFixedLengthPredicateVector && 9453 Second->getVectorKind() != VectorType::SveFixedLengthDataVector && 9454 Second->getVectorKind() != VectorType::SveFixedLengthPredicateVector && 9455 First->getVectorKind() != VectorType::RVVFixedLengthDataVector && 9456 Second->getVectorKind() != VectorType::RVVFixedLengthDataVector) 9457 return true; 9458 9459 return false; 9460 } 9461 9462 /// getSVETypeSize - Return SVE vector or predicate register size. 9463 static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) { 9464 assert(Ty->isVLSTBuiltinType() && "Invalid SVE Type"); 9465 if (Ty->getKind() == BuiltinType::SveBool || 9466 Ty->getKind() == BuiltinType::SveCount) 9467 return (Context.getLangOpts().VScaleMin * 128) / Context.getCharWidth(); 9468 return Context.getLangOpts().VScaleMin * 128; 9469 } 9470 9471 bool ASTContext::areCompatibleSveTypes(QualType FirstType, 9472 QualType SecondType) { 9473 assert( 9474 ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) || 9475 (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) && 9476 "Expected SVE builtin type and vector type!"); 9477 9478 auto IsValidCast = [this](QualType FirstType, QualType SecondType) { 9479 if (const auto *BT = FirstType->getAs<BuiltinType>()) { 9480 if (const auto *VT = SecondType->getAs<VectorType>()) { 9481 // Predicates have the same representation as uint8 so we also have to 9482 // check the kind to make these types incompatible. 
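// (Illustrative: a fixed-length predicate vector, i.e. one declared with
// arm_sve_vector_bits on svbool_t, is only treated as compatible with svbool_t
// itself, even though its storage looks like a vector of uint8.)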
9483 if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) 9484 return BT->getKind() == BuiltinType::SveBool; 9485 else if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) 9486 return VT->getElementType().getCanonicalType() == 9487 FirstType->getSveEltType(*this); 9488 else if (VT->getVectorKind() == VectorType::GenericVector) 9489 return getTypeSize(SecondType) == getSVETypeSize(*this, BT) && 9490 hasSameType(VT->getElementType(), 9491 getBuiltinVectorTypeInfo(BT).ElementType); 9492 } 9493 } 9494 return false; 9495 }; 9496 9497 return IsValidCast(FirstType, SecondType) || 9498 IsValidCast(SecondType, FirstType); 9499 } 9500 9501 bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType, 9502 QualType SecondType) { 9503 assert( 9504 ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) || 9505 (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) && 9506 "Expected SVE builtin type and vector type!"); 9507 9508 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { 9509 const auto *BT = FirstType->getAs<BuiltinType>(); 9510 if (!BT) 9511 return false; 9512 9513 const auto *VecTy = SecondType->getAs<VectorType>(); 9514 if (VecTy && 9515 (VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector || 9516 VecTy->getVectorKind() == VectorType::GenericVector)) { 9517 const LangOptions::LaxVectorConversionKind LVCKind = 9518 getLangOpts().getLaxVectorConversions(); 9519 9520 // Can not convert between sve predicates and sve vectors because of 9521 // different size. 9522 if (BT->getKind() == BuiltinType::SveBool && 9523 VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector) 9524 return false; 9525 9526 // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion. 9527 // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly 9528 // converts to VLAT and VLAT implicitly converts to GNUT." 9529 // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and 9530 // predicates. 9531 if (VecTy->getVectorKind() == VectorType::GenericVector && 9532 getTypeSize(SecondType) != getSVETypeSize(*this, BT)) 9533 return false; 9534 9535 // If -flax-vector-conversions=all is specified, the types are 9536 // certainly compatible. 9537 if (LVCKind == LangOptions::LaxVectorConversionKind::All) 9538 return true; 9539 9540 // If -flax-vector-conversions=integer is specified, the types are 9541 // compatible if the elements are integer types. 9542 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) 9543 return VecTy->getElementType().getCanonicalType()->isIntegerType() && 9544 FirstType->getSveEltType(*this)->isIntegerType(); 9545 } 9546 9547 return false; 9548 }; 9549 9550 return IsLaxCompatible(FirstType, SecondType) || 9551 IsLaxCompatible(SecondType, FirstType); 9552 } 9553 9554 /// getRVVTypeSize - Return RVV vector register size. 
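/// The result is the minimum vscale times the known-minimum element count times
/// the element size (in bits), or 0 when the target reports no vscale range.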
9555 static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) { 9556 assert(Ty->isRVVVLSBuiltinType() && "Invalid RVV Type"); 9557 auto VScale = Context.getTargetInfo().getVScaleRange(Context.getLangOpts()); 9558 if (!VScale) 9559 return 0; 9560 9561 ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty); 9562 9563 uint64_t EltSize = Context.getTypeSize(Info.ElementType); 9564 uint64_t MinElts = Info.EC.getKnownMinValue(); 9565 return VScale->first * MinElts * EltSize; 9566 } 9567 9568 bool ASTContext::areCompatibleRVVTypes(QualType FirstType, 9569 QualType SecondType) { 9570 assert( 9571 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) || 9572 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) && 9573 "Expected RVV builtin type and vector type!"); 9574 9575 auto IsValidCast = [this](QualType FirstType, QualType SecondType) { 9576 if (const auto *BT = FirstType->getAs<BuiltinType>()) { 9577 if (const auto *VT = SecondType->getAs<VectorType>()) { 9578 if (VT->getVectorKind() == VectorType::RVVFixedLengthDataVector || 9579 VT->getVectorKind() == VectorType::GenericVector) 9580 return FirstType->isRVVVLSBuiltinType() && 9581 getTypeSize(SecondType) == getRVVTypeSize(*this, BT) && 9582 hasSameType(VT->getElementType(), 9583 getBuiltinVectorTypeInfo(BT).ElementType); 9584 } 9585 } 9586 return false; 9587 }; 9588 9589 return IsValidCast(FirstType, SecondType) || 9590 IsValidCast(SecondType, FirstType); 9591 } 9592 9593 bool ASTContext::areLaxCompatibleRVVTypes(QualType FirstType, 9594 QualType SecondType) { 9595 assert( 9596 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) || 9597 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) && 9598 "Expected RVV builtin type and vector type!"); 9599 9600 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { 9601 const auto *BT = FirstType->getAs<BuiltinType>(); 9602 if (!BT) 9603 return false; 9604 9605 if (!BT->isRVVVLSBuiltinType()) 9606 return false; 9607 9608 const auto *VecTy = SecondType->getAs<VectorType>(); 9609 if (VecTy && 9610 (VecTy->getVectorKind() == VectorType::RVVFixedLengthDataVector || 9611 VecTy->getVectorKind() == VectorType::GenericVector)) { 9612 const LangOptions::LaxVectorConversionKind LVCKind = 9613 getLangOpts().getLaxVectorConversions(); 9614 9615 // If __riscv_v_fixed_vlen != N do not allow vector lax conversion. 9616 if (getTypeSize(SecondType) != getRVVTypeSize(*this, BT)) 9617 return false; 9618 9619 // If -flax-vector-conversions=all is specified, the types are 9620 // certainly compatible. 9621 if (LVCKind == LangOptions::LaxVectorConversionKind::All) 9622 return true; 9623 9624 // If -flax-vector-conversions=integer is specified, the types are 9625 // compatible if the elements are integer types. 9626 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) 9627 return VecTy->getElementType().getCanonicalType()->isIntegerType() && 9628 FirstType->getRVVEltType(*this)->isIntegerType(); 9629 } 9630 9631 return false; 9632 }; 9633 9634 return IsLaxCompatible(FirstType, SecondType) || 9635 IsLaxCompatible(SecondType, FirstType); 9636 } 9637 9638 bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const { 9639 while (true) { 9640 // __strong id 9641 if (const AttributedType *Attr = dyn_cast<AttributedType>(Ty)) { 9642 if (Attr->getAttrKind() == attr::ObjCOwnership) 9643 return true; 9644 9645 Ty = Attr->getModifiedType(); 9646 9647 // X *__strong (...) 
9648     } else if (const ParenType *Paren = dyn_cast<ParenType>(Ty)) {
9649       Ty = Paren->getInnerType();
9650 
9651     // We do not want to look through typedefs, typeof(expr),
9652     // typeof(type), or any other way that the type is somehow
9653     // abstracted.
9654     } else {
9655       return false;
9656     }
9657   }
9658 }
9659 
9660 //===----------------------------------------------------------------------===//
9661 // ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's.
9662 //===----------------------------------------------------------------------===//
9663 
9664 /// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the
9665 /// inheritance hierarchy of 'rProto'.
9666 bool
9667 ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
9668                                            ObjCProtocolDecl *rProto) const {
9669   if (declaresSameEntity(lProto, rProto))
9670     return true;
9671   for (auto *PI : rProto->protocols())
9672     if (ProtocolCompatibleWithProtocol(lProto, PI))
9673       return true;
9674   return false;
9675 }
9676 
9677 /// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and
9678 /// Class<pr1, ...>.
9679 bool ASTContext::ObjCQualifiedClassTypesAreCompatible(
9680     const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) {
9681   for (auto *lhsProto : lhs->quals()) {
9682     bool match = false;
9683     for (auto *rhsProto : rhs->quals()) {
9684       if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) {
9685         match = true;
9686         break;
9687       }
9688     }
9689     if (!match)
9690       return false;
9691   }
9692   return true;
9693 }
9694 
9695 /// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an
9696 /// ObjCQualifiedIDType.
9697 bool ASTContext::ObjCQualifiedIdTypesAreCompatible(
9698     const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs,
9699     bool compare) {
9700   // Allow id<P..> and an 'id' in all cases.
9701   if (lhs->isObjCIdType() || rhs->isObjCIdType())
9702     return true;
9703 
9704   // Don't allow id<P..> to convert to Class or Class<P..> in either direction.
9705   if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() ||
9706       rhs->isObjCClassType() || rhs->isObjCQualifiedClassType())
9707     return false;
9708 
9709   if (lhs->isObjCQualifiedIdType()) {
9710     if (rhs->qual_empty()) {
9711       // If the RHS is an unqualified interface pointer "NSString*",
9712       // make sure we check the class hierarchy.
9713       if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
9714         for (auto *I : lhs->quals()) {
9715           // when comparing an id<P> on lhs with a static type on rhs,
9716           // see if static class implements all of id's protocols, directly or
9717           // through its super class and categories.
9718           if (!rhsID->ClassImplementsProtocol(I, true))
9719             return false;
9720         }
9721       }
9722       // If there are no qualifiers and no interface, we have an 'id'.
9723       return true;
9724     }
9725     // Both the right and left sides have qualifiers.
9726     for (auto *lhsProto : lhs->quals()) {
9727       bool match = false;
9728 
9729       // when comparing an id<P> on lhs with a static type on rhs,
9730       // see if static class implements all of id's protocols, directly or
9731       // through its super class and categories.
9732       for (auto *rhsProto : rhs->quals()) {
9733         if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
9734             (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
9735           match = true;
9736           break;
9737         }
9738       }
9739       // If the RHS is a qualified interface pointer "NSString<P>*",
9740       // make sure we check the class hierarchy.
9741 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9742 for (auto *I : lhs->quals()) { 9743 // when comparing an id<P> on lhs with a static type on rhs, 9744 // see if static class implements all of id's protocols, directly or 9745 // through its super class and categories. 9746 if (rhsID->ClassImplementsProtocol(I, true)) { 9747 match = true; 9748 break; 9749 } 9750 } 9751 } 9752 if (!match) 9753 return false; 9754 } 9755 9756 return true; 9757 } 9758 9759 assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>"); 9760 9761 if (lhs->getInterfaceType()) { 9762 // If both the right and left sides have qualifiers. 9763 for (auto *lhsProto : lhs->quals()) { 9764 bool match = false; 9765 9766 // when comparing an id<P> on rhs with a static type on lhs, 9767 // see if static class implements all of id's protocols, directly or 9768 // through its super class and categories. 9769 // First, lhs protocols in the qualifier list must be found, direct 9770 // or indirect in rhs's qualifier list or it is a mismatch. 9771 for (auto *rhsProto : rhs->quals()) { 9772 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9773 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9774 match = true; 9775 break; 9776 } 9777 } 9778 if (!match) 9779 return false; 9780 } 9781 9782 // Static class's protocols, or its super class or category protocols 9783 // must be found, direct or indirect in rhs's qualifier list or it is a mismatch. 9784 if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) { 9785 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols; 9786 CollectInheritedProtocols(lhsID, LHSInheritedProtocols); 9787 // This is rather dubious but matches gcc's behavior. If lhs has 9788 // no type qualifier and its class has no static protocol(s) 9789 // assume that it is mismatch. 9790 if (LHSInheritedProtocols.empty() && lhs->qual_empty()) 9791 return false; 9792 for (auto *lhsProto : LHSInheritedProtocols) { 9793 bool match = false; 9794 for (auto *rhsProto : rhs->quals()) { 9795 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9796 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9797 match = true; 9798 break; 9799 } 9800 } 9801 if (!match) 9802 return false; 9803 } 9804 } 9805 return true; 9806 } 9807 return false; 9808 } 9809 9810 /// canAssignObjCInterfaces - Return true if the two interface types are 9811 /// compatible for assignment from RHS to LHS. This handles validation of any 9812 /// protocol qualifiers on the LHS or RHS. 9813 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, 9814 const ObjCObjectPointerType *RHSOPT) { 9815 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9816 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9817 9818 // If either type represents the built-in 'id' type, return true. 9819 if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId()) 9820 return true; 9821 9822 // Function object that propagates a successful result or handles 9823 // __kindof types. 9824 auto finish = [&](bool succeeded) -> bool { 9825 if (succeeded) 9826 return true; 9827 9828 if (!RHS->isKindOfType()) 9829 return false; 9830 9831 // Strip off __kindof and protocol qualifiers, then check whether 9832 // we can assign the other way. 9833 return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9834 LHSOPT->stripObjCKindOfTypeAndQuals(*this)); 9835 }; 9836 9837 // Casts from or to id<P> are allowed when the other side has compatible 9838 // protocols. 
9839   if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) {
9840     return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false));
9841   }
9842 
9843   // Verify protocol compatibility for casts from Class<P1> to Class<P2>.
9844   if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) {
9845     return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT, RHSOPT));
9846   }
9847 
9848   // Casts from Class to Class<Foo>, or vice-versa, are allowed.
9849   if (LHS->isObjCClass() && RHS->isObjCClass()) {
9850     return true;
9851   }
9852 
9853   // If we have 2 user-defined types, fall into that path.
9854   if (LHS->getInterface() && RHS->getInterface()) {
9855     return finish(canAssignObjCInterfaces(LHS, RHS));
9856   }
9857 
9858   return false;
9859 }
9860 
9861 /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written
9862 /// for providing type-safety for Objective-C pointers used to pass/return
9863 /// arguments in block literals. When passed as arguments, passing 'A*' where
9864 /// 'id' is expected is not OK. Passing 'Sub *' where 'Super *' is expected is
9865 /// not OK. For the return type, the opposite is not OK.
9866 bool ASTContext::canAssignObjCInterfacesInBlockPointer(
9867                                          const ObjCObjectPointerType *LHSOPT,
9868                                          const ObjCObjectPointerType *RHSOPT,
9869                                          bool BlockReturnType) {
9870 
9871   // Function object that propagates a successful result or handles
9872   // __kindof types.
9873   auto finish = [&](bool succeeded) -> bool {
9874     if (succeeded)
9875       return true;
9876 
9877     const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT;
9878     if (!Expected->isKindOfType())
9879       return false;
9880 
9881     // Strip off __kindof and protocol qualifiers, then check whether
9882     // we can assign the other way.
9883     return canAssignObjCInterfacesInBlockPointer(
9884              RHSOPT->stripObjCKindOfTypeAndQuals(*this),
9885              LHSOPT->stripObjCKindOfTypeAndQuals(*this),
9886              BlockReturnType);
9887   };
9888 
9889   if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType())
9890     return true;
9891 
9892   if (LHSOPT->isObjCBuiltinType()) {
9893     return finish(RHSOPT->isObjCBuiltinType() ||
9894                   RHSOPT->isObjCQualifiedIdType());
9895   }
9896 
9897   if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) {
9898     if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking)
9899       // For block parameters, use the previous type checking for compatibility.
9900       return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) ||
9901                     // Or corrected type checking as in non-compat mode.
9902                     (!BlockReturnType &&
9903                      ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false)));
9904     else
9905       return finish(ObjCQualifiedIdTypesAreCompatible(
9906           (BlockReturnType ? LHSOPT : RHSOPT),
9907           (BlockReturnType ? RHSOPT : LHSOPT), false));
9908   }
9909 
9910   const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
9911   const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
9912   if (LHS && RHS) { // We have 2 user-defined types.
9913     if (LHS != RHS) {
9914       if (LHS->getDecl()->isSuperClassOf(RHS->getDecl()))
9915         return finish(BlockReturnType);
9916       if (RHS->getDecl()->isSuperClassOf(LHS->getDecl()))
9917         return finish(!BlockReturnType);
9918     }
9919     else
9920       return true;
9921   }
9922   return false;
9923 }
9924 
9925 /// Comparison routine for Objective-C protocols to be used with
9926 /// llvm::array_pod_sort.
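/// Sorting by name gives the protocol intersection computed in
/// getIntersectionOfProtocols() a deterministic order.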
9927 static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs,
9928                                       ObjCProtocolDecl * const *rhs) {
9929   return (*lhs)->getName().compare((*rhs)->getName());
9930 }
9931 
9932 /// getIntersectionOfProtocols - This routine finds the intersection of the
9933 /// sets of protocols inherited from two distinct Objective-C pointer objects
9934 /// with the given common base.
9935 /// It is used to build the composite qualifier list for the composite type of
9936 /// a conditional expression involving two Objective-C pointer objects.
9937 static
9938 void getIntersectionOfProtocols(ASTContext &Context,
9939                                 const ObjCInterfaceDecl *CommonBase,
9940                                 const ObjCObjectPointerType *LHSOPT,
9941                                 const ObjCObjectPointerType *RHSOPT,
9942       SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) {
9943 
9944   const ObjCObjectType* LHS = LHSOPT->getObjectType();
9945   const ObjCObjectType* RHS = RHSOPT->getObjectType();
9946   assert(LHS->getInterface() && "LHS must have an interface base");
9947   assert(RHS->getInterface() && "RHS must have an interface base");
9948 
9949   // Add all of the protocols for the LHS.
9950   llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet;
9951 
9952   // Start with the protocol qualifiers.
9953   for (auto *proto : LHS->quals()) {
9954     Context.CollectInheritedProtocols(proto, LHSProtocolSet);
9955   }
9956 
9957   // Also add the protocols associated with the LHS interface.
9958   Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet);
9959 
9960   // Add all of the protocols for the RHS.
9961   llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet;
9962 
9963   // Start with the protocol qualifiers.
9964   for (auto *proto : RHS->quals()) {
9965     Context.CollectInheritedProtocols(proto, RHSProtocolSet);
9966   }
9967 
9968   // Also add the protocols associated with the RHS interface.
9969   Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet);
9970 
9971   // Compute the intersection of the collected protocol sets.
9972   for (auto *proto : LHSProtocolSet) {
9973     if (RHSProtocolSet.count(proto))
9974       IntersectionSet.push_back(proto);
9975   }
9976 
9977   // Compute the set of protocols that is implied by either the common type or
9978   // the protocols within the intersection.
9979   llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols;
9980   Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols);
9981 
9982   // Remove any implied protocols from the list of inherited protocols.
9983   if (!ImpliedProtocols.empty()) {
9984     llvm::erase_if(IntersectionSet, [&](ObjCProtocolDecl *proto) -> bool {
9985       return ImpliedProtocols.contains(proto);
9986     });
9987   }
9988 
9989   // Sort the remaining protocols by name.
9990   llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(),
9991                        compareObjCProtocolsByName);
9992 }
9993 
9994 /// Determine whether the first type is a subtype of the second.
9995 static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs,
9996                                      QualType rhs) {
9997   // Common case: two object pointers.
9998   const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>();
9999   const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
10000   if (lhsOPT && rhsOPT)
10001     return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT);
10002 
10003   // Two block pointers.
10004   const auto *lhsBlock = lhs->getAs<BlockPointerType>();
10005   const auto *rhsBlock = rhs->getAs<BlockPointerType>();
10006   if (lhsBlock && rhsBlock)
10007     return ctx.typesAreBlockPointerCompatible(lhs, rhs);
10008 
10009   // If either is an unqualified 'id' and the other is a block, it's
10010   // acceptable.
10011 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) || 10012 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock)) 10013 return true; 10014 10015 return false; 10016 } 10017 10018 // Check that the given Objective-C type argument lists are equivalent. 10019 static bool sameObjCTypeArgs(ASTContext &ctx, 10020 const ObjCInterfaceDecl *iface, 10021 ArrayRef<QualType> lhsArgs, 10022 ArrayRef<QualType> rhsArgs, 10023 bool stripKindOf) { 10024 if (lhsArgs.size() != rhsArgs.size()) 10025 return false; 10026 10027 ObjCTypeParamList *typeParams = iface->getTypeParamList(); 10028 if (!typeParams) 10029 return false; 10030 10031 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) { 10032 if (ctx.hasSameType(lhsArgs[i], rhsArgs[i])) 10033 continue; 10034 10035 switch (typeParams->begin()[i]->getVariance()) { 10036 case ObjCTypeParamVariance::Invariant: 10037 if (!stripKindOf || 10038 !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx), 10039 rhsArgs[i].stripObjCKindOfType(ctx))) { 10040 return false; 10041 } 10042 break; 10043 10044 case ObjCTypeParamVariance::Covariant: 10045 if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i])) 10046 return false; 10047 break; 10048 10049 case ObjCTypeParamVariance::Contravariant: 10050 if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i])) 10051 return false; 10052 break; 10053 } 10054 } 10055 10056 return true; 10057 } 10058 10059 QualType ASTContext::areCommonBaseCompatible( 10060 const ObjCObjectPointerType *Lptr, 10061 const ObjCObjectPointerType *Rptr) { 10062 const ObjCObjectType *LHS = Lptr->getObjectType(); 10063 const ObjCObjectType *RHS = Rptr->getObjectType(); 10064 const ObjCInterfaceDecl* LDecl = LHS->getInterface(); 10065 const ObjCInterfaceDecl* RDecl = RHS->getInterface(); 10066 10067 if (!LDecl || !RDecl) 10068 return {}; 10069 10070 // When either LHS or RHS is a kindof type, we should return a kindof type. 10071 // For example, for common base of kindof(ASub1) and kindof(ASub2), we return 10072 // kindof(A). 10073 bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType(); 10074 10075 // Follow the left-hand side up the class hierarchy until we either hit a 10076 // root or find the RHS. Record the ancestors in case we don't find it. 10077 llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4> 10078 LHSAncestors; 10079 while (true) { 10080 // Record this ancestor. We'll need this if the common type isn't in the 10081 // path from the LHS to the root. 10082 LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS; 10083 10084 if (declaresSameEntity(LHS->getInterface(), RDecl)) { 10085 // Get the type arguments. 10086 ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten(); 10087 bool anyChanges = false; 10088 if (LHS->isSpecialized() && RHS->isSpecialized()) { 10089 // Both have type arguments, compare them. 10090 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 10091 LHS->getTypeArgs(), RHS->getTypeArgs(), 10092 /*stripKindOf=*/true)) 10093 return {}; 10094 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 10095 // If only one has type arguments, the result will not have type 10096 // arguments. 10097 LHSTypeArgs = {}; 10098 anyChanges = true; 10099 } 10100 10101 // Compute the intersection of protocols. 10102 SmallVector<ObjCProtocolDecl *, 8> Protocols; 10103 getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr, 10104 Protocols); 10105 if (!Protocols.empty()) 10106 anyChanges = true; 10107 10108 // If anything in the LHS will have changed, build a new result type. 
10109 // If we need to return a kindof type but LHS is not a kindof type, we 10110 // build a new result type. 10111 if (anyChanges || LHS->isKindOfType() != anyKindOf) { 10112 QualType Result = getObjCInterfaceType(LHS->getInterface()); 10113 Result = getObjCObjectType(Result, LHSTypeArgs, Protocols, 10114 anyKindOf || LHS->isKindOfType()); 10115 return getObjCObjectPointerType(Result); 10116 } 10117 10118 return getObjCObjectPointerType(QualType(LHS, 0)); 10119 } 10120 10121 // Find the superclass. 10122 QualType LHSSuperType = LHS->getSuperClassType(); 10123 if (LHSSuperType.isNull()) 10124 break; 10125 10126 LHS = LHSSuperType->castAs<ObjCObjectType>(); 10127 } 10128 10129 // We didn't find anything by following the LHS to its root; now check 10130 // the RHS against the cached set of ancestors. 10131 while (true) { 10132 auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl()); 10133 if (KnownLHS != LHSAncestors.end()) { 10134 LHS = KnownLHS->second; 10135 10136 // Get the type arguments. 10137 ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten(); 10138 bool anyChanges = false; 10139 if (LHS->isSpecialized() && RHS->isSpecialized()) { 10140 // Both have type arguments, compare them. 10141 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 10142 LHS->getTypeArgs(), RHS->getTypeArgs(), 10143 /*stripKindOf=*/true)) 10144 return {}; 10145 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 10146 // If only one has type arguments, the result will not have type 10147 // arguments. 10148 RHSTypeArgs = {}; 10149 anyChanges = true; 10150 } 10151 10152 // Compute the intersection of protocols. 10153 SmallVector<ObjCProtocolDecl *, 8> Protocols; 10154 getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr, 10155 Protocols); 10156 if (!Protocols.empty()) 10157 anyChanges = true; 10158 10159 // If we need to return a kindof type but RHS is not a kindof type, we 10160 // build a new result type. 10161 if (anyChanges || RHS->isKindOfType() != anyKindOf) { 10162 QualType Result = getObjCInterfaceType(RHS->getInterface()); 10163 Result = getObjCObjectType(Result, RHSTypeArgs, Protocols, 10164 anyKindOf || RHS->isKindOfType()); 10165 return getObjCObjectPointerType(Result); 10166 } 10167 10168 return getObjCObjectPointerType(QualType(RHS, 0)); 10169 } 10170 10171 // Find the superclass of the RHS. 10172 QualType RHSSuperType = RHS->getSuperClassType(); 10173 if (RHSSuperType.isNull()) 10174 break; 10175 10176 RHS = RHSSuperType->castAs<ObjCObjectType>(); 10177 } 10178 10179 return {}; 10180 } 10181 10182 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS, 10183 const ObjCObjectType *RHS) { 10184 assert(LHS->getInterface() && "LHS is not an interface type"); 10185 assert(RHS->getInterface() && "RHS is not an interface type"); 10186 10187 // Verify that the base decls are compatible: the RHS must be a subclass of 10188 // the LHS. 10189 ObjCInterfaceDecl *LHSInterface = LHS->getInterface(); 10190 bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface()); 10191 if (!IsSuperClass) 10192 return false; 10193 10194 // If the LHS has protocol qualifiers, determine whether all of them are 10195 // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the 10196 // LHS). 10197 if (LHS->getNumProtocols() > 0) { 10198 // OK if conversion of LHS to SuperClass results in narrowing of types 10199 // ; i.e., SuperClass may implement at least one of the protocols 10200 // in LHS's protocol list. 
// For example, SuperObj<P1> = lhs<P1,P2> is OK.
10201     // But not SuperObj<P1,P2,P3> = lhs<P1,P2>.
10202     llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
10203     CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols);
10204     // Also, if RHS has explicit qualifiers, include them when comparing with
10205     // LHS's qualifiers.
10206     for (auto *RHSPI : RHS->quals())
10207       CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols);
10208     // If there are no protocols associated with RHS, it is not a match.
10209     if (SuperClassInheritedProtocols.empty())
10210       return false;
10211 
10212     for (const auto *LHSProto : LHS->quals()) {
10213       bool SuperImplementsProtocol = false;
10214       for (auto *SuperClassProto : SuperClassInheritedProtocols)
10215         if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
10216           SuperImplementsProtocol = true;
10217           break;
10218         }
10219       if (!SuperImplementsProtocol)
10220         return false;
10221     }
10222   }
10223 
10224   // If the LHS is specialized, we may need to check type arguments.
10225   if (LHS->isSpecialized()) {
10226     // Follow the superclass chain until we've matched the LHS class in the
10227     // hierarchy. This substitutes type arguments through.
10228     const ObjCObjectType *RHSSuper = RHS;
10229     while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface))
10230       RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();
10231 
10232     // If the RHS is specialized, compare type arguments.
10233     if (RHSSuper->isSpecialized() &&
10234         !sameObjCTypeArgs(*this, LHS->getInterface(),
10235                           LHS->getTypeArgs(), RHSSuper->getTypeArgs(),
10236                           /*stripKindOf=*/true)) {
10237       return false;
10238     }
10239   }
10240 
10241   return true;
10242 }
10243 
10244 bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
10245   // Get the "pointed to" types.
10246   const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
10247   const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();
10248 
10249   if (!LHSOPT || !RHSOPT)
10250     return false;
10251 
10252   return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
10253          canAssignObjCInterfaces(RHSOPT, LHSOPT);
10254 }
10255 
10256 bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
10257   return canAssignObjCInterfaces(
10258       getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(),
10259       getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>());
10260 }
10261 
10262 /// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
10263 /// both shall have the identically qualified version of a compatible type.
10264 /// C99 6.2.7p1: Two types have compatible types if their types are the
10265 /// same. See 6.7.[2,3,5] for additional rules.
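/// For illustration, in C 'int (*)[4]' and 'int (*)[]' are compatible, and a
/// prototype 'void f(int)' is compatible with the unprototyped declaration
/// 'void f()'; the merging logic below (mergeTypes, mergeFunctionTypes)
/// implements these checks.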
10266 bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS, 10267 bool CompareUnqualified) { 10268 if (getLangOpts().CPlusPlus) 10269 return hasSameType(LHS, RHS); 10270 10271 return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull(); 10272 } 10273 10274 bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) { 10275 return typesAreCompatible(LHS, RHS); 10276 } 10277 10278 bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) { 10279 return !mergeTypes(LHS, RHS, true).isNull(); 10280 } 10281 10282 /// mergeTransparentUnionType - if T is a transparent union type and a member 10283 /// of T is compatible with SubType, return the merged type, else return 10284 /// QualType() 10285 QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType, 10286 bool OfBlockPointer, 10287 bool Unqualified) { 10288 if (const RecordType *UT = T->getAsUnionType()) { 10289 RecordDecl *UD = UT->getDecl(); 10290 if (UD->hasAttr<TransparentUnionAttr>()) { 10291 for (const auto *I : UD->fields()) { 10292 QualType ET = I->getType().getUnqualifiedType(); 10293 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified); 10294 if (!MT.isNull()) 10295 return MT; 10296 } 10297 } 10298 } 10299 10300 return {}; 10301 } 10302 10303 /// mergeFunctionParameterTypes - merge two types which appear as function 10304 /// parameter types 10305 QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs, 10306 bool OfBlockPointer, 10307 bool Unqualified) { 10308 // GNU extension: two types are compatible if they appear as a function 10309 // argument, one of the types is a transparent union type and the other 10310 // type is compatible with a union member 10311 QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer, 10312 Unqualified); 10313 if (!lmerge.isNull()) 10314 return lmerge; 10315 10316 QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, 10317 Unqualified); 10318 if (!rmerge.isNull()) 10319 return rmerge; 10320 10321 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified); 10322 } 10323 10324 QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, 10325 bool OfBlockPointer, bool Unqualified, 10326 bool AllowCXX, 10327 bool IsConditionalOperator) { 10328 const auto *lbase = lhs->castAs<FunctionType>(); 10329 const auto *rbase = rhs->castAs<FunctionType>(); 10330 const auto *lproto = dyn_cast<FunctionProtoType>(lbase); 10331 const auto *rproto = dyn_cast<FunctionProtoType>(rbase); 10332 bool allLTypes = true; 10333 bool allRTypes = true; 10334 10335 // Check return type 10336 QualType retType; 10337 if (OfBlockPointer) { 10338 QualType RHS = rbase->getReturnType(); 10339 QualType LHS = lbase->getReturnType(); 10340 bool UnqualifiedResult = Unqualified; 10341 if (!UnqualifiedResult) 10342 UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers()); 10343 retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true); 10344 } 10345 else 10346 retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false, 10347 Unqualified); 10348 if (retType.isNull()) 10349 return {}; 10350 10351 if (Unqualified) 10352 retType = retType.getUnqualifiedType(); 10353 10354 CanQualType LRetType = getCanonicalType(lbase->getReturnType()); 10355 CanQualType RRetType = getCanonicalType(rbase->getReturnType()); 10356 if (Unqualified) { 10357 LRetType = LRetType.getUnqualifiedType(); 10358 RRetType = RRetType.getUnqualifiedType(); 10359 } 10360 10361 if (getCanonicalType(retType) != LRetType) 
10362 allLTypes = false; 10363 if (getCanonicalType(retType) != RRetType) 10364 allRTypes = false; 10365 10366 // FIXME: double check this 10367 // FIXME: should we error if lbase->getRegParmAttr() != 0 && 10368 // rbase->getRegParmAttr() != 0 && 10369 // lbase->getRegParmAttr() != rbase->getRegParmAttr()? 10370 FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo(); 10371 FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo(); 10372 10373 // Compatible functions must have compatible calling conventions 10374 if (lbaseInfo.getCC() != rbaseInfo.getCC()) 10375 return {}; 10376 10377 // Regparm is part of the calling convention. 10378 if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm()) 10379 return {}; 10380 if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm()) 10381 return {}; 10382 10383 if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult()) 10384 return {}; 10385 if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs()) 10386 return {}; 10387 if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck()) 10388 return {}; 10389 10390 // When merging declarations, it's common for supplemental information like 10391 // attributes to only be present in one of the declarations, and we generally 10392 // want type merging to preserve the union of information. So a merged 10393 // function type should be noreturn if it was noreturn in *either* operand 10394 // type. 10395 // 10396 // But for the conditional operator, this is backwards. The result of the 10397 // operator could be either operand, and its type should conservatively 10398 // reflect that. So a function type in a composite type is noreturn only 10399 // if it's noreturn in *both* operand types. 10400 // 10401 // Arguably, noreturn is a kind of subtype, and the conditional operator 10402 // ought to produce the most specific common supertype of its operand types. 10403 // That would differ from this rule in contravariant positions. However, 10404 // neither C nor C++ generally uses this kind of subtype reasoning. Also, 10405 // as a practical matter, it would only affect C code that does abstraction of 10406 // higher-order functions (taking noreturn callbacks!), which is uncommon to 10407 // say the least. So we use the simpler rule. 10408 bool NoReturn = IsConditionalOperator 10409 ? 
lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn() 10410 : lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); 10411 if (lbaseInfo.getNoReturn() != NoReturn) 10412 allLTypes = false; 10413 if (rbaseInfo.getNoReturn() != NoReturn) 10414 allRTypes = false; 10415 10416 FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); 10417 10418 if (lproto && rproto) { // two C99 style function prototypes 10419 assert((AllowCXX || 10420 (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) && 10421 "C++ shouldn't be here"); 10422 // Compatible functions must have the same number of parameters 10423 if (lproto->getNumParams() != rproto->getNumParams()) 10424 return {}; 10425 10426 // Variadic and non-variadic functions aren't compatible 10427 if (lproto->isVariadic() != rproto->isVariadic()) 10428 return {}; 10429 10430 if (lproto->getMethodQuals() != rproto->getMethodQuals()) 10431 return {}; 10432 10433 SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos; 10434 bool canUseLeft, canUseRight; 10435 if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight, 10436 newParamInfos)) 10437 return {}; 10438 10439 if (!canUseLeft) 10440 allLTypes = false; 10441 if (!canUseRight) 10442 allRTypes = false; 10443 10444 // Check parameter type compatibility 10445 SmallVector<QualType, 10> types; 10446 for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) { 10447 QualType lParamType = lproto->getParamType(i).getUnqualifiedType(); 10448 QualType rParamType = rproto->getParamType(i).getUnqualifiedType(); 10449 QualType paramType = mergeFunctionParameterTypes( 10450 lParamType, rParamType, OfBlockPointer, Unqualified); 10451 if (paramType.isNull()) 10452 return {}; 10453 10454 if (Unqualified) 10455 paramType = paramType.getUnqualifiedType(); 10456 10457 types.push_back(paramType); 10458 if (Unqualified) { 10459 lParamType = lParamType.getUnqualifiedType(); 10460 rParamType = rParamType.getUnqualifiedType(); 10461 } 10462 10463 if (getCanonicalType(paramType) != getCanonicalType(lParamType)) 10464 allLTypes = false; 10465 if (getCanonicalType(paramType) != getCanonicalType(rParamType)) 10466 allRTypes = false; 10467 } 10468 10469 if (allLTypes) return lhs; 10470 if (allRTypes) return rhs; 10471 10472 FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo(); 10473 EPI.ExtInfo = einfo; 10474 EPI.ExtParameterInfos = 10475 newParamInfos.empty() ? nullptr : newParamInfos.data(); 10476 return getFunctionType(retType, types, EPI); 10477 } 10478 10479 if (lproto) allRTypes = false; 10480 if (rproto) allLTypes = false; 10481 10482 const FunctionProtoType *proto = lproto ? lproto : rproto; 10483 if (proto) { 10484 assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here"); 10485 if (proto->isVariadic()) 10486 return {}; 10487 // Check that the types are compatible with the types that 10488 // would result from default argument promotions (C99 6.7.5.3p15). 10489 // The only types actually affected are promotable integer 10490 // types and floats, which would be passed as a different 10491 // type depending on whether the prototype is visible. 10492 for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) { 10493 QualType paramTy = proto->getParamType(i); 10494 10495 // Look at the converted type of enum types, since that is the type used 10496 // to pass enum values. 
10497 if (const auto *Enum = paramTy->getAs<EnumType>()) { 10498 paramTy = Enum->getDecl()->getIntegerType(); 10499 if (paramTy.isNull()) 10500 return {}; 10501 } 10502 10503 if (isPromotableIntegerType(paramTy) || 10504 getCanonicalType(paramTy).getUnqualifiedType() == FloatTy) 10505 return {}; 10506 } 10507 10508 if (allLTypes) return lhs; 10509 if (allRTypes) return rhs; 10510 10511 FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); 10512 EPI.ExtInfo = einfo; 10513 return getFunctionType(retType, proto->getParamTypes(), EPI); 10514 } 10515 10516 if (allLTypes) return lhs; 10517 if (allRTypes) return rhs; 10518 return getFunctionNoProtoType(retType, einfo); 10519 } 10520 10521 /// Given that we have an enum type and a non-enum type, try to merge them. 10522 static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET, 10523 QualType other, bool isBlockReturnType) { 10524 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char, 10525 // a signed integer type, or an unsigned integer type. 10526 // Compatibility is based on the underlying type, not the promotion 10527 // type. 10528 QualType underlyingType = ET->getDecl()->getIntegerType(); 10529 if (underlyingType.isNull()) 10530 return {}; 10531 if (Context.hasSameType(underlyingType, other)) 10532 return other; 10533 10534 // In block return types, we're more permissive and accept any 10535 // integral type of the same size. 10536 if (isBlockReturnType && other->isIntegerType() && 10537 Context.getTypeSize(underlyingType) == Context.getTypeSize(other)) 10538 return other; 10539 10540 return {}; 10541 } 10542 10543 QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer, 10544 bool Unqualified, bool BlockReturnType, 10545 bool IsConditionalOperator) { 10546 // For C++ we will not reach this code with reference types (see below), 10547 // for OpenMP variant call overloading we might. 10548 // 10549 // C++ [expr]: If an expression initially has the type "reference to T", the 10550 // type is adjusted to "T" prior to any further analysis, the expression 10551 // designates the object or function denoted by the reference, and the 10552 // expression is an lvalue unless the reference is an rvalue reference and 10553 // the expression is a function call (possibly inside parentheses). 10554 auto *LHSRefTy = LHS->getAs<ReferenceType>(); 10555 auto *RHSRefTy = RHS->getAs<ReferenceType>(); 10556 if (LangOpts.OpenMP && LHSRefTy && RHSRefTy && 10557 LHS->getTypeClass() == RHS->getTypeClass()) 10558 return mergeTypes(LHSRefTy->getPointeeType(), RHSRefTy->getPointeeType(), 10559 OfBlockPointer, Unqualified, BlockReturnType); 10560 if (LHSRefTy || RHSRefTy) 10561 return {}; 10562 10563 if (Unqualified) { 10564 LHS = LHS.getUnqualifiedType(); 10565 RHS = RHS.getUnqualifiedType(); 10566 } 10567 10568 QualType LHSCan = getCanonicalType(LHS), 10569 RHSCan = getCanonicalType(RHS); 10570 10571 // If two types are identical, they are compatible. 10572 if (LHSCan == RHSCan) 10573 return LHS; 10574 10575 // If the qualifiers are different, the types aren't compatible... mostly. 10576 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10577 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10578 if (LQuals != RQuals) { 10579 // If any of these qualifiers are different, we have a type 10580 // mismatch. 
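    // For example, in C 'const int' and 'int' are not compatible (C99
    // 6.7.3p9), so differing CVR qualifiers end the merge here.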
10581 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10582 LQuals.getAddressSpace() != RQuals.getAddressSpace() || 10583 LQuals.getObjCLifetime() != RQuals.getObjCLifetime() || 10584 LQuals.hasUnaligned() != RQuals.hasUnaligned()) 10585 return {}; 10586 10587 // Exactly one GC qualifier difference is allowed: __strong is 10588 // okay if the other type has no GC qualifier but is an Objective 10589 // C object pointer (i.e. implicitly strong by default). We fix 10590 // this by pretending that the unqualified type was actually 10591 // qualified __strong. 10592 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10593 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10594 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10595 10596 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10597 return {}; 10598 10599 if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { 10600 return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); 10601 } 10602 if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { 10603 return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); 10604 } 10605 return {}; 10606 } 10607 10608 // Okay, qualifiers are equal. 10609 10610 Type::TypeClass LHSClass = LHSCan->getTypeClass(); 10611 Type::TypeClass RHSClass = RHSCan->getTypeClass(); 10612 10613 // We want to consider the two function types to be the same for these 10614 // comparisons, just force one to the other. 10615 if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; 10616 if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; 10617 10618 // Same as above for arrays 10619 if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) 10620 LHSClass = Type::ConstantArray; 10621 if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) 10622 RHSClass = Type::ConstantArray; 10623 10624 // ObjCInterfaces are just specialized ObjCObjects. 10625 if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; 10626 if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; 10627 10628 // Canonicalize ExtVector -> Vector. 10629 if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; 10630 if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; 10631 10632 // If the canonical type classes don't match. 10633 if (LHSClass != RHSClass) { 10634 // Note that we only have special rules for turning block enum 10635 // returns into block int returns, not vice-versa. 10636 if (const auto *ETy = LHS->getAs<EnumType>()) { 10637 return mergeEnumWithInteger(*this, ETy, RHS, false); 10638 } 10639 if (const EnumType* ETy = RHS->getAs<EnumType>()) { 10640 return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); 10641 } 10642 // allow block pointer type to match an 'id' type. 10643 if (OfBlockPointer && !BlockReturnType) { 10644 if (LHS->isObjCIdType() && RHS->isBlockPointerType()) 10645 return LHS; 10646 if (RHS->isObjCIdType() && LHS->isBlockPointerType()) 10647 return RHS; 10648 } 10649 // Allow __auto_type to match anything; it merges to the type with more 10650 // information. 10651 if (const auto *AT = LHS->getAs<AutoType>()) { 10652 if (!AT->isDeduced() && AT->isGNUAutoType()) 10653 return RHS; 10654 } 10655 if (const auto *AT = RHS->getAs<AutoType>()) { 10656 if (!AT->isDeduced() && AT->isGNUAutoType()) 10657 return LHS; 10658 } 10659 return {}; 10660 } 10661 10662 // The canonical type classes match. 
10663 switch (LHSClass) { 10664 #define TYPE(Class, Base) 10665 #define ABSTRACT_TYPE(Class, Base) 10666 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 10667 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 10668 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 10669 #include "clang/AST/TypeNodes.inc" 10670 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 10671 10672 case Type::Auto: 10673 case Type::DeducedTemplateSpecialization: 10674 case Type::LValueReference: 10675 case Type::RValueReference: 10676 case Type::MemberPointer: 10677 llvm_unreachable("C++ should never be in mergeTypes"); 10678 10679 case Type::ObjCInterface: 10680 case Type::IncompleteArray: 10681 case Type::VariableArray: 10682 case Type::FunctionProto: 10683 case Type::ExtVector: 10684 llvm_unreachable("Types are eliminated above"); 10685 10686 case Type::Pointer: 10687 { 10688 // Merge two pointer types, while trying to preserve typedef info 10689 QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType(); 10690 QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType(); 10691 if (Unqualified) { 10692 LHSPointee = LHSPointee.getUnqualifiedType(); 10693 RHSPointee = RHSPointee.getUnqualifiedType(); 10694 } 10695 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, 10696 Unqualified); 10697 if (ResultType.isNull()) 10698 return {}; 10699 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10700 return LHS; 10701 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10702 return RHS; 10703 return getPointerType(ResultType); 10704 } 10705 case Type::BlockPointer: 10706 { 10707 // Merge two block pointer types, while trying to preserve typedef info 10708 QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType(); 10709 QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType(); 10710 if (Unqualified) { 10711 LHSPointee = LHSPointee.getUnqualifiedType(); 10712 RHSPointee = RHSPointee.getUnqualifiedType(); 10713 } 10714 if (getLangOpts().OpenCL) { 10715 Qualifiers LHSPteeQual = LHSPointee.getQualifiers(); 10716 Qualifiers RHSPteeQual = RHSPointee.getQualifiers(); 10717 // Blocks can't be an expression in a ternary operator (OpenCL v2.0 10718 // 6.12.5) thus the following check is asymmetric. 
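      // For example, an LHS pointee in the __generic address space may accept
      // an RHS pointee in __global, but not the other way around.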
10719 if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual)) 10720 return {}; 10721 LHSPteeQual.removeAddressSpace(); 10722 RHSPteeQual.removeAddressSpace(); 10723 LHSPointee = 10724 QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue()); 10725 RHSPointee = 10726 QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue()); 10727 } 10728 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, 10729 Unqualified); 10730 if (ResultType.isNull()) 10731 return {}; 10732 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10733 return LHS; 10734 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10735 return RHS; 10736 return getBlockPointerType(ResultType); 10737 } 10738 case Type::Atomic: 10739 { 10740 // Merge two pointer types, while trying to preserve typedef info 10741 QualType LHSValue = LHS->castAs<AtomicType>()->getValueType(); 10742 QualType RHSValue = RHS->castAs<AtomicType>()->getValueType(); 10743 if (Unqualified) { 10744 LHSValue = LHSValue.getUnqualifiedType(); 10745 RHSValue = RHSValue.getUnqualifiedType(); 10746 } 10747 QualType ResultType = mergeTypes(LHSValue, RHSValue, false, 10748 Unqualified); 10749 if (ResultType.isNull()) 10750 return {}; 10751 if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) 10752 return LHS; 10753 if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) 10754 return RHS; 10755 return getAtomicType(ResultType); 10756 } 10757 case Type::ConstantArray: 10758 { 10759 const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); 10760 const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); 10761 if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) 10762 return {}; 10763 10764 QualType LHSElem = getAsArrayType(LHS)->getElementType(); 10765 QualType RHSElem = getAsArrayType(RHS)->getElementType(); 10766 if (Unqualified) { 10767 LHSElem = LHSElem.getUnqualifiedType(); 10768 RHSElem = RHSElem.getUnqualifiedType(); 10769 } 10770 10771 QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); 10772 if (ResultType.isNull()) 10773 return {}; 10774 10775 const VariableArrayType* LVAT = getAsVariableArrayType(LHS); 10776 const VariableArrayType* RVAT = getAsVariableArrayType(RHS); 10777 10778 // If either side is a variable array, and both are complete, check whether 10779 // the current dimension is definite. 
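    // For example, 'int[4]' conflicts with a VLA 'int[n]' only if n folds to
    // a constant other than 4; a truly variable bound is not rejected here.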
10780 if (LVAT || RVAT) { 10781 auto SizeFetch = [this](const VariableArrayType* VAT, 10782 const ConstantArrayType* CAT) 10783 -> std::pair<bool,llvm::APInt> { 10784 if (VAT) { 10785 std::optional<llvm::APSInt> TheInt; 10786 Expr *E = VAT->getSizeExpr(); 10787 if (E && (TheInt = E->getIntegerConstantExpr(*this))) 10788 return std::make_pair(true, *TheInt); 10789 return std::make_pair(false, llvm::APSInt()); 10790 } 10791 if (CAT) 10792 return std::make_pair(true, CAT->getSize()); 10793 return std::make_pair(false, llvm::APInt()); 10794 }; 10795 10796 bool HaveLSize, HaveRSize; 10797 llvm::APInt LSize, RSize; 10798 std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT); 10799 std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT); 10800 if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize)) 10801 return {}; // Definite, but unequal, array dimension 10802 } 10803 10804 if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10805 return LHS; 10806 if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10807 return RHS; 10808 if (LCAT) 10809 return getConstantArrayType(ResultType, LCAT->getSize(), 10810 LCAT->getSizeExpr(), 10811 ArrayType::ArraySizeModifier(), 0); 10812 if (RCAT) 10813 return getConstantArrayType(ResultType, RCAT->getSize(), 10814 RCAT->getSizeExpr(), 10815 ArrayType::ArraySizeModifier(), 0); 10816 if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10817 return LHS; 10818 if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10819 return RHS; 10820 if (LVAT) { 10821 // FIXME: This isn't correct! But tricky to implement because 10822 // the array's size has to be the size of LHS, but the type 10823 // has to be different. 10824 return LHS; 10825 } 10826 if (RVAT) { 10827 // FIXME: This isn't correct! But tricky to implement because 10828 // the array's size has to be the size of RHS, but the type 10829 // has to be different. 10830 return RHS; 10831 } 10832 if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; 10833 if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; 10834 return getIncompleteArrayType(ResultType, 10835 ArrayType::ArraySizeModifier(), 0); 10836 } 10837 case Type::FunctionNoProto: 10838 return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified, 10839 /*AllowCXX=*/false, IsConditionalOperator); 10840 case Type::Record: 10841 case Type::Enum: 10842 return {}; 10843 case Type::Builtin: 10844 // Only exactly equal builtin types are compatible, which is tested above. 10845 return {}; 10846 case Type::Complex: 10847 // Distinct complex types are incompatible. 10848 return {}; 10849 case Type::Vector: 10850 // FIXME: The merged type should be an ExtVector! 10851 if (areCompatVectorTypes(LHSCan->castAs<VectorType>(), 10852 RHSCan->castAs<VectorType>())) 10853 return LHS; 10854 return {}; 10855 case Type::ConstantMatrix: 10856 if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(), 10857 RHSCan->castAs<ConstantMatrixType>())) 10858 return LHS; 10859 return {}; 10860 case Type::ObjCObject: { 10861 // Check if the types are assignment compatible. 10862 // FIXME: This should be type compatibility, e.g. whether 10863 // "LHS x; RHS x;" at global scope is legal. 
10864 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectType>(), 10865 RHS->castAs<ObjCObjectType>())) 10866 return LHS; 10867 return {}; 10868 } 10869 case Type::ObjCObjectPointer: 10870 if (OfBlockPointer) { 10871 if (canAssignObjCInterfacesInBlockPointer( 10872 LHS->castAs<ObjCObjectPointerType>(), 10873 RHS->castAs<ObjCObjectPointerType>(), BlockReturnType)) 10874 return LHS; 10875 return {}; 10876 } 10877 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectPointerType>(), 10878 RHS->castAs<ObjCObjectPointerType>())) 10879 return LHS; 10880 return {}; 10881 case Type::Pipe: 10882 assert(LHS != RHS && 10883 "Equivalent pipe types should have already been handled!"); 10884 return {}; 10885 case Type::BitInt: { 10886 // Merge two bit-precise int types, while trying to preserve typedef info. 10887 bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned(); 10888 bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned(); 10889 unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits(); 10890 unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits(); 10891 10892 // Like unsigned/int, shouldn't have a type if they don't match. 10893 if (LHSUnsigned != RHSUnsigned) 10894 return {}; 10895 10896 if (LHSBits != RHSBits) 10897 return {}; 10898 return LHS; 10899 } 10900 } 10901 10902 llvm_unreachable("Invalid Type::Class!"); 10903 } 10904 10905 bool ASTContext::mergeExtParameterInfo( 10906 const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType, 10907 bool &CanUseFirst, bool &CanUseSecond, 10908 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) { 10909 assert(NewParamInfos.empty() && "param info list not empty"); 10910 CanUseFirst = CanUseSecond = true; 10911 bool FirstHasInfo = FirstFnType->hasExtParameterInfos(); 10912 bool SecondHasInfo = SecondFnType->hasExtParameterInfos(); 10913 10914 // Fast path: if the first type doesn't have ext parameter infos, 10915 // we match if and only if the second type also doesn't have them. 10916 if (!FirstHasInfo && !SecondHasInfo) 10917 return true; 10918 10919 bool NeedParamInfo = false; 10920 size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size() 10921 : SecondFnType->getExtParameterInfos().size(); 10922 10923 for (size_t I = 0; I < E; ++I) { 10924 FunctionProtoType::ExtParameterInfo FirstParam, SecondParam; 10925 if (FirstHasInfo) 10926 FirstParam = FirstFnType->getExtParameterInfo(I); 10927 if (SecondHasInfo) 10928 SecondParam = SecondFnType->getExtParameterInfo(I); 10929 10930 // Cannot merge unless everything except the noescape flag matches. 10931 if (FirstParam.withIsNoEscape(false) != SecondParam.withIsNoEscape(false)) 10932 return false; 10933 10934 bool FirstNoEscape = FirstParam.isNoEscape(); 10935 bool SecondNoEscape = SecondParam.isNoEscape(); 10936 bool IsNoEscape = FirstNoEscape && SecondNoEscape; 10937 NewParamInfos.push_back(FirstParam.withIsNoEscape(IsNoEscape)); 10938 if (NewParamInfos.back().getOpaqueValue()) 10939 NeedParamInfo = true; 10940 if (FirstNoEscape != IsNoEscape) 10941 CanUseFirst = false; 10942 if (SecondNoEscape != IsNoEscape) 10943 CanUseSecond = false; 10944 } 10945 10946 if (!NeedParamInfo) 10947 NewParamInfos.clear(); 10948 10949 return true; 10950 } 10951 10952 void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) { 10953 ObjCLayouts[CD] = nullptr; 10954 } 10955 10956 /// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and 10957 /// 'RHS' attributes and returns the merged version; including for function 10958 /// return types. 
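/// Returns a null QualType when the two types cannot be reconciled.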
10959 QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) { 10960 QualType LHSCan = getCanonicalType(LHS), 10961 RHSCan = getCanonicalType(RHS); 10962 // If two types are identical, they are compatible. 10963 if (LHSCan == RHSCan) 10964 return LHS; 10965 if (RHSCan->isFunctionType()) { 10966 if (!LHSCan->isFunctionType()) 10967 return {}; 10968 QualType OldReturnType = 10969 cast<FunctionType>(RHSCan.getTypePtr())->getReturnType(); 10970 QualType NewReturnType = 10971 cast<FunctionType>(LHSCan.getTypePtr())->getReturnType(); 10972 QualType ResReturnType = 10973 mergeObjCGCQualifiers(NewReturnType, OldReturnType); 10974 if (ResReturnType.isNull()) 10975 return {}; 10976 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) { 10977 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo(); 10978 // In either case, use OldReturnType to build the new function type. 10979 const auto *F = LHS->castAs<FunctionType>(); 10980 if (const auto *FPT = cast<FunctionProtoType>(F)) { 10981 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 10982 EPI.ExtInfo = getFunctionExtInfo(LHS); 10983 QualType ResultType = 10984 getFunctionType(OldReturnType, FPT->getParamTypes(), EPI); 10985 return ResultType; 10986 } 10987 } 10988 return {}; 10989 } 10990 10991 // If the qualifiers are different, the types can still be merged. 10992 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10993 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10994 if (LQuals != RQuals) { 10995 // If any of these qualifiers are different, we have a type mismatch. 10996 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10997 LQuals.getAddressSpace() != RQuals.getAddressSpace()) 10998 return {}; 10999 11000 // Exactly one GC qualifier difference is allowed: __strong is 11001 // okay if the other type has no GC qualifier but is an Objective 11002 // C object pointer (i.e. implicitly strong by default). We fix 11003 // this by pretending that the unqualified type was actually 11004 // qualified __strong. 
11005 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 11006 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 11007 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 11008 11009 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 11010 return {}; 11011 11012 if (GC_L == Qualifiers::Strong) 11013 return LHS; 11014 if (GC_R == Qualifiers::Strong) 11015 return RHS; 11016 return {}; 11017 } 11018 11019 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) { 11020 QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 11021 QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 11022 QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT); 11023 if (ResQT == LHSBaseQT) 11024 return LHS; 11025 if (ResQT == RHSBaseQT) 11026 return RHS; 11027 } 11028 return {}; 11029 } 11030 11031 //===----------------------------------------------------------------------===// 11032 // Integer Predicates 11033 //===----------------------------------------------------------------------===// 11034 11035 unsigned ASTContext::getIntWidth(QualType T) const { 11036 if (const auto *ET = T->getAs<EnumType>()) 11037 T = ET->getDecl()->getIntegerType(); 11038 if (T->isBooleanType()) 11039 return 1; 11040 if (const auto *EIT = T->getAs<BitIntType>()) 11041 return EIT->getNumBits(); 11042 // For builtin types, just use the standard type sizing method 11043 return (unsigned)getTypeSize(T); 11044 } 11045 11046 QualType ASTContext::getCorrespondingUnsignedType(QualType T) const { 11047 assert((T->hasIntegerRepresentation() || T->isEnumeralType() || 11048 T->isFixedPointType()) && 11049 "Unexpected type"); 11050 11051 // Turn <4 x signed int> -> <4 x unsigned int> 11052 if (const auto *VTy = T->getAs<VectorType>()) 11053 return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()), 11054 VTy->getNumElements(), VTy->getVectorKind()); 11055 11056 // For _BitInt, return an unsigned _BitInt with same width. 11057 if (const auto *EITy = T->getAs<BitIntType>()) 11058 return getBitIntType(/*Unsigned=*/true, EITy->getNumBits()); 11059 11060 // For enums, get the underlying integer type of the enum, and let the general 11061 // integer type signchanging code handle it. 11062 if (const auto *ETy = T->getAs<EnumType>()) 11063 T = ETy->getDecl()->getIntegerType(); 11064 11065 switch (T->castAs<BuiltinType>()->getKind()) { 11066 case BuiltinType::Char_U: 11067 // Plain `char` is mapped to `unsigned char` even if it's already unsigned 11068 case BuiltinType::Char_S: 11069 case BuiltinType::SChar: 11070 case BuiltinType::Char8: 11071 return UnsignedCharTy; 11072 case BuiltinType::Short: 11073 return UnsignedShortTy; 11074 case BuiltinType::Int: 11075 return UnsignedIntTy; 11076 case BuiltinType::Long: 11077 return UnsignedLongTy; 11078 case BuiltinType::LongLong: 11079 return UnsignedLongLongTy; 11080 case BuiltinType::Int128: 11081 return UnsignedInt128Ty; 11082 // wchar_t is special. It is either signed or not, but when it's signed, 11083 // there's no matching "unsigned wchar_t". Therefore we return the unsigned 11084 // version of its underlying type instead. 
11085 case BuiltinType::WChar_S: 11086 return getUnsignedWCharType(); 11087 11088 case BuiltinType::ShortAccum: 11089 return UnsignedShortAccumTy; 11090 case BuiltinType::Accum: 11091 return UnsignedAccumTy; 11092 case BuiltinType::LongAccum: 11093 return UnsignedLongAccumTy; 11094 case BuiltinType::SatShortAccum: 11095 return SatUnsignedShortAccumTy; 11096 case BuiltinType::SatAccum: 11097 return SatUnsignedAccumTy; 11098 case BuiltinType::SatLongAccum: 11099 return SatUnsignedLongAccumTy; 11100 case BuiltinType::ShortFract: 11101 return UnsignedShortFractTy; 11102 case BuiltinType::Fract: 11103 return UnsignedFractTy; 11104 case BuiltinType::LongFract: 11105 return UnsignedLongFractTy; 11106 case BuiltinType::SatShortFract: 11107 return SatUnsignedShortFractTy; 11108 case BuiltinType::SatFract: 11109 return SatUnsignedFractTy; 11110 case BuiltinType::SatLongFract: 11111 return SatUnsignedLongFractTy; 11112 default: 11113 assert((T->hasUnsignedIntegerRepresentation() || 11114 T->isUnsignedFixedPointType()) && 11115 "Unexpected signed integer or fixed point type"); 11116 return T; 11117 } 11118 } 11119 11120 QualType ASTContext::getCorrespondingSignedType(QualType T) const { 11121 assert((T->hasIntegerRepresentation() || T->isEnumeralType() || 11122 T->isFixedPointType()) && 11123 "Unexpected type"); 11124 11125 // Turn <4 x unsigned int> -> <4 x signed int> 11126 if (const auto *VTy = T->getAs<VectorType>()) 11127 return getVectorType(getCorrespondingSignedType(VTy->getElementType()), 11128 VTy->getNumElements(), VTy->getVectorKind()); 11129 11130 // For _BitInt, return a signed _BitInt with same width. 11131 if (const auto *EITy = T->getAs<BitIntType>()) 11132 return getBitIntType(/*Unsigned=*/false, EITy->getNumBits()); 11133 11134 // For enums, get the underlying integer type of the enum, and let the general 11135 // integer type signchanging code handle it. 11136 if (const auto *ETy = T->getAs<EnumType>()) 11137 T = ETy->getDecl()->getIntegerType(); 11138 11139 switch (T->castAs<BuiltinType>()->getKind()) { 11140 case BuiltinType::Char_S: 11141 // Plain `char` is mapped to `signed char` even if it's already signed 11142 case BuiltinType::Char_U: 11143 case BuiltinType::UChar: 11144 case BuiltinType::Char8: 11145 return SignedCharTy; 11146 case BuiltinType::UShort: 11147 return ShortTy; 11148 case BuiltinType::UInt: 11149 return IntTy; 11150 case BuiltinType::ULong: 11151 return LongTy; 11152 case BuiltinType::ULongLong: 11153 return LongLongTy; 11154 case BuiltinType::UInt128: 11155 return Int128Ty; 11156 // wchar_t is special. It is either unsigned or not, but when it's unsigned, 11157 // there's no matching "signed wchar_t". Therefore we return the signed 11158 // version of its underlying type instead. 
11159 case BuiltinType::WChar_U: 11160 return getSignedWCharType(); 11161 11162 case BuiltinType::UShortAccum: 11163 return ShortAccumTy; 11164 case BuiltinType::UAccum: 11165 return AccumTy; 11166 case BuiltinType::ULongAccum: 11167 return LongAccumTy; 11168 case BuiltinType::SatUShortAccum: 11169 return SatShortAccumTy; 11170 case BuiltinType::SatUAccum: 11171 return SatAccumTy; 11172 case BuiltinType::SatULongAccum: 11173 return SatLongAccumTy; 11174 case BuiltinType::UShortFract: 11175 return ShortFractTy; 11176 case BuiltinType::UFract: 11177 return FractTy; 11178 case BuiltinType::ULongFract: 11179 return LongFractTy; 11180 case BuiltinType::SatUShortFract: 11181 return SatShortFractTy; 11182 case BuiltinType::SatUFract: 11183 return SatFractTy; 11184 case BuiltinType::SatULongFract: 11185 return SatLongFractTy; 11186 default: 11187 assert( 11188 (T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) && 11189 "Unexpected signed integer or fixed point type"); 11190 return T; 11191 } 11192 } 11193 11194 ASTMutationListener::~ASTMutationListener() = default; 11195 11196 void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD, 11197 QualType ReturnType) {} 11198 11199 //===----------------------------------------------------------------------===// 11200 // Builtin Type Computation 11201 //===----------------------------------------------------------------------===// 11202 11203 /// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the 11204 /// pointer over the consumed characters. This returns the resultant type. If 11205 /// AllowTypeModifiers is false then modifier like * are not parsed, just basic 11206 /// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of 11207 /// a vector of "i*". 11208 /// 11209 /// RequiresICE is filled in on return to indicate whether the value is required 11210 /// to be an Integer Constant Expression. 11211 static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, 11212 ASTContext::GetBuiltinTypeError &Error, 11213 bool &RequiresICE, 11214 bool AllowTypeModifiers) { 11215 // Modifiers. 11216 int HowLong = 0; 11217 bool Signed = false, Unsigned = false; 11218 RequiresICE = false; 11219 11220 // Read the prefixed modifiers first. 11221 bool Done = false; 11222 #ifndef NDEBUG 11223 bool IsSpecial = false; 11224 #endif 11225 while (!Done) { 11226 switch (*Str++) { 11227 default: Done = true; --Str; break; 11228 case 'I': 11229 RequiresICE = true; 11230 break; 11231 case 'S': 11232 assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!"); 11233 assert(!Signed && "Can't use 'S' modifier multiple times!"); 11234 Signed = true; 11235 break; 11236 case 'U': 11237 assert(!Signed && "Can't use both 'S' and 'U' modifiers!"); 11238 assert(!Unsigned && "Can't use 'U' modifier multiple times!"); 11239 Unsigned = true; 11240 break; 11241 case 'L': 11242 assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers"); 11243 assert(HowLong <= 2 && "Can't have LLLL modifier"); 11244 ++HowLong; 11245 break; 11246 case 'N': 11247 // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise. 11248 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11249 assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!"); 11250 #ifndef NDEBUG 11251 IsSpecial = true; 11252 #endif 11253 if (Context.getTargetInfo().getLongWidth() == 32) 11254 ++HowLong; 11255 break; 11256 case 'W': 11257 // This modifier represents int64 type. 
11258 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11259 assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!"); 11260 #ifndef NDEBUG 11261 IsSpecial = true; 11262 #endif 11263 switch (Context.getTargetInfo().getInt64Type()) { 11264 default: 11265 llvm_unreachable("Unexpected integer type"); 11266 case TargetInfo::SignedLong: 11267 HowLong = 1; 11268 break; 11269 case TargetInfo::SignedLongLong: 11270 HowLong = 2; 11271 break; 11272 } 11273 break; 11274 case 'Z': 11275 // This modifier represents int32 type. 11276 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11277 assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!"); 11278 #ifndef NDEBUG 11279 IsSpecial = true; 11280 #endif 11281 switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) { 11282 default: 11283 llvm_unreachable("Unexpected integer type"); 11284 case TargetInfo::SignedInt: 11285 HowLong = 0; 11286 break; 11287 case TargetInfo::SignedLong: 11288 HowLong = 1; 11289 break; 11290 case TargetInfo::SignedLongLong: 11291 HowLong = 2; 11292 break; 11293 } 11294 break; 11295 case 'O': 11296 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11297 assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!"); 11298 #ifndef NDEBUG 11299 IsSpecial = true; 11300 #endif 11301 if (Context.getLangOpts().OpenCL) 11302 HowLong = 1; 11303 else 11304 HowLong = 2; 11305 break; 11306 } 11307 } 11308 11309 QualType Type; 11310 11311 // Read the base type. 11312 switch (*Str++) { 11313 default: llvm_unreachable("Unknown builtin type letter!"); 11314 case 'x': 11315 assert(HowLong == 0 && !Signed && !Unsigned && 11316 "Bad modifiers used with 'x'!"); 11317 Type = Context.Float16Ty; 11318 break; 11319 case 'y': 11320 assert(HowLong == 0 && !Signed && !Unsigned && 11321 "Bad modifiers used with 'y'!"); 11322 Type = Context.BFloat16Ty; 11323 break; 11324 case 'v': 11325 assert(HowLong == 0 && !Signed && !Unsigned && 11326 "Bad modifiers used with 'v'!"); 11327 Type = Context.VoidTy; 11328 break; 11329 case 'h': 11330 assert(HowLong == 0 && !Signed && !Unsigned && 11331 "Bad modifiers used with 'h'!"); 11332 Type = Context.HalfTy; 11333 break; 11334 case 'f': 11335 assert(HowLong == 0 && !Signed && !Unsigned && 11336 "Bad modifiers used with 'f'!"); 11337 Type = Context.FloatTy; 11338 break; 11339 case 'd': 11340 assert(HowLong < 3 && !Signed && !Unsigned && 11341 "Bad modifiers used with 'd'!"); 11342 if (HowLong == 1) 11343 Type = Context.LongDoubleTy; 11344 else if (HowLong == 2) 11345 Type = Context.Float128Ty; 11346 else 11347 Type = Context.DoubleTy; 11348 break; 11349 case 's': 11350 assert(HowLong == 0 && "Bad modifiers used with 's'!"); 11351 if (Unsigned) 11352 Type = Context.UnsignedShortTy; 11353 else 11354 Type = Context.ShortTy; 11355 break; 11356 case 'i': 11357 if (HowLong == 3) 11358 Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty; 11359 else if (HowLong == 2) 11360 Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy; 11361 else if (HowLong == 1) 11362 Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy; 11363 else 11364 Type = Unsigned ? 
Context.UnsignedIntTy : Context.IntTy; 11365 break; 11366 case 'c': 11367 assert(HowLong == 0 && "Bad modifiers used with 'c'!"); 11368 if (Signed) 11369 Type = Context.SignedCharTy; 11370 else if (Unsigned) 11371 Type = Context.UnsignedCharTy; 11372 else 11373 Type = Context.CharTy; 11374 break; 11375 case 'b': // boolean 11376 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!"); 11377 Type = Context.BoolTy; 11378 break; 11379 case 'z': // size_t. 11380 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!"); 11381 Type = Context.getSizeType(); 11382 break; 11383 case 'w': // wchar_t. 11384 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!"); 11385 Type = Context.getWideCharType(); 11386 break; 11387 case 'F': 11388 Type = Context.getCFConstantStringType(); 11389 break; 11390 case 'G': 11391 Type = Context.getObjCIdType(); 11392 break; 11393 case 'H': 11394 Type = Context.getObjCSelType(); 11395 break; 11396 case 'M': 11397 Type = Context.getObjCSuperType(); 11398 break; 11399 case 'a': 11400 Type = Context.getBuiltinVaListType(); 11401 assert(!Type.isNull() && "builtin va list type not initialized!"); 11402 break; 11403 case 'A': 11404 // This is a "reference" to a va_list; however, what exactly 11405 // this means depends on how va_list is defined. There are two 11406 // different kinds of va_list: ones passed by value, and ones 11407 // passed by reference. An example of a by-value va_list is 11408 // x86, where va_list is a char*. An example of by-ref va_list 11409 // is x86-64, where va_list is a __va_list_tag[1]. For x86, 11410 // we want this argument to be a char*&; for x86-64, we want 11411 // it to be a __va_list_tag*. 11412 Type = Context.getBuiltinVaListType(); 11413 assert(!Type.isNull() && "builtin va list type not initialized!"); 11414 if (Type->isArrayType()) 11415 Type = Context.getArrayDecayedType(Type); 11416 else 11417 Type = Context.getLValueReferenceType(Type); 11418 break; 11419 case 'q': { 11420 char *End; 11421 unsigned NumElements = strtoul(Str, &End, 10); 11422 assert(End != Str && "Missing vector size"); 11423 Str = End; 11424 11425 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11426 RequiresICE, false); 11427 assert(!RequiresICE && "Can't require vector ICE"); 11428 11429 Type = Context.getScalableVectorType(ElementType, NumElements); 11430 break; 11431 } 11432 case 'Q': { 11433 switch (*Str++) { 11434 case 'a': { 11435 Type = Context.SveCountTy; 11436 break; 11437 } 11438 default: 11439 llvm_unreachable("Unexpected target builtin type"); 11440 } 11441 break; 11442 } 11443 case 'V': { 11444 char *End; 11445 unsigned NumElements = strtoul(Str, &End, 10); 11446 assert(End != Str && "Missing vector size"); 11447 Str = End; 11448 11449 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11450 RequiresICE, false); 11451 assert(!RequiresICE && "Can't require vector ICE"); 11452 11453 // TODO: No way to make AltiVec vectors in builtins yet. 
11454 Type = Context.getVectorType(ElementType, NumElements, 11455 VectorType::GenericVector); 11456 break; 11457 } 11458 case 'E': { 11459 char *End; 11460 11461 unsigned NumElements = strtoul(Str, &End, 10); 11462 assert(End != Str && "Missing vector size"); 11463 11464 Str = End; 11465 11466 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11467 false); 11468 Type = Context.getExtVectorType(ElementType, NumElements); 11469 break; 11470 } 11471 case 'X': { 11472 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11473 false); 11474 assert(!RequiresICE && "Can't require complex ICE"); 11475 Type = Context.getComplexType(ElementType); 11476 break; 11477 } 11478 case 'Y': 11479 Type = Context.getPointerDiffType(); 11480 break; 11481 case 'P': 11482 Type = Context.getFILEType(); 11483 if (Type.isNull()) { 11484 Error = ASTContext::GE_Missing_stdio; 11485 return {}; 11486 } 11487 break; 11488 case 'J': 11489 if (Signed) 11490 Type = Context.getsigjmp_bufType(); 11491 else 11492 Type = Context.getjmp_bufType(); 11493 11494 if (Type.isNull()) { 11495 Error = ASTContext::GE_Missing_setjmp; 11496 return {}; 11497 } 11498 break; 11499 case 'K': 11500 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!"); 11501 Type = Context.getucontext_tType(); 11502 11503 if (Type.isNull()) { 11504 Error = ASTContext::GE_Missing_ucontext; 11505 return {}; 11506 } 11507 break; 11508 case 'p': 11509 Type = Context.getProcessIDType(); 11510 break; 11511 } 11512 11513 // If there are modifiers and if we're allowed to parse them, go for it. 11514 Done = !AllowTypeModifiers; 11515 while (!Done) { 11516 switch (char c = *Str++) { 11517 default: Done = true; --Str; break; 11518 case '*': 11519 case '&': { 11520 // Both pointers and references can have their pointee types 11521 // qualified with an address space. 11522 char *End; 11523 unsigned AddrSpace = strtoul(Str, &End, 10); 11524 if (End != Str) { 11525 // Note AddrSpace == 0 is not the same as an unspecified address space. 11526 Type = Context.getAddrSpaceQualType( 11527 Type, 11528 Context.getLangASForBuiltinAddressSpace(AddrSpace)); 11529 Str = End; 11530 } 11531 if (c == '*') 11532 Type = Context.getPointerType(Type); 11533 else 11534 Type = Context.getLValueReferenceType(Type); 11535 break; 11536 } 11537 // FIXME: There's no way to have a built-in with an rvalue ref arg. 11538 case 'C': 11539 Type = Type.withConst(); 11540 break; 11541 case 'D': 11542 Type = Context.getVolatileType(Type); 11543 break; 11544 case 'R': 11545 Type = Type.withRestrict(); 11546 break; 11547 } 11548 } 11549 11550 assert((!RequiresICE || Type->isIntegralOrEnumerationType()) && 11551 "Integer constant 'I' type must be an integer"); 11552 11553 return Type; 11554 } 11555 11556 // On some targets such as PowerPC, some of the builtins are defined with custom 11557 // type descriptors for target-dependent types. These descriptors are decoded in 11558 // other functions, but it may be useful to be able to fall back to default 11559 // descriptor decoding to define builtins mixing target-dependent and target- 11560 // independent types. This function allows decoding one type descriptor with 11561 // default decoding. 
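// Illustrative examples of the descriptor grammar handled above (not an
// exhaustive list): "v" decodes to 'void', "Ui" to 'unsigned int', "LLi" to
// 'long long', and "d*" to 'double *'; an 'I' prefix (as in "Ii") marks a
// value that must be an integer constant expression, and a trailing '.' in a
// builtin signature marks the function as variadic.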
11562 QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context, 11563 GetBuiltinTypeError &Error, bool &RequireICE, 11564 bool AllowTypeModifiers) const { 11565 return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers); 11566 } 11567 11568 /// GetBuiltinType - Return the type for the specified builtin. 11569 QualType ASTContext::GetBuiltinType(unsigned Id, 11570 GetBuiltinTypeError &Error, 11571 unsigned *IntegerConstantArgs) const { 11572 const char *TypeStr = BuiltinInfo.getTypeString(Id); 11573 if (TypeStr[0] == '\0') { 11574 Error = GE_Missing_type; 11575 return {}; 11576 } 11577 11578 SmallVector<QualType, 8> ArgTypes; 11579 11580 bool RequiresICE = false; 11581 Error = GE_None; 11582 QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error, 11583 RequiresICE, true); 11584 if (Error != GE_None) 11585 return {}; 11586 11587 assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE"); 11588 11589 while (TypeStr[0] && TypeStr[0] != '.') { 11590 QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true); 11591 if (Error != GE_None) 11592 return {}; 11593 11594 // If this argument is required to be an IntegerConstantExpression and the 11595 // caller cares, fill in the bitmask we return. 11596 if (RequiresICE && IntegerConstantArgs) 11597 *IntegerConstantArgs |= 1 << ArgTypes.size(); 11598 11599 // Do array -> pointer decay. The builtin should use the decayed type. 11600 if (Ty->isArrayType()) 11601 Ty = getArrayDecayedType(Ty); 11602 11603 ArgTypes.push_back(Ty); 11604 } 11605 11606 if (Id == Builtin::BI__GetExceptionInfo) 11607 return {}; 11608 11609 assert((TypeStr[0] != '.' || TypeStr[1] == 0) && 11610 "'.' should only occur at end of builtin type list!"); 11611 11612 bool Variadic = (TypeStr[0] == '.'); 11613 11614 FunctionType::ExtInfo EI(getDefaultCallingConvention( 11615 Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true)); 11616 if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true); 11617 11618 11619 // We really shouldn't be making a no-proto type here. 11620 if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes()) 11621 return getFunctionNoProtoType(ResType, EI); 11622 11623 FunctionProtoType::ExtProtoInfo EPI; 11624 EPI.ExtInfo = EI; 11625 EPI.Variadic = Variadic; 11626 if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id)) 11627 EPI.ExceptionSpec.Type = 11628 getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone; 11629 11630 return getFunctionType(ResType, ArgTypes, EPI); 11631 } 11632 11633 static GVALinkage basicGVALinkageForFunction(const ASTContext &Context, 11634 const FunctionDecl *FD) { 11635 if (!FD->isExternallyVisible()) 11636 return GVA_Internal; 11637 11638 // Non-user-provided functions get emitted as weak definitions with every 11639 // use, no matter whether they've been explicitly instantiated etc. 
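  // (Illustrative: this covers, e.g., implicitly-defined special members and
  // functions explicitly defaulted on their first declaration, which are
  // treated as discardable definitions.)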
11640 if (!FD->isUserProvided()) 11641 return GVA_DiscardableODR; 11642 11643 GVALinkage External; 11644 switch (FD->getTemplateSpecializationKind()) { 11645 case TSK_Undeclared: 11646 case TSK_ExplicitSpecialization: 11647 External = GVA_StrongExternal; 11648 break; 11649 11650 case TSK_ExplicitInstantiationDefinition: 11651 return GVA_StrongODR; 11652 11653 // C++11 [temp.explicit]p10: 11654 // [ Note: The intent is that an inline function that is the subject of 11655 // an explicit instantiation declaration will still be implicitly 11656 // instantiated when used so that the body can be considered for 11657 // inlining, but that no out-of-line copy of the inline function would be 11658 // generated in the translation unit. -- end note ] 11659 case TSK_ExplicitInstantiationDeclaration: 11660 return GVA_AvailableExternally; 11661 11662 case TSK_ImplicitInstantiation: 11663 External = GVA_DiscardableODR; 11664 break; 11665 } 11666 11667 if (!FD->isInlined()) 11668 return External; 11669 11670 if ((!Context.getLangOpts().CPlusPlus && 11671 !Context.getTargetInfo().getCXXABI().isMicrosoft() && 11672 !FD->hasAttr<DLLExportAttr>()) || 11673 FD->hasAttr<GNUInlineAttr>()) { 11674 // FIXME: This doesn't match gcc's behavior for dllexport inline functions. 11675 11676 // GNU or C99 inline semantics. Determine whether this symbol should be 11677 // externally visible. 11678 if (FD->isInlineDefinitionExternallyVisible()) 11679 return External; 11680 11681 // C99 inline semantics, where the symbol is not externally visible. 11682 return GVA_AvailableExternally; 11683 } 11684 11685 // Functions specified with extern and inline in -fms-compatibility mode 11686 // forcibly get emitted. While the body of the function cannot be later 11687 // replaced, the function definition cannot be discarded. 11688 if (FD->isMSExternInline()) 11689 return GVA_StrongODR; 11690 11691 return GVA_DiscardableODR; 11692 } 11693 11694 static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context, 11695 const Decl *D, GVALinkage L) { 11696 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx 11697 // dllexport/dllimport on inline functions. 11698 if (D->hasAttr<DLLImportAttr>()) { 11699 if (L == GVA_DiscardableODR || L == GVA_StrongODR) 11700 return GVA_AvailableExternally; 11701 } else if (D->hasAttr<DLLExportAttr>()) { 11702 if (L == GVA_DiscardableODR) 11703 return GVA_StrongODR; 11704 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) { 11705 // Device-side functions with __global__ attribute must always be 11706 // visible externally so they can be launched from host. 11707 if (D->hasAttr<CUDAGlobalAttr>() && 11708 (L == GVA_DiscardableODR || L == GVA_Internal)) 11709 return GVA_StrongODR; 11710 // Single source offloading languages like CUDA/HIP need to be able to 11711 // access static device variables from host code of the same compilation 11712 // unit. This is done by externalizing the static variable with a shared 11713 // name between the host and device compilation which is the same for the 11714 // same compilation unit whereas different among different compilation 11715 // units. 11716 if (Context.shouldExternalize(D)) 11717 return GVA_StrongExternal; 11718 } 11719 return L; 11720 } 11721 11722 /// Adjust the GVALinkage for a declaration based on what an external AST source 11723 /// knows about whether there can be other definitions of this declaration. 
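// Illustrative example (restating the cases handled below): if the external
// source reports EK_Never for a function whose basic linkage is
// GVA_DiscardableODR, the linkage is strengthened to GVA_StrongODR, since no
// other translation unit will provide the definition; EK_Always instead
// demotes it to GVA_AvailableExternally.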
11724 static GVALinkage 11725 adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D, 11726 GVALinkage L) { 11727 ExternalASTSource *Source = Ctx.getExternalSource(); 11728 if (!Source) 11729 return L; 11730 11731 switch (Source->hasExternalDefinitions(D)) { 11732 case ExternalASTSource::EK_Never: 11733 // Other translation units rely on us to provide the definition. 11734 if (L == GVA_DiscardableODR) 11735 return GVA_StrongODR; 11736 break; 11737 11738 case ExternalASTSource::EK_Always: 11739 return GVA_AvailableExternally; 11740 11741 case ExternalASTSource::EK_ReplyHazy: 11742 break; 11743 } 11744 return L; 11745 } 11746 11747 GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const { 11748 return adjustGVALinkageForExternalDefinitionKind(*this, FD, 11749 adjustGVALinkageForAttributes(*this, FD, 11750 basicGVALinkageForFunction(*this, FD))); 11751 } 11752 11753 static GVALinkage basicGVALinkageForVariable(const ASTContext &Context, 11754 const VarDecl *VD) { 11755 if (!VD->isExternallyVisible()) 11756 return GVA_Internal; 11757 11758 if (VD->isStaticLocal()) { 11759 const DeclContext *LexicalContext = VD->getParentFunctionOrMethod(); 11760 while (LexicalContext && !isa<FunctionDecl>(LexicalContext)) 11761 LexicalContext = LexicalContext->getLexicalParent(); 11762 11763 // ObjC Blocks can create local variables that don't have a FunctionDecl 11764 // LexicalContext. 11765 if (!LexicalContext) 11766 return GVA_DiscardableODR; 11767 11768 // Otherwise, let the static local variable inherit its linkage from the 11769 // nearest enclosing function. 11770 auto StaticLocalLinkage = 11771 Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext)); 11772 11773 // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must 11774 // be emitted in any object with references to the symbol for the object it 11775 // contains, whether inline or out-of-line." 11776 // Similar behavior is observed with MSVC. An alternative ABI could use 11777 // StrongODR/AvailableExternally to match the function, but none are 11778 // known/supported currently. 11779 if (StaticLocalLinkage == GVA_StrongODR || 11780 StaticLocalLinkage == GVA_AvailableExternally) 11781 return GVA_DiscardableODR; 11782 return StaticLocalLinkage; 11783 } 11784 11785 // MSVC treats in-class initialized static data members as definitions. 11786 // By giving them non-strong linkage, out-of-line definitions won't 11787 // cause link errors. 11788 if (Context.isMSStaticDataMemberInlineDefinition(VD)) 11789 return GVA_DiscardableODR; 11790 11791 // Most non-template variables have strong linkage; inline variables are 11792 // linkonce_odr or (occasionally, for compatibility) weak_odr. 11793 GVALinkage StrongLinkage; 11794 switch (Context.getInlineVariableDefinitionKind(VD)) { 11795 case ASTContext::InlineVariableDefinitionKind::None: 11796 StrongLinkage = GVA_StrongExternal; 11797 break; 11798 case ASTContext::InlineVariableDefinitionKind::Weak: 11799 case ASTContext::InlineVariableDefinitionKind::WeakUnknown: 11800 StrongLinkage = GVA_DiscardableODR; 11801 break; 11802 case ASTContext::InlineVariableDefinitionKind::Strong: 11803 StrongLinkage = GVA_StrongODR; 11804 break; 11805 } 11806 11807 switch (VD->getTemplateSpecializationKind()) { 11808 case TSK_Undeclared: 11809 return StrongLinkage; 11810 11811 case TSK_ExplicitSpecialization: 11812 return Context.getTargetInfo().getCXXABI().isMicrosoft() && 11813 VD->isStaticDataMember() 11814 ? 
GVA_StrongODR 11815 : StrongLinkage; 11816 11817 case TSK_ExplicitInstantiationDefinition: 11818 return GVA_StrongODR; 11819 11820 case TSK_ExplicitInstantiationDeclaration: 11821 return GVA_AvailableExternally; 11822 11823 case TSK_ImplicitInstantiation: 11824 return GVA_DiscardableODR; 11825 } 11826 11827 llvm_unreachable("Invalid Linkage!"); 11828 } 11829 11830 GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) const { 11831 return adjustGVALinkageForExternalDefinitionKind(*this, VD, 11832 adjustGVALinkageForAttributes(*this, VD, 11833 basicGVALinkageForVariable(*this, VD))); 11834 } 11835 11836 bool ASTContext::DeclMustBeEmitted(const Decl *D) { 11837 if (const auto *VD = dyn_cast<VarDecl>(D)) { 11838 if (!VD->isFileVarDecl()) 11839 return false; 11840 // Global named register variables (GNU extension) are never emitted. 11841 if (VD->getStorageClass() == SC_Register) 11842 return false; 11843 if (VD->getDescribedVarTemplate() || 11844 isa<VarTemplatePartialSpecializationDecl>(VD)) 11845 return false; 11846 } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11847 // We never need to emit an uninstantiated function template. 11848 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 11849 return false; 11850 } else if (isa<PragmaCommentDecl>(D)) 11851 return true; 11852 else if (isa<PragmaDetectMismatchDecl>(D)) 11853 return true; 11854 else if (isa<OMPRequiresDecl>(D)) 11855 return true; 11856 else if (isa<OMPThreadPrivateDecl>(D)) 11857 return !D->getDeclContext()->isDependentContext(); 11858 else if (isa<OMPAllocateDecl>(D)) 11859 return !D->getDeclContext()->isDependentContext(); 11860 else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D)) 11861 return !D->getDeclContext()->isDependentContext(); 11862 else if (isa<ImportDecl>(D)) 11863 return true; 11864 else 11865 return false; 11866 11867 // If this is a member of a class template, we do not need to emit it. 11868 if (D->getDeclContext()->isDependentContext()) 11869 return false; 11870 11871 // Weak references don't produce any output by themselves. 11872 if (D->hasAttr<WeakRefAttr>()) 11873 return false; 11874 11875 // Aliases and used decls are required. 11876 if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>()) 11877 return true; 11878 11879 if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11880 // Forward declarations aren't required. 11881 if (!FD->doesThisDeclarationHaveABody()) 11882 return FD->doesDeclarationForceExternallyVisibleDefinition(); 11883 11884 // Constructors and destructors are required. 11885 if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>()) 11886 return true; 11887 11888 // The key function for a class is required. This rule only comes 11889 // into play when inline functions can be key functions, though. 11890 if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) { 11891 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) { 11892 const CXXRecordDecl *RD = MD->getParent(); 11893 if (MD->isOutOfLine() && RD->isDynamicClass()) { 11894 const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD); 11895 if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl()) 11896 return true; 11897 } 11898 } 11899 } 11900 11901 GVALinkage Linkage = GetGVALinkageForFunction(FD); 11902 11903 // static, static inline, always_inline, and extern inline functions can 11904 // always be deferred. Normal inline functions can be deferred in C99/C++. 11905 // Implicit template instantiations can also be deferred in C++. 
11906 return !isDiscardableGVALinkage(Linkage); 11907 } 11908 11909 const auto *VD = cast<VarDecl>(D); 11910 assert(VD->isFileVarDecl() && "Expected file scoped var"); 11911 11912 // If the decl is marked as `declare target to`, it should be emitted for the 11913 // host and for the device. 11914 if (LangOpts.OpenMP && 11915 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) 11916 return true; 11917 11918 if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly && 11919 !isMSStaticDataMemberInlineDefinition(VD)) 11920 return false; 11921 11922 // Variables in other module units shouldn't be forced to be emitted. 11923 if (VD->isInAnotherModuleUnit()) 11924 return false; 11925 11926 // Variables that can be needed in other TUs are required. 11927 auto Linkage = GetGVALinkageForVariable(VD); 11928 if (!isDiscardableGVALinkage(Linkage)) 11929 return true; 11930 11931 // We never need to emit a variable that is available in another TU. 11932 if (Linkage == GVA_AvailableExternally) 11933 return false; 11934 11935 // Variables that have destruction with side-effects are required. 11936 if (VD->needsDestruction(*this)) 11937 return true; 11938 11939 // Variables that have initialization with side-effects are required. 11940 if (VD->getInit() && VD->getInit()->HasSideEffects(*this) && 11941 // We can get a value-dependent initializer during error recovery. 11942 (VD->getInit()->isValueDependent() || !VD->evaluateValue())) 11943 return true; 11944 11945 // Likewise, variables with tuple-like bindings are required if their 11946 // bindings have side-effects. 11947 if (const auto *DD = dyn_cast<DecompositionDecl>(VD)) 11948 for (const auto *BD : DD->bindings()) 11949 if (const auto *BindingVD = BD->getHoldingVar()) 11950 if (DeclMustBeEmitted(BindingVD)) 11951 return true; 11952 11953 return false; 11954 } 11955 11956 void ASTContext::forEachMultiversionedFunctionVersion( 11957 const FunctionDecl *FD, 11958 llvm::function_ref<void(FunctionDecl *)> Pred) const { 11959 assert(FD->isMultiVersion() && "Only valid for multiversioned functions"); 11960 llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls; 11961 FD = FD->getMostRecentDecl(); 11962 // FIXME: The order of traversal here matters and depends on the order of 11963 // lookup results, which happens to be (mostly) oldest-to-newest, but we 11964 // shouldn't rely on that. 11965 for (auto *CurDecl : 11966 FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) { 11967 FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl(); 11968 if (CurFD && hasSameType(CurFD->getType(), FD->getType()) && 11969 !SeenDecls.contains(CurFD)) { 11970 SeenDecls.insert(CurFD); 11971 Pred(CurFD); 11972 } 11973 } 11974 } 11975 11976 CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic, 11977 bool IsCXXMethod, 11978 bool IsBuiltin) const { 11979 // Pass through to the C++ ABI object 11980 if (IsCXXMethod) 11981 return ABI->getDefaultMethodCallConv(IsVariadic); 11982 11983 // Builtins ignore user-specified default calling convention and remain the 11984 // Target's default calling convention. 
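  // Illustrative: under -fdefault-calling-conv=stdcall (DCC_StdCall), a
  // non-variadic user function is given CC_X86StdCall below, while a builtin
  // or a variadic function still falls through to the target's default
  // convention.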
11985 if (!IsBuiltin) { 11986 switch (LangOpts.getDefaultCallingConv()) { 11987 case LangOptions::DCC_None: 11988 break; 11989 case LangOptions::DCC_CDecl: 11990 return CC_C; 11991 case LangOptions::DCC_FastCall: 11992 if (getTargetInfo().hasFeature("sse2") && !IsVariadic) 11993 return CC_X86FastCall; 11994 break; 11995 case LangOptions::DCC_StdCall: 11996 if (!IsVariadic) 11997 return CC_X86StdCall; 11998 break; 11999 case LangOptions::DCC_VectorCall: 12000 // __vectorcall cannot be applied to variadic functions. 12001 if (!IsVariadic) 12002 return CC_X86VectorCall; 12003 break; 12004 case LangOptions::DCC_RegCall: 12005 // __regcall cannot be applied to variadic functions. 12006 if (!IsVariadic) 12007 return CC_X86RegCall; 12008 break; 12009 } 12010 } 12011 return Target->getDefaultCallingConv(); 12012 } 12013 12014 bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const { 12015 // Pass through to the C++ ABI object 12016 return ABI->isNearlyEmpty(RD); 12017 } 12018 12019 VTableContextBase *ASTContext::getVTableContext() { 12020 if (!VTContext.get()) { 12021 auto ABI = Target->getCXXABI(); 12022 if (ABI.isMicrosoft()) 12023 VTContext.reset(new MicrosoftVTableContext(*this)); 12024 else { 12025 auto ComponentLayout = getLangOpts().RelativeCXXABIVTables 12026 ? ItaniumVTableContext::Relative 12027 : ItaniumVTableContext::Pointer; 12028 VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout)); 12029 } 12030 } 12031 return VTContext.get(); 12032 } 12033 12034 MangleContext *ASTContext::createMangleContext(const TargetInfo *T) { 12035 if (!T) 12036 T = Target; 12037 switch (T->getCXXABI().getKind()) { 12038 case TargetCXXABI::AppleARM64: 12039 case TargetCXXABI::Fuchsia: 12040 case TargetCXXABI::GenericAArch64: 12041 case TargetCXXABI::GenericItanium: 12042 case TargetCXXABI::GenericARM: 12043 case TargetCXXABI::GenericMIPS: 12044 case TargetCXXABI::iOS: 12045 case TargetCXXABI::WebAssembly: 12046 case TargetCXXABI::WatchOS: 12047 case TargetCXXABI::XL: 12048 return ItaniumMangleContext::create(*this, getDiagnostics()); 12049 case TargetCXXABI::Microsoft: 12050 return MicrosoftMangleContext::create(*this, getDiagnostics()); 12051 } 12052 llvm_unreachable("Unsupported ABI"); 12053 } 12054 12055 MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) { 12056 assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft && 12057 "Device mangle context does not support Microsoft mangling."); 12058 switch (T.getCXXABI().getKind()) { 12059 case TargetCXXABI::AppleARM64: 12060 case TargetCXXABI::Fuchsia: 12061 case TargetCXXABI::GenericAArch64: 12062 case TargetCXXABI::GenericItanium: 12063 case TargetCXXABI::GenericARM: 12064 case TargetCXXABI::GenericMIPS: 12065 case TargetCXXABI::iOS: 12066 case TargetCXXABI::WebAssembly: 12067 case TargetCXXABI::WatchOS: 12068 case TargetCXXABI::XL: 12069 return ItaniumMangleContext::create( 12070 *this, getDiagnostics(), 12071 [](ASTContext &, const NamedDecl *ND) -> std::optional<unsigned> { 12072 if (const auto *RD = dyn_cast<CXXRecordDecl>(ND)) 12073 return RD->getDeviceLambdaManglingNumber(); 12074 return std::nullopt; 12075 }, 12076 /*IsAux=*/true); 12077 case TargetCXXABI::Microsoft: 12078 return MicrosoftMangleContext::create(*this, getDiagnostics(), 12079 /*IsAux=*/true); 12080 } 12081 llvm_unreachable("Unsupported ABI"); 12082 } 12083 12084 CXXABI::~CXXABI() = default; 12085 12086 size_t ASTContext::getSideTableAllocatedMemory() const { 12087 return ASTRecordLayouts.getMemorySize() + 12088 
llvm::capacity_in_bytes(ObjCLayouts) + 12089 llvm::capacity_in_bytes(KeyFunctions) + 12090 llvm::capacity_in_bytes(ObjCImpls) + 12091 llvm::capacity_in_bytes(BlockVarCopyInits) + 12092 llvm::capacity_in_bytes(DeclAttrs) + 12093 llvm::capacity_in_bytes(TemplateOrInstantiation) + 12094 llvm::capacity_in_bytes(InstantiatedFromUsingDecl) + 12095 llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) + 12096 llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) + 12097 llvm::capacity_in_bytes(OverriddenMethods) + 12098 llvm::capacity_in_bytes(Types) + 12099 llvm::capacity_in_bytes(VariableArrayTypes); 12100 } 12101 12102 /// getIntTypeForBitwidth - 12103 /// sets integer QualTy according to specified details: 12104 /// bitwidth, signed/unsigned. 12105 /// Returns empty type if there is no appropriate target types. 12106 QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth, 12107 unsigned Signed) const { 12108 TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed); 12109 CanQualType QualTy = getFromTargetType(Ty); 12110 if (!QualTy && DestWidth == 128) 12111 return Signed ? Int128Ty : UnsignedInt128Ty; 12112 return QualTy; 12113 } 12114 12115 /// getRealTypeForBitwidth - 12116 /// sets floating point QualTy according to specified bitwidth. 12117 /// Returns empty type if there is no appropriate target types. 12118 QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth, 12119 FloatModeKind ExplicitType) const { 12120 FloatModeKind Ty = 12121 getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitType); 12122 switch (Ty) { 12123 case FloatModeKind::Half: 12124 return HalfTy; 12125 case FloatModeKind::Float: 12126 return FloatTy; 12127 case FloatModeKind::Double: 12128 return DoubleTy; 12129 case FloatModeKind::LongDouble: 12130 return LongDoubleTy; 12131 case FloatModeKind::Float128: 12132 return Float128Ty; 12133 case FloatModeKind::Ibm128: 12134 return Ibm128Ty; 12135 case FloatModeKind::NoFloat: 12136 return {}; 12137 } 12138 12139 llvm_unreachable("Unhandled TargetInfo::RealType value"); 12140 } 12141 12142 void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) { 12143 if (Number > 1) 12144 MangleNumbers[ND] = Number; 12145 } 12146 12147 unsigned ASTContext::getManglingNumber(const NamedDecl *ND, 12148 bool ForAuxTarget) const { 12149 auto I = MangleNumbers.find(ND); 12150 unsigned Res = I != MangleNumbers.end() ? I->second : 1; 12151 // CUDA/HIP host compilation encodes host and device mangling numbers 12152 // as lower and upper half of 32 bit integer. 12153 if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) { 12154 Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF; 12155 } else { 12156 assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling " 12157 "number for aux target"); 12158 } 12159 return Res > 1 ? Res : 1; 12160 } 12161 12162 void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) { 12163 if (Number > 1) 12164 StaticLocalNumbers[VD] = Number; 12165 } 12166 12167 unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const { 12168 auto I = StaticLocalNumbers.find(VD); 12169 return I != StaticLocalNumbers.end() ? I->second : 1; 12170 } 12171 12172 MangleNumberingContext & 12173 ASTContext::getManglingNumberContext(const DeclContext *DC) { 12174 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C. 
12175 std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC]; 12176 if (!MCtx) 12177 MCtx = createMangleNumberingContext(); 12178 return *MCtx; 12179 } 12180 12181 MangleNumberingContext & 12182 ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) { 12183 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C. 12184 std::unique_ptr<MangleNumberingContext> &MCtx = 12185 ExtraMangleNumberingContexts[D]; 12186 if (!MCtx) 12187 MCtx = createMangleNumberingContext(); 12188 return *MCtx; 12189 } 12190 12191 std::unique_ptr<MangleNumberingContext> 12192 ASTContext::createMangleNumberingContext() const { 12193 return ABI->createMangleNumberingContext(); 12194 } 12195 12196 const CXXConstructorDecl * 12197 ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) { 12198 return ABI->getCopyConstructorForExceptionObject( 12199 cast<CXXRecordDecl>(RD->getFirstDecl())); 12200 } 12201 12202 void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD, 12203 CXXConstructorDecl *CD) { 12204 return ABI->addCopyConstructorForExceptionObject( 12205 cast<CXXRecordDecl>(RD->getFirstDecl()), 12206 cast<CXXConstructorDecl>(CD->getFirstDecl())); 12207 } 12208 12209 void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD, 12210 TypedefNameDecl *DD) { 12211 return ABI->addTypedefNameForUnnamedTagDecl(TD, DD); 12212 } 12213 12214 TypedefNameDecl * 12215 ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) { 12216 return ABI->getTypedefNameForUnnamedTagDecl(TD); 12217 } 12218 12219 void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD, 12220 DeclaratorDecl *DD) { 12221 return ABI->addDeclaratorForUnnamedTagDecl(TD, DD); 12222 } 12223 12224 DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) { 12225 return ABI->getDeclaratorForUnnamedTagDecl(TD); 12226 } 12227 12228 void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) { 12229 ParamIndices[D] = index; 12230 } 12231 12232 unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const { 12233 ParameterIndexTable::const_iterator I = ParamIndices.find(D); 12234 assert(I != ParamIndices.end() && 12235 "ParmIndices lacks entry set by ParmVarDecl"); 12236 return I->second; 12237 } 12238 12239 QualType ASTContext::getStringLiteralArrayType(QualType EltTy, 12240 unsigned Length) const { 12241 // A C++ string literal has a const-qualified element type (C++ 2.13.4p1). 12242 if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings) 12243 EltTy = EltTy.withConst(); 12244 12245 EltTy = adjustStringLiteralBaseType(EltTy); 12246 12247 // Get an array type for the string, according to C99 6.4.5. This includes 12248 // the null terminator character. 
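  // Illustrative: for Length == 3 (e.g. the literal "foo") with EltTy 'char',
  // this produces 'char[4]' in C, or 'const char[4]' in C++ and under
  // -fconst-strings, accounting for the null terminator.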
12249 return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr, 12250 ArrayType::Normal, /*IndexTypeQuals*/ 0); 12251 } 12252 12253 StringLiteral * 12254 ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const { 12255 StringLiteral *&Result = StringLiteralCache[Key]; 12256 if (!Result) 12257 Result = StringLiteral::Create( 12258 *this, Key, StringLiteral::Ordinary, 12259 /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()), 12260 SourceLocation()); 12261 return Result; 12262 } 12263 12264 MSGuidDecl * 12265 ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const { 12266 assert(MSGuidTagDecl && "building MS GUID without MS extensions?"); 12267 12268 llvm::FoldingSetNodeID ID; 12269 MSGuidDecl::Profile(ID, Parts); 12270 12271 void *InsertPos; 12272 if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos)) 12273 return Existing; 12274 12275 QualType GUIDType = getMSGuidType().withConst(); 12276 MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts); 12277 MSGuidDecls.InsertNode(New, InsertPos); 12278 return New; 12279 } 12280 12281 UnnamedGlobalConstantDecl * 12282 ASTContext::getUnnamedGlobalConstantDecl(QualType Ty, 12283 const APValue &APVal) const { 12284 llvm::FoldingSetNodeID ID; 12285 UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal); 12286 12287 void *InsertPos; 12288 if (UnnamedGlobalConstantDecl *Existing = 12289 UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos)) 12290 return Existing; 12291 12292 UnnamedGlobalConstantDecl *New = 12293 UnnamedGlobalConstantDecl::Create(*this, Ty, APVal); 12294 UnnamedGlobalConstantDecls.InsertNode(New, InsertPos); 12295 return New; 12296 } 12297 12298 TemplateParamObjectDecl * 12299 ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const { 12300 assert(T->isRecordType() && "template param object of unexpected type"); 12301 12302 // C++ [temp.param]p8: 12303 // [...] a static storage duration object of type 'const T' [...] 12304 T.addConst(); 12305 12306 llvm::FoldingSetNodeID ID; 12307 TemplateParamObjectDecl::Profile(ID, T, V); 12308 12309 void *InsertPos; 12310 if (TemplateParamObjectDecl *Existing = 12311 TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos)) 12312 return Existing; 12313 12314 TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V); 12315 TemplateParamObjectDecls.InsertNode(New, InsertPos); 12316 return New; 12317 } 12318 12319 bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const { 12320 const llvm::Triple &T = getTargetInfo().getTriple(); 12321 if (!T.isOSDarwin()) 12322 return false; 12323 12324 if (!(T.isiOS() && T.isOSVersionLT(7)) && 12325 !(T.isMacOSX() && T.isOSVersionLT(10, 9))) 12326 return false; 12327 12328 QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); 12329 CharUnits sizeChars = getTypeSizeInChars(AtomicTy); 12330 uint64_t Size = sizeChars.getQuantity(); 12331 CharUnits alignChars = getTypeAlignInChars(AtomicTy); 12332 unsigned Align = alignChars.getQuantity(); 12333 unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth(); 12334 return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits); 12335 } 12336 12337 bool 12338 ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl, 12339 const ObjCMethodDecl *MethodImpl) { 12340 // No point trying to match an unavailable/deprecated method.
12341 if (MethodDecl->hasAttr<UnavailableAttr>() 12342 || MethodDecl->hasAttr<DeprecatedAttr>()) 12343 return false; 12344 if (MethodDecl->getObjCDeclQualifier() != 12345 MethodImpl->getObjCDeclQualifier()) 12346 return false; 12347 if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType())) 12348 return false; 12349 12350 if (MethodDecl->param_size() != MethodImpl->param_size()) 12351 return false; 12352 12353 for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(), 12354 IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(), 12355 EF = MethodDecl->param_end(); 12356 IM != EM && IF != EF; ++IM, ++IF) { 12357 const ParmVarDecl *DeclVar = (*IF); 12358 const ParmVarDecl *ImplVar = (*IM); 12359 if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier()) 12360 return false; 12361 if (!hasSameType(DeclVar->getType(), ImplVar->getType())) 12362 return false; 12363 } 12364 12365 return (MethodDecl->isVariadic() == MethodImpl->isVariadic()); 12366 } 12367 12368 uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const { 12369 LangAS AS; 12370 if (QT->getUnqualifiedDesugaredType()->isNullPtrType()) 12371 AS = LangAS::Default; 12372 else 12373 AS = QT->getPointeeType().getAddressSpace(); 12374 12375 return getTargetInfo().getNullPointerValue(AS); 12376 } 12377 12378 unsigned ASTContext::getTargetAddressSpace(LangAS AS) const { 12379 return getTargetInfo().getTargetAddressSpace(AS); 12380 } 12381 12382 bool ASTContext::hasSameExpr(const Expr *X, const Expr *Y) const { 12383 if (X == Y) 12384 return true; 12385 if (!X || !Y) 12386 return false; 12387 llvm::FoldingSetNodeID IDX, IDY; 12388 X->Profile(IDX, *this, /*Canonical=*/true); 12389 Y->Profile(IDY, *this, /*Canonical=*/true); 12390 return IDX == IDY; 12391 } 12392 12393 // The getCommon* helpers return, for given 'same' X and Y entities given as 12394 // inputs, another entity which is also the 'same' as the inputs, but which 12395 // is closer to the canonical form of the inputs, each according to a given 12396 // criteria. 12397 // The getCommon*Checked variants are 'null inputs not-allowed' equivalents of 12398 // the regular ones. 12399 12400 static Decl *getCommonDecl(Decl *X, Decl *Y) { 12401 if (!declaresSameEntity(X, Y)) 12402 return nullptr; 12403 for (const Decl *DX : X->redecls()) { 12404 // If we reach Y before reaching the first decl, that means X is older. 12405 if (DX == Y) 12406 return X; 12407 // If we reach the first decl, then Y is older. 12408 if (DX->isFirstDecl()) 12409 return Y; 12410 } 12411 llvm_unreachable("Corrupt redecls chain"); 12412 } 12413 12414 template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true> 12415 static T *getCommonDecl(T *X, T *Y) { 12416 return cast_or_null<T>( 12417 getCommonDecl(const_cast<Decl *>(cast_or_null<Decl>(X)), 12418 const_cast<Decl *>(cast_or_null<Decl>(Y)))); 12419 } 12420 12421 template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true> 12422 static T *getCommonDeclChecked(T *X, T *Y) { 12423 return cast<T>(getCommonDecl(const_cast<Decl *>(cast<Decl>(X)), 12424 const_cast<Decl *>(cast<Decl>(Y)))); 12425 } 12426 12427 static TemplateName getCommonTemplateName(ASTContext &Ctx, TemplateName X, 12428 TemplateName Y) { 12429 if (X.getAsVoidPointer() == Y.getAsVoidPointer()) 12430 return X; 12431 // FIXME: There are cases here where we could find a common template name 12432 // with more sugar. For example one could be a SubstTemplateTemplate* 12433 // replacing the other. 
12434 TemplateName CX = Ctx.getCanonicalTemplateName(X); 12435 if (CX.getAsVoidPointer() != 12436 Ctx.getCanonicalTemplateName(Y).getAsVoidPointer()) 12437 return TemplateName(); 12438 return CX; 12439 } 12440 12441 static TemplateName 12442 getCommonTemplateNameChecked(ASTContext &Ctx, TemplateName X, TemplateName Y) { 12443 TemplateName R = getCommonTemplateName(Ctx, X, Y); 12444 assert(R.getAsVoidPointer() != nullptr); 12445 return R; 12446 } 12447 12448 static auto getCommonTypes(ASTContext &Ctx, ArrayRef<QualType> Xs, 12449 ArrayRef<QualType> Ys, bool Unqualified = false) { 12450 assert(Xs.size() == Ys.size()); 12451 SmallVector<QualType, 8> Rs(Xs.size()); 12452 for (size_t I = 0; I < Rs.size(); ++I) 12453 Rs[I] = Ctx.getCommonSugaredType(Xs[I], Ys[I], Unqualified); 12454 return Rs; 12455 } 12456 12457 template <class T> 12458 static SourceLocation getCommonAttrLoc(const T *X, const T *Y) { 12459 return X->getAttributeLoc() == Y->getAttributeLoc() ? X->getAttributeLoc() 12460 : SourceLocation(); 12461 } 12462 12463 static TemplateArgument getCommonTemplateArgument(ASTContext &Ctx, 12464 const TemplateArgument &X, 12465 const TemplateArgument &Y) { 12466 if (X.getKind() != Y.getKind()) 12467 return TemplateArgument(); 12468 12469 switch (X.getKind()) { 12470 case TemplateArgument::ArgKind::Type: 12471 if (!Ctx.hasSameType(X.getAsType(), Y.getAsType())) 12472 return TemplateArgument(); 12473 return TemplateArgument( 12474 Ctx.getCommonSugaredType(X.getAsType(), Y.getAsType())); 12475 case TemplateArgument::ArgKind::NullPtr: 12476 if (!Ctx.hasSameType(X.getNullPtrType(), Y.getNullPtrType())) 12477 return TemplateArgument(); 12478 return TemplateArgument( 12479 Ctx.getCommonSugaredType(X.getNullPtrType(), Y.getNullPtrType()), 12480 /*Unqualified=*/true); 12481 case TemplateArgument::ArgKind::Expression: 12482 if (!Ctx.hasSameType(X.getAsExpr()->getType(), Y.getAsExpr()->getType())) 12483 return TemplateArgument(); 12484 // FIXME: Try to keep the common sugar. 12485 return X; 12486 case TemplateArgument::ArgKind::Template: { 12487 TemplateName TX = X.getAsTemplate(), TY = Y.getAsTemplate(); 12488 TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY); 12489 if (!CTN.getAsVoidPointer()) 12490 return TemplateArgument(); 12491 return TemplateArgument(CTN); 12492 } 12493 case TemplateArgument::ArgKind::TemplateExpansion: { 12494 TemplateName TX = X.getAsTemplateOrTemplatePattern(), 12495 TY = Y.getAsTemplateOrTemplatePattern(); 12496 TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY); 12497 if (!CTN.getAsVoidPointer()) 12498 return TemplateArgument(); 12499 auto NExpX = X.getNumTemplateExpansions(); 12500 assert(NExpX == Y.getNumTemplateExpansions()); 12501 return TemplateArgument(CTN, NExpX); 12502 } 12503 default: 12504 // FIXME: Handle the other argument kinds.
12505 return X; 12506 } 12507 } 12508 12509 static bool getCommonTemplateArguments(ASTContext &Ctx, 12510 SmallVectorImpl<TemplateArgument> &R, 12511 ArrayRef<TemplateArgument> Xs, 12512 ArrayRef<TemplateArgument> Ys) { 12513 if (Xs.size() != Ys.size()) 12514 return true; 12515 R.resize(Xs.size()); 12516 for (size_t I = 0; I < R.size(); ++I) { 12517 R[I] = getCommonTemplateArgument(Ctx, Xs[I], Ys[I]); 12518 if (R[I].isNull()) 12519 return true; 12520 } 12521 return false; 12522 } 12523 12524 static auto getCommonTemplateArguments(ASTContext &Ctx, 12525 ArrayRef<TemplateArgument> Xs, 12526 ArrayRef<TemplateArgument> Ys) { 12527 SmallVector<TemplateArgument, 8> R; 12528 bool Different = getCommonTemplateArguments(Ctx, R, Xs, Ys); 12529 assert(!Different); 12530 (void)Different; 12531 return R; 12532 } 12533 12534 template <class T> 12535 static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y) { 12536 return X->getKeyword() == Y->getKeyword() ? X->getKeyword() 12537 : ElaboratedTypeKeyword::ETK_None; 12538 } 12539 12540 template <class T> 12541 static NestedNameSpecifier *getCommonNNS(ASTContext &Ctx, const T *X, 12542 const T *Y) { 12543 // FIXME: Try to keep the common NNS sugar. 12544 return X->getQualifier() == Y->getQualifier() 12545 ? X->getQualifier() 12546 : Ctx.getCanonicalNestedNameSpecifier(X->getQualifier()); 12547 } 12548 12549 template <class T> 12550 static QualType getCommonElementType(ASTContext &Ctx, const T *X, const T *Y) { 12551 return Ctx.getCommonSugaredType(X->getElementType(), Y->getElementType()); 12552 } 12553 12554 template <class T> 12555 static QualType getCommonArrayElementType(ASTContext &Ctx, const T *X, 12556 Qualifiers &QX, const T *Y, 12557 Qualifiers &QY) { 12558 QualType EX = X->getElementType(), EY = Y->getElementType(); 12559 QualType R = Ctx.getCommonSugaredType(EX, EY, 12560 /*Unqualified=*/true); 12561 Qualifiers RQ = R.getQualifiers(); 12562 QX += EX.getQualifiers() - RQ; 12563 QY += EY.getQualifiers() - RQ; 12564 return R; 12565 } 12566 12567 template <class T> 12568 static QualType getCommonPointeeType(ASTContext &Ctx, const T *X, const T *Y) { 12569 return Ctx.getCommonSugaredType(X->getPointeeType(), Y->getPointeeType()); 12570 } 12571 12572 template <class T> static auto *getCommonSizeExpr(ASTContext &Ctx, T *X, T *Y) { 12573 assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr())); 12574 return X->getSizeExpr(); 12575 } 12576 12577 static auto getCommonSizeModifier(const ArrayType *X, const ArrayType *Y) { 12578 assert(X->getSizeModifier() == Y->getSizeModifier()); 12579 return X->getSizeModifier(); 12580 } 12581 12582 static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X, 12583 const ArrayType *Y) { 12584 assert(X->getIndexTypeCVRQualifiers() == Y->getIndexTypeCVRQualifiers()); 12585 return X->getIndexTypeCVRQualifiers(); 12586 } 12587 12588 // Merges two type lists such that the resulting vector will contain 12589 // each type (in a canonical sense) only once, in the order they appear 12590 // from X to Y. If they occur in both X and Y, the result will contain 12591 // the common sugared type between them. 
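// Illustrative example (with a hypothetical typedef 'Int' for 'int'): merging
// {Int, float} with {int, double} yields {int, float, double}, where the first
// element is the common sugared form of 'Int' and 'int'.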
12592 static void mergeTypeLists(ASTContext &Ctx, SmallVectorImpl<QualType> &Out, 12593 ArrayRef<QualType> X, ArrayRef<QualType> Y) { 12594 llvm::DenseMap<QualType, unsigned> Found; 12595 for (auto Ts : {X, Y}) { 12596 for (QualType T : Ts) { 12597 auto Res = Found.try_emplace(Ctx.getCanonicalType(T), Out.size()); 12598 if (!Res.second) { 12599 QualType &U = Out[Res.first->second]; 12600 U = Ctx.getCommonSugaredType(U, T); 12601 } else { 12602 Out.emplace_back(T); 12603 } 12604 } 12605 } 12606 } 12607 12608 FunctionProtoType::ExceptionSpecInfo 12609 ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1, 12610 FunctionProtoType::ExceptionSpecInfo ESI2, 12611 SmallVectorImpl<QualType> &ExceptionTypeStorage, 12612 bool AcceptDependent) { 12613 ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type; 12614 12615 // If either of them can throw anything, that is the result. 12616 for (auto I : {EST_None, EST_MSAny, EST_NoexceptFalse}) { 12617 if (EST1 == I) 12618 return ESI1; 12619 if (EST2 == I) 12620 return ESI2; 12621 } 12622 12623 // If either of them is non-throwing, the result is the other. 12624 for (auto I : 12625 {EST_NoThrow, EST_DynamicNone, EST_BasicNoexcept, EST_NoexceptTrue}) { 12626 if (EST1 == I) 12627 return ESI2; 12628 if (EST2 == I) 12629 return ESI1; 12630 } 12631 12632 // If we're left with value-dependent computed noexcept expressions, we're 12633 // stuck. Before C++17, we can just drop the exception specification entirely, 12634 // since it's not actually part of the canonical type. And this should never 12635 // happen in C++17, because it would mean we were computing the composite 12636 // pointer type of dependent types, which should never happen. 12637 if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) { 12638 assert(AcceptDependent && 12639 "computing composite pointer type of dependent types"); 12640 return FunctionProtoType::ExceptionSpecInfo(); 12641 } 12642 12643 // Switch over the possibilities so that people adding new values know to 12644 // update this function. 12645 switch (EST1) { 12646 case EST_None: 12647 case EST_DynamicNone: 12648 case EST_MSAny: 12649 case EST_BasicNoexcept: 12650 case EST_DependentNoexcept: 12651 case EST_NoexceptFalse: 12652 case EST_NoexceptTrue: 12653 case EST_NoThrow: 12654 llvm_unreachable("These ESTs should be handled above"); 12655 12656 case EST_Dynamic: { 12657 // This is the fun case: both exception specifications are dynamic. Form 12658 // the union of the two lists. 
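// Note that the union is built in the caller-provided ExceptionTypeStorage;
// the returned ExceptionSpecInfo only refers to that storage, so it must
// outlive the result.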
12659 assert(EST2 == EST_Dynamic && "other cases should already be handled"); 12660 mergeTypeLists(*this, ExceptionTypeStorage, ESI1.Exceptions, 12661 ESI2.Exceptions); 12662 FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic); 12663 Result.Exceptions = ExceptionTypeStorage; 12664 return Result; 12665 } 12666 12667 case EST_Unevaluated: 12668 case EST_Uninstantiated: 12669 case EST_Unparsed: 12670 llvm_unreachable("shouldn't see unresolved exception specifications here"); 12671 } 12672 12673 llvm_unreachable("invalid ExceptionSpecificationType"); 12674 } 12675 12676 static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X, 12677 Qualifiers &QX, const Type *Y, 12678 Qualifiers &QY) { 12679 Type::TypeClass TC = X->getTypeClass(); 12680 assert(TC == Y->getTypeClass()); 12681 switch (TC) { 12682 #define UNEXPECTED_TYPE(Class, Kind) \ 12683 case Type::Class: \ 12684 llvm_unreachable("Unexpected " Kind ": " #Class); 12685 12686 #define NON_CANONICAL_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "non-canonical") 12687 #define TYPE(Class, Base) 12688 #include "clang/AST/TypeNodes.inc" 12689 12690 #define SUGAR_FREE_TYPE(Class) UNEXPECTED_TYPE(Class, "sugar-free") 12691 SUGAR_FREE_TYPE(Builtin) 12692 SUGAR_FREE_TYPE(Decltype) 12693 SUGAR_FREE_TYPE(DeducedTemplateSpecialization) 12694 SUGAR_FREE_TYPE(DependentBitInt) 12695 SUGAR_FREE_TYPE(Enum) 12696 SUGAR_FREE_TYPE(BitInt) 12697 SUGAR_FREE_TYPE(ObjCInterface) 12698 SUGAR_FREE_TYPE(Record) 12699 SUGAR_FREE_TYPE(SubstTemplateTypeParmPack) 12700 SUGAR_FREE_TYPE(UnresolvedUsing) 12701 #undef SUGAR_FREE_TYPE 12702 #define NON_UNIQUE_TYPE(Class) UNEXPECTED_TYPE(Class, "non-unique") 12703 NON_UNIQUE_TYPE(TypeOfExpr) 12704 NON_UNIQUE_TYPE(VariableArray) 12705 #undef NON_UNIQUE_TYPE 12706 12707 UNEXPECTED_TYPE(TypeOf, "sugar") 12708 12709 #undef UNEXPECTED_TYPE 12710 12711 case Type::Auto: { 12712 const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y); 12713 assert(AX->getDeducedType().isNull()); 12714 assert(AY->getDeducedType().isNull()); 12715 assert(AX->getKeyword() == AY->getKeyword()); 12716 assert(AX->isInstantiationDependentType() == 12717 AY->isInstantiationDependentType()); 12718 auto As = getCommonTemplateArguments(Ctx, AX->getTypeConstraintArguments(), 12719 AY->getTypeConstraintArguments()); 12720 return Ctx.getAutoType(QualType(), AX->getKeyword(), 12721 AX->isInstantiationDependentType(), 12722 AX->containsUnexpandedParameterPack(), 12723 getCommonDeclChecked(AX->getTypeConstraintConcept(), 12724 AY->getTypeConstraintConcept()), 12725 As); 12726 } 12727 case Type::IncompleteArray: { 12728 const auto *AX = cast<IncompleteArrayType>(X), 12729 *AY = cast<IncompleteArrayType>(Y); 12730 return Ctx.getIncompleteArrayType( 12731 getCommonArrayElementType(Ctx, AX, QX, AY, QY), 12732 getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY)); 12733 } 12734 case Type::DependentSizedArray: { 12735 const auto *AX = cast<DependentSizedArrayType>(X), 12736 *AY = cast<DependentSizedArrayType>(Y); 12737 return Ctx.getDependentSizedArrayType( 12738 getCommonArrayElementType(Ctx, AX, QX, AY, QY), 12739 getCommonSizeExpr(Ctx, AX, AY), getCommonSizeModifier(AX, AY), 12740 getCommonIndexTypeCVRQualifiers(AX, AY), 12741 AX->getBracketsRange() == AY->getBracketsRange() 12742 ? 
AX->getBracketsRange() 12743 : SourceRange()); 12744 } 12745 case Type::ConstantArray: { 12746 const auto *AX = cast<ConstantArrayType>(X), 12747 *AY = cast<ConstantArrayType>(Y); 12748 assert(AX->getSize() == AY->getSize()); 12749 const Expr *SizeExpr = Ctx.hasSameExpr(AX->getSizeExpr(), AY->getSizeExpr()) 12750 ? AX->getSizeExpr() 12751 : nullptr; 12752 return Ctx.getConstantArrayType( 12753 getCommonArrayElementType(Ctx, AX, QX, AY, QY), AX->getSize(), SizeExpr, 12754 getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY)); 12755 } 12756 case Type::Atomic: { 12757 const auto *AX = cast<AtomicType>(X), *AY = cast<AtomicType>(Y); 12758 return Ctx.getAtomicType( 12759 Ctx.getCommonSugaredType(AX->getValueType(), AY->getValueType())); 12760 } 12761 case Type::Complex: { 12762 const auto *CX = cast<ComplexType>(X), *CY = cast<ComplexType>(Y); 12763 return Ctx.getComplexType(getCommonArrayElementType(Ctx, CX, QX, CY, QY)); 12764 } 12765 case Type::Pointer: { 12766 const auto *PX = cast<PointerType>(X), *PY = cast<PointerType>(Y); 12767 return Ctx.getPointerType(getCommonPointeeType(Ctx, PX, PY)); 12768 } 12769 case Type::BlockPointer: { 12770 const auto *PX = cast<BlockPointerType>(X), *PY = cast<BlockPointerType>(Y); 12771 return Ctx.getBlockPointerType(getCommonPointeeType(Ctx, PX, PY)); 12772 } 12773 case Type::ObjCObjectPointer: { 12774 const auto *PX = cast<ObjCObjectPointerType>(X), 12775 *PY = cast<ObjCObjectPointerType>(Y); 12776 return Ctx.getObjCObjectPointerType(getCommonPointeeType(Ctx, PX, PY)); 12777 } 12778 case Type::MemberPointer: { 12779 const auto *PX = cast<MemberPointerType>(X), 12780 *PY = cast<MemberPointerType>(Y); 12781 return Ctx.getMemberPointerType( 12782 getCommonPointeeType(Ctx, PX, PY), 12783 Ctx.getCommonSugaredType(QualType(PX->getClass(), 0), 12784 QualType(PY->getClass(), 0)) 12785 .getTypePtr()); 12786 } 12787 case Type::LValueReference: { 12788 const auto *PX = cast<LValueReferenceType>(X), 12789 *PY = cast<LValueReferenceType>(Y); 12790 // FIXME: Preserve PointeeTypeAsWritten. 12791 return Ctx.getLValueReferenceType(getCommonPointeeType(Ctx, PX, PY), 12792 PX->isSpelledAsLValue() || 12793 PY->isSpelledAsLValue()); 12794 } 12795 case Type::RValueReference: { 12796 const auto *PX = cast<RValueReferenceType>(X), 12797 *PY = cast<RValueReferenceType>(Y); 12798 // FIXME: Preserve PointeeTypeAsWritten. 
12799 return Ctx.getRValueReferenceType(getCommonPointeeType(Ctx, PX, PY)); 12800 } 12801 case Type::DependentAddressSpace: { 12802 const auto *PX = cast<DependentAddressSpaceType>(X), 12803 *PY = cast<DependentAddressSpaceType>(Y); 12804 assert(Ctx.hasSameExpr(PX->getAddrSpaceExpr(), PY->getAddrSpaceExpr())); 12805 return Ctx.getDependentAddressSpaceType(getCommonPointeeType(Ctx, PX, PY), 12806 PX->getAddrSpaceExpr(), 12807 getCommonAttrLoc(PX, PY)); 12808 } 12809 case Type::FunctionNoProto: { 12810 const auto *FX = cast<FunctionNoProtoType>(X), 12811 *FY = cast<FunctionNoProtoType>(Y); 12812 assert(FX->getExtInfo() == FY->getExtInfo()); 12813 return Ctx.getFunctionNoProtoType( 12814 Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType()), 12815 FX->getExtInfo()); 12816 } 12817 case Type::FunctionProto: { 12818 const auto *FX = cast<FunctionProtoType>(X), 12819 *FY = cast<FunctionProtoType>(Y); 12820 FunctionProtoType::ExtProtoInfo EPIX = FX->getExtProtoInfo(), 12821 EPIY = FY->getExtProtoInfo(); 12822 assert(EPIX.ExtInfo == EPIY.ExtInfo); 12823 assert(EPIX.ExtParameterInfos == EPIY.ExtParameterInfos); 12824 assert(EPIX.RefQualifier == EPIY.RefQualifier); 12825 assert(EPIX.TypeQuals == EPIY.TypeQuals); 12826 assert(EPIX.Variadic == EPIY.Variadic); 12827 12828 // FIXME: Can we handle an empty EllipsisLoc? 12829 // Use empty EllipsisLoc if X and Y differ. 12830 12831 EPIX.HasTrailingReturn = EPIX.HasTrailingReturn && EPIY.HasTrailingReturn; 12832 12833 QualType R = 12834 Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType()); 12835 auto P = getCommonTypes(Ctx, FX->param_types(), FY->param_types(), 12836 /*Unqualified=*/true); 12837 12838 SmallVector<QualType, 8> Exceptions; 12839 EPIX.ExceptionSpec = Ctx.mergeExceptionSpecs( 12840 EPIX.ExceptionSpec, EPIY.ExceptionSpec, Exceptions, /*AcceptDependent=*/true); 12841 return Ctx.getFunctionType(R, P, EPIX); 12842 } 12843 case Type::ObjCObject: { 12844 const auto *OX = cast<ObjCObjectType>(X), *OY = cast<ObjCObjectType>(Y); 12845 assert( 12846 std::equal(OX->getProtocols().begin(), OX->getProtocols().end(), 12847 OY->getProtocols().begin(), OY->getProtocols().end(), 12848 [](const ObjCProtocolDecl *P0, const ObjCProtocolDecl *P1) { 12849 return P0->getCanonicalDecl() == P1->getCanonicalDecl(); 12850 }) && 12851 "protocol lists must be the same"); 12852 auto TAs = getCommonTypes(Ctx, OX->getTypeArgsAsWritten(), 12853 OY->getTypeArgsAsWritten()); 12854 return Ctx.getObjCObjectType( 12855 Ctx.getCommonSugaredType(OX->getBaseType(), OY->getBaseType()), TAs, 12856 OX->getProtocols(), 12857 OX->isKindOfTypeAsWritten() && OY->isKindOfTypeAsWritten()); 12858 } 12859 case Type::ConstantMatrix: { 12860 const auto *MX = cast<ConstantMatrixType>(X), 12861 *MY = cast<ConstantMatrixType>(Y); 12862 assert(MX->getNumRows() == MY->getNumRows()); 12863 assert(MX->getNumColumns() == MY->getNumColumns()); 12864 return Ctx.getConstantMatrixType(getCommonElementType(Ctx, MX, MY), 12865 MX->getNumRows(), MX->getNumColumns()); 12866 } 12867 case Type::DependentSizedMatrix: { 12868 const auto *MX = cast<DependentSizedMatrixType>(X), 12869 *MY = cast<DependentSizedMatrixType>(Y); 12870 assert(Ctx.hasSameExpr(MX->getRowExpr(), MY->getRowExpr())); 12871 assert(Ctx.hasSameExpr(MX->getColumnExpr(), MY->getColumnExpr())); 12872 return Ctx.getDependentSizedMatrixType( 12873 getCommonElementType(Ctx, MX, MY), MX->getRowExpr(), 12874 MX->getColumnExpr(), getCommonAttrLoc(MX, MY)); 12875 } 12876 case Type::Vector: { 12877 const auto *VX = cast<VectorType>(X), *VY =
cast<VectorType>(Y); 12878 assert(VX->getNumElements() == VY->getNumElements()); 12879 assert(VX->getVectorKind() == VY->getVectorKind()); 12880 return Ctx.getVectorType(getCommonElementType(Ctx, VX, VY), 12881 VX->getNumElements(), VX->getVectorKind()); 12882 } 12883 case Type::ExtVector: { 12884 const auto *VX = cast<ExtVectorType>(X), *VY = cast<ExtVectorType>(Y); 12885 assert(VX->getNumElements() == VY->getNumElements()); 12886 return Ctx.getExtVectorType(getCommonElementType(Ctx, VX, VY), 12887 VX->getNumElements()); 12888 } 12889 case Type::DependentSizedExtVector: { 12890 const auto *VX = cast<DependentSizedExtVectorType>(X), 12891 *VY = cast<DependentSizedExtVectorType>(Y); 12892 return Ctx.getDependentSizedExtVectorType(getCommonElementType(Ctx, VX, VY), 12893 getCommonSizeExpr(Ctx, VX, VY), 12894 getCommonAttrLoc(VX, VY)); 12895 } 12896 case Type::DependentVector: { 12897 const auto *VX = cast<DependentVectorType>(X), 12898 *VY = cast<DependentVectorType>(Y); 12899 assert(VX->getVectorKind() == VY->getVectorKind()); 12900 return Ctx.getDependentVectorType( 12901 getCommonElementType(Ctx, VX, VY), getCommonSizeExpr(Ctx, VX, VY), 12902 getCommonAttrLoc(VX, VY), VX->getVectorKind()); 12903 } 12904 case Type::InjectedClassName: { 12905 const auto *IX = cast<InjectedClassNameType>(X), 12906 *IY = cast<InjectedClassNameType>(Y); 12907 return Ctx.getInjectedClassNameType( 12908 getCommonDeclChecked(IX->getDecl(), IY->getDecl()), 12909 Ctx.getCommonSugaredType(IX->getInjectedSpecializationType(), 12910 IY->getInjectedSpecializationType())); 12911 } 12912 case Type::TemplateSpecialization: { 12913 const auto *TX = cast<TemplateSpecializationType>(X), 12914 *TY = cast<TemplateSpecializationType>(Y); 12915 auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(), 12916 TY->template_arguments()); 12917 return Ctx.getTemplateSpecializationType( 12918 ::getCommonTemplateNameChecked(Ctx, TX->getTemplateName(), 12919 TY->getTemplateName()), 12920 As, X->getCanonicalTypeInternal()); 12921 } 12922 case Type::DependentName: { 12923 const auto *NX = cast<DependentNameType>(X), 12924 *NY = cast<DependentNameType>(Y); 12925 assert(NX->getIdentifier() == NY->getIdentifier()); 12926 return Ctx.getDependentNameType( 12927 getCommonTypeKeyword(NX, NY), getCommonNNS(Ctx, NX, NY), 12928 NX->getIdentifier(), NX->getCanonicalTypeInternal()); 12929 } 12930 case Type::DependentTemplateSpecialization: { 12931 const auto *TX = cast<DependentTemplateSpecializationType>(X), 12932 *TY = cast<DependentTemplateSpecializationType>(Y); 12933 assert(TX->getIdentifier() == TY->getIdentifier()); 12934 auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(), 12935 TY->template_arguments()); 12936 return Ctx.getDependentTemplateSpecializationType( 12937 getCommonTypeKeyword(TX, TY), getCommonNNS(Ctx, TX, TY), 12938 TX->getIdentifier(), As); 12939 } 12940 case Type::UnaryTransform: { 12941 const auto *TX = cast<UnaryTransformType>(X), 12942 *TY = cast<UnaryTransformType>(Y); 12943 assert(TX->getUTTKind() == TY->getUTTKind()); 12944 return Ctx.getUnaryTransformType( 12945 Ctx.getCommonSugaredType(TX->getBaseType(), TY->getBaseType()), 12946 Ctx.getCommonSugaredType(TX->getUnderlyingType(), 12947 TY->getUnderlyingType()), 12948 TX->getUTTKind()); 12949 } 12950 case Type::PackExpansion: { 12951 const auto *PX = cast<PackExpansionType>(X), 12952 *PY = cast<PackExpansionType>(Y); 12953 assert(PX->getNumExpansions() == PY->getNumExpansions()); 12954 return Ctx.getPackExpansionType( 12955 
Ctx.getCommonSugaredType(PX->getPattern(), PY->getPattern()), 12956 PX->getNumExpansions(), false); 12957 } 12958 case Type::Pipe: { 12959 const auto *PX = cast<PipeType>(X), *PY = cast<PipeType>(Y); 12960 assert(PX->isReadOnly() == PY->isReadOnly()); 12961 auto MP = PX->isReadOnly() ? &ASTContext::getReadPipeType 12962 : &ASTContext::getWritePipeType; 12963 return (Ctx.*MP)(getCommonElementType(Ctx, PX, PY)); 12964 } 12965 case Type::TemplateTypeParm: { 12966 const auto *TX = cast<TemplateTypeParmType>(X), 12967 *TY = cast<TemplateTypeParmType>(Y); 12968 assert(TX->getDepth() == TY->getDepth()); 12969 assert(TX->getIndex() == TY->getIndex()); 12970 assert(TX->isParameterPack() == TY->isParameterPack()); 12971 return Ctx.getTemplateTypeParmType( 12972 TX->getDepth(), TX->getIndex(), TX->isParameterPack(), 12973 getCommonDecl(TX->getDecl(), TY->getDecl())); 12974 } 12975 } 12976 llvm_unreachable("Unknown Type Class"); 12977 } 12978 12979 static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X, 12980 const Type *Y, 12981 SplitQualType Underlying) { 12982 Type::TypeClass TC = X->getTypeClass(); 12983 if (TC != Y->getTypeClass()) 12984 return QualType(); 12985 switch (TC) { 12986 #define UNEXPECTED_TYPE(Class, Kind) \ 12987 case Type::Class: \ 12988 llvm_unreachable("Unexpected " Kind ": " #Class); 12989 #define TYPE(Class, Base) 12990 #define DEPENDENT_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "dependent") 12991 #include "clang/AST/TypeNodes.inc" 12992 12993 #define CANONICAL_TYPE(Class) UNEXPECTED_TYPE(Class, "canonical") 12994 CANONICAL_TYPE(Atomic) 12995 CANONICAL_TYPE(BitInt) 12996 CANONICAL_TYPE(BlockPointer) 12997 CANONICAL_TYPE(Builtin) 12998 CANONICAL_TYPE(Complex) 12999 CANONICAL_TYPE(ConstantArray) 13000 CANONICAL_TYPE(ConstantMatrix) 13001 CANONICAL_TYPE(Enum) 13002 CANONICAL_TYPE(ExtVector) 13003 CANONICAL_TYPE(FunctionNoProto) 13004 CANONICAL_TYPE(FunctionProto) 13005 CANONICAL_TYPE(IncompleteArray) 13006 CANONICAL_TYPE(LValueReference) 13007 CANONICAL_TYPE(MemberPointer) 13008 CANONICAL_TYPE(ObjCInterface) 13009 CANONICAL_TYPE(ObjCObject) 13010 CANONICAL_TYPE(ObjCObjectPointer) 13011 CANONICAL_TYPE(Pipe) 13012 CANONICAL_TYPE(Pointer) 13013 CANONICAL_TYPE(Record) 13014 CANONICAL_TYPE(RValueReference) 13015 CANONICAL_TYPE(VariableArray) 13016 CANONICAL_TYPE(Vector) 13017 #undef CANONICAL_TYPE 13018 13019 #undef UNEXPECTED_TYPE 13020 13021 case Type::Adjusted: { 13022 const auto *AX = cast<AdjustedType>(X), *AY = cast<AdjustedType>(Y); 13023 QualType OX = AX->getOriginalType(), OY = AY->getOriginalType(); 13024 if (!Ctx.hasSameType(OX, OY)) 13025 return QualType(); 13026 // FIXME: It's inefficient to have to unify the original types. 13027 return Ctx.getAdjustedType(Ctx.getCommonSugaredType(OX, OY), 13028 Ctx.getQualifiedType(Underlying)); 13029 } 13030 case Type::Decayed: { 13031 const auto *DX = cast<DecayedType>(X), *DY = cast<DecayedType>(Y); 13032 QualType OX = DX->getOriginalType(), OY = DY->getOriginalType(); 13033 if (!Ctx.hasSameType(OX, OY)) 13034 return QualType(); 13035 // FIXME: It's inefficient to have to unify the original types. 
13036 return Ctx.getDecayedType(Ctx.getCommonSugaredType(OX, OY), 13037 Ctx.getQualifiedType(Underlying)); 13038 } 13039 case Type::Attributed: { 13040 const auto *AX = cast<AttributedType>(X), *AY = cast<AttributedType>(Y); 13041 AttributedType::Kind Kind = AX->getAttrKind(); 13042 if (Kind != AY->getAttrKind()) 13043 return QualType(); 13044 QualType MX = AX->getModifiedType(), MY = AY->getModifiedType(); 13045 if (!Ctx.hasSameType(MX, MY)) 13046 return QualType(); 13047 // FIXME: It's inefficient to have to unify the modified types. 13048 return Ctx.getAttributedType(Kind, Ctx.getCommonSugaredType(MX, MY), 13049 Ctx.getQualifiedType(Underlying)); 13050 } 13051 case Type::BTFTagAttributed: { 13052 const auto *BX = cast<BTFTagAttributedType>(X); 13053 const BTFTypeTagAttr *AX = BX->getAttr(); 13054 // The attribute is not uniqued, so just compare the tag. 13055 if (AX->getBTFTypeTag() != 13056 cast<BTFTagAttributedType>(Y)->getAttr()->getBTFTypeTag()) 13057 return QualType(); 13058 return Ctx.getBTFTagAttributedType(AX, Ctx.getQualifiedType(Underlying)); 13059 } 13060 case Type::Auto: { 13061 const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y); 13062 13063 AutoTypeKeyword KW = AX->getKeyword(); 13064 if (KW != AY->getKeyword()) 13065 return QualType(); 13066 13067 ConceptDecl *CD = ::getCommonDecl(AX->getTypeConstraintConcept(), 13068 AY->getTypeConstraintConcept()); 13069 SmallVector<TemplateArgument, 8> As; 13070 if (CD && 13071 getCommonTemplateArguments(Ctx, As, AX->getTypeConstraintArguments(), 13072 AY->getTypeConstraintArguments())) { 13073 CD = nullptr; // The arguments differ, so make it unconstrained. 13074 As.clear(); 13075 } 13076 13077 // Both auto types can't be dependent, otherwise they wouldn't have been 13078 // sugar. This implies they can't contain unexpanded packs either. 13079 return Ctx.getAutoType(Ctx.getQualifiedType(Underlying), AX->getKeyword(), 13080 /*IsDependent=*/false, /*IsPack=*/false, CD, As); 13081 } 13082 case Type::Decltype: 13083 return QualType(); 13084 case Type::DeducedTemplateSpecialization: 13085 // FIXME: Try to merge these. 13086 return QualType(); 13087 13088 case Type::Elaborated: { 13089 const auto *EX = cast<ElaboratedType>(X), *EY = cast<ElaboratedType>(Y); 13090 return Ctx.getElaboratedType( 13091 ::getCommonTypeKeyword(EX, EY), ::getCommonNNS(Ctx, EX, EY), 13092 Ctx.getQualifiedType(Underlying), 13093 ::getCommonDecl(EX->getOwnedTagDecl(), EY->getOwnedTagDecl())); 13094 } 13095 case Type::MacroQualified: { 13096 const auto *MX = cast<MacroQualifiedType>(X), 13097 *MY = cast<MacroQualifiedType>(Y); 13098 const IdentifierInfo *IX = MX->getMacroIdentifier(); 13099 if (IX != MY->getMacroIdentifier()) 13100 return QualType(); 13101 return Ctx.getMacroQualifiedType(Ctx.getQualifiedType(Underlying), IX); 13102 } 13103 case Type::SubstTemplateTypeParm: { 13104 const auto *SX = cast<SubstTemplateTypeParmType>(X), 13105 *SY = cast<SubstTemplateTypeParmType>(Y); 13106 Decl *CD = 13107 ::getCommonDecl(SX->getAssociatedDecl(), SY->getAssociatedDecl()); 13108 if (!CD) 13109 return QualType(); 13110 unsigned Index = SX->getIndex(); 13111 if (Index != SY->getIndex()) 13112 return QualType(); 13113 auto PackIndex = SX->getPackIndex(); 13114 if (PackIndex != SY->getPackIndex()) 13115 return QualType(); 13116 return Ctx.getSubstTemplateTypeParmType(Ctx.getQualifiedType(Underlying), 13117 CD, Index, PackIndex); 13118 } 13119 case Type::ObjCTypeParam: 13120 // FIXME: Try to merge these. 
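// A null QualType result tells getCommonSugaredType to stop unifying sugar
// at this node and fall back to the common underlying type.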
13121 return QualType(); 13122 case Type::Paren: 13123 return Ctx.getParenType(Ctx.getQualifiedType(Underlying)); 13124 13125 case Type::TemplateSpecialization: { 13126 const auto *TX = cast<TemplateSpecializationType>(X), 13127 *TY = cast<TemplateSpecializationType>(Y); 13128 TemplateName CTN = ::getCommonTemplateName(Ctx, TX->getTemplateName(), 13129 TY->getTemplateName()); 13130 if (!CTN.getAsVoidPointer()) 13131 return QualType(); 13132 SmallVector<TemplateArgument, 8> Args; 13133 if (getCommonTemplateArguments(Ctx, Args, TX->template_arguments(), 13134 TY->template_arguments())) 13135 return QualType(); 13136 return Ctx.getTemplateSpecializationType(CTN, Args, 13137 Ctx.getQualifiedType(Underlying)); 13138 } 13139 case Type::Typedef: { 13140 const auto *TX = cast<TypedefType>(X), *TY = cast<TypedefType>(Y); 13141 const TypedefNameDecl *CD = ::getCommonDecl(TX->getDecl(), TY->getDecl()); 13142 if (!CD) 13143 return QualType(); 13144 return Ctx.getTypedefType(CD, Ctx.getQualifiedType(Underlying)); 13145 } 13146 case Type::TypeOf: { 13147 // When unifying two typeof expressions where one is potentially a 13148 // typeof_unqual and the other is not, use the qualified form, as it 13149 // retains the most information along with the type. We only produce a 13150 // typeof_unqual type when both operands are typeof_unqual. 13151 TypeOfKind Kind = TypeOfKind::Qualified; 13152 if (cast<TypeOfType>(X)->getKind() == cast<TypeOfType>(Y)->getKind() && 13153 cast<TypeOfType>(X)->getKind() == TypeOfKind::Unqualified) 13154 Kind = TypeOfKind::Unqualified; 13155 return Ctx.getTypeOfType(Ctx.getQualifiedType(Underlying), Kind); 13156 } 13157 case Type::TypeOfExpr: 13158 return QualType(); 13159 13160 case Type::UnaryTransform: { 13161 const auto *UX = cast<UnaryTransformType>(X), 13162 *UY = cast<UnaryTransformType>(Y); 13163 UnaryTransformType::UTTKind KX = UX->getUTTKind(); 13164 if (KX != UY->getUTTKind()) 13165 return QualType(); 13166 QualType BX = UX->getBaseType(), BY = UY->getBaseType(); 13167 if (!Ctx.hasSameType(BX, BY)) 13168 return QualType(); 13169 // FIXME: It's inefficient to have to unify the base types. 13170 return Ctx.getUnaryTransformType(Ctx.getCommonSugaredType(BX, BY), 13171 Ctx.getQualifiedType(Underlying), KX); 13172 } 13173 case Type::Using: { 13174 const auto *UX = cast<UsingType>(X), *UY = cast<UsingType>(Y); 13175 const UsingShadowDecl *CD = 13176 ::getCommonDecl(UX->getFoundDecl(), UY->getFoundDecl()); 13177 if (!CD) 13178 return QualType(); 13179 return Ctx.getUsingType(CD, Ctx.getQualifiedType(Underlying)); 13180 } 13181 } 13182 llvm_unreachable("Unhandled Type Class"); 13183 } 13184 13185 static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) { 13186 SmallVector<SplitQualType, 8> R; 13187 while (true) { 13188 QTotal.addConsistentQualifiers(T.Quals); 13189 QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType(); 13190 if (NT == QualType(T.Ty, 0)) 13191 break; 13192 R.push_back(T); 13193 T = NT.split(); 13194 } 13195 return R; 13196 } 13197 13198 QualType ASTContext::getCommonSugaredType(QualType X, QualType Y, 13199 bool Unqualified) { 13200 assert(Unqualified ?
hasSameUnqualifiedType(X, Y) : hasSameType(X, Y)); 13201 if (X == Y) 13202 return X; 13203 if (!Unqualified) { 13204 if (X.isCanonical()) 13205 return X; 13206 if (Y.isCanonical()) 13207 return Y; 13208 } 13209 13210 SplitQualType SX = X.split(), SY = Y.split(); 13211 Qualifiers QX, QY; 13212 // Desugar SX and SY, setting the sugar and qualifiers aside into Xs and Ys, 13213 // until we reach their underlying "canonical nodes". Note these are not 13214 // necessarily canonical types, as they may still have sugared properties. 13215 // QX and QY will store the sum of all qualifiers in Xs and Ys respectively. 13216 auto Xs = ::unwrapSugar(SX, QX), Ys = ::unwrapSugar(SY, QY); 13217 if (SX.Ty != SY.Ty) { 13218 // The canonical nodes differ. Build a common canonical node out of the two, 13219 // unifying their sugar. This may recurse back here. 13220 SX.Ty = 13221 ::getCommonNonSugarTypeNode(*this, SX.Ty, QX, SY.Ty, QY).getTypePtr(); 13222 } else { 13223 // The canonical nodes were identical: We may have desugared too much. 13224 // Add any common sugar back in. 13225 while (!Xs.empty() && !Ys.empty() && Xs.back().Ty == Ys.back().Ty) { 13226 QX -= SX.Quals; 13227 QY -= SY.Quals; 13228 SX = Xs.pop_back_val(); 13229 SY = Ys.pop_back_val(); 13230 } 13231 } 13232 if (Unqualified) 13233 QX = Qualifiers::removeCommonQualifiers(QX, QY); 13234 else 13235 assert(QX == QY); 13236 13237 // Even though the remaining sugar nodes in Xs and Ys differ, some may be 13238 // related. Walk up these nodes, unifying them and adding the result. 13239 while (!Xs.empty() && !Ys.empty()) { 13240 auto Underlying = SplitQualType( 13241 SX.Ty, Qualifiers::removeCommonQualifiers(SX.Quals, SY.Quals)); 13242 SX = Xs.pop_back_val(); 13243 SY = Ys.pop_back_val(); 13244 SX.Ty = ::getCommonSugarTypeNode(*this, SX.Ty, SY.Ty, Underlying) 13245 .getTypePtrOrNull(); 13246 // Stop at the first pair which is unrelated. 13247 if (!SX.Ty) { 13248 SX.Ty = Underlying.Ty; 13249 break; 13250 } 13251 QX -= Underlying.Quals; 13252 }; 13253 13254 // Add back the missing accumulated qualifiers, which were stripped off 13255 // with the sugar nodes we could not unify. 13256 QualType R = getQualifiedType(SX.Ty, QX); 13257 assert(Unqualified ? 
hasSameUnqualifiedType(R, X) : hasSameType(R, X)); 13258 return R; 13259 } 13260 13261 QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const { 13262 assert(Ty->isFixedPointType()); 13263 13264 if (Ty->isSaturatedFixedPointType()) return Ty; 13265 13266 switch (Ty->castAs<BuiltinType>()->getKind()) { 13267 default: 13268 llvm_unreachable("Not a fixed point type!"); 13269 case BuiltinType::ShortAccum: 13270 return SatShortAccumTy; 13271 case BuiltinType::Accum: 13272 return SatAccumTy; 13273 case BuiltinType::LongAccum: 13274 return SatLongAccumTy; 13275 case BuiltinType::UShortAccum: 13276 return SatUnsignedShortAccumTy; 13277 case BuiltinType::UAccum: 13278 return SatUnsignedAccumTy; 13279 case BuiltinType::ULongAccum: 13280 return SatUnsignedLongAccumTy; 13281 case BuiltinType::ShortFract: 13282 return SatShortFractTy; 13283 case BuiltinType::Fract: 13284 return SatFractTy; 13285 case BuiltinType::LongFract: 13286 return SatLongFractTy; 13287 case BuiltinType::UShortFract: 13288 return SatUnsignedShortFractTy; 13289 case BuiltinType::UFract: 13290 return SatUnsignedFractTy; 13291 case BuiltinType::ULongFract: 13292 return SatUnsignedLongFractTy; 13293 } 13294 } 13295 13296 LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const { 13297 if (LangOpts.OpenCL) 13298 return getTargetInfo().getOpenCLBuiltinAddressSpace(AS); 13299 13300 if (LangOpts.CUDA) 13301 return getTargetInfo().getCUDABuiltinAddressSpace(AS); 13302 13303 return getLangASFromTargetAS(AS); 13304 } 13305 13306 // Explicitly instantiate this in case a Redeclarable<T> is used from a TU that 13307 // doesn't include ASTContext.h 13308 template 13309 clang::LazyGenerationalUpdatePtr< 13310 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType 13311 clang::LazyGenerationalUpdatePtr< 13312 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue( 13313 const clang::ASTContext &Ctx, Decl *Value); 13314 13315 unsigned char ASTContext::getFixedPointScale(QualType Ty) const { 13316 assert(Ty->isFixedPointType()); 13317 13318 const TargetInfo &Target = getTargetInfo(); 13319 switch (Ty->castAs<BuiltinType>()->getKind()) { 13320 default: 13321 llvm_unreachable("Not a fixed point type!"); 13322 case BuiltinType::ShortAccum: 13323 case BuiltinType::SatShortAccum: 13324 return Target.getShortAccumScale(); 13325 case BuiltinType::Accum: 13326 case BuiltinType::SatAccum: 13327 return Target.getAccumScale(); 13328 case BuiltinType::LongAccum: 13329 case BuiltinType::SatLongAccum: 13330 return Target.getLongAccumScale(); 13331 case BuiltinType::UShortAccum: 13332 case BuiltinType::SatUShortAccum: 13333 return Target.getUnsignedShortAccumScale(); 13334 case BuiltinType::UAccum: 13335 case BuiltinType::SatUAccum: 13336 return Target.getUnsignedAccumScale(); 13337 case BuiltinType::ULongAccum: 13338 case BuiltinType::SatULongAccum: 13339 return Target.getUnsignedLongAccumScale(); 13340 case BuiltinType::ShortFract: 13341 case BuiltinType::SatShortFract: 13342 return Target.getShortFractScale(); 13343 case BuiltinType::Fract: 13344 case BuiltinType::SatFract: 13345 return Target.getFractScale(); 13346 case BuiltinType::LongFract: 13347 case BuiltinType::SatLongFract: 13348 return Target.getLongFractScale(); 13349 case BuiltinType::UShortFract: 13350 case BuiltinType::SatUShortFract: 13351 return Target.getUnsignedShortFractScale(); 13352 case BuiltinType::UFract: 13353 case BuiltinType::SatUFract: 13354 return Target.getUnsignedFractScale(); 13355 case BuiltinType::ULongFract: 13356 
case BuiltinType::SatULongFract: 13357 return Target.getUnsignedLongFractScale(); 13358 } 13359 } 13360 13361 unsigned char ASTContext::getFixedPointIBits(QualType Ty) const { 13362 assert(Ty->isFixedPointType()); 13363 13364 const TargetInfo &Target = getTargetInfo(); 13365 switch (Ty->castAs<BuiltinType>()->getKind()) { 13366 default: 13367 llvm_unreachable("Not a fixed point type!"); 13368 case BuiltinType::ShortAccum: 13369 case BuiltinType::SatShortAccum: 13370 return Target.getShortAccumIBits(); 13371 case BuiltinType::Accum: 13372 case BuiltinType::SatAccum: 13373 return Target.getAccumIBits(); 13374 case BuiltinType::LongAccum: 13375 case BuiltinType::SatLongAccum: 13376 return Target.getLongAccumIBits(); 13377 case BuiltinType::UShortAccum: 13378 case BuiltinType::SatUShortAccum: 13379 return Target.getUnsignedShortAccumIBits(); 13380 case BuiltinType::UAccum: 13381 case BuiltinType::SatUAccum: 13382 return Target.getUnsignedAccumIBits(); 13383 case BuiltinType::ULongAccum: 13384 case BuiltinType::SatULongAccum: 13385 return Target.getUnsignedLongAccumIBits(); 13386 case BuiltinType::ShortFract: 13387 case BuiltinType::SatShortFract: 13388 case BuiltinType::Fract: 13389 case BuiltinType::SatFract: 13390 case BuiltinType::LongFract: 13391 case BuiltinType::SatLongFract: 13392 case BuiltinType::UShortFract: 13393 case BuiltinType::SatUShortFract: 13394 case BuiltinType::UFract: 13395 case BuiltinType::SatUFract: 13396 case BuiltinType::ULongFract: 13397 case BuiltinType::SatULongFract: 13398 return 0; 13399 } 13400 } 13401 13402 llvm::FixedPointSemantics 13403 ASTContext::getFixedPointSemantics(QualType Ty) const { 13404 assert((Ty->isFixedPointType() || Ty->isIntegerType()) && 13405 "Can only get the fixed point semantics for a " 13406 "fixed point or integer type."); 13407 if (Ty->isIntegerType()) 13408 return llvm::FixedPointSemantics::GetIntegerSemantics( 13409 getIntWidth(Ty), Ty->isSignedIntegerType()); 13410 13411 bool isSigned = Ty->isSignedFixedPointType(); 13412 return llvm::FixedPointSemantics( 13413 static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned, 13414 Ty->isSaturatedFixedPointType(), 13415 !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding()); 13416 } 13417 13418 llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const { 13419 assert(Ty->isFixedPointType()); 13420 return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty)); 13421 } 13422 13423 llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const { 13424 assert(Ty->isFixedPointType()); 13425 return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty)); 13426 } 13427 13428 QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const { 13429 assert(Ty->isUnsignedFixedPointType() && 13430 "Expected unsigned fixed point type"); 13431 13432 switch (Ty->castAs<BuiltinType>()->getKind()) { 13433 case BuiltinType::UShortAccum: 13434 return ShortAccumTy; 13435 case BuiltinType::UAccum: 13436 return AccumTy; 13437 case BuiltinType::ULongAccum: 13438 return LongAccumTy; 13439 case BuiltinType::SatUShortAccum: 13440 return SatShortAccumTy; 13441 case BuiltinType::SatUAccum: 13442 return SatAccumTy; 13443 case BuiltinType::SatULongAccum: 13444 return SatLongAccumTy; 13445 case BuiltinType::UShortFract: 13446 return ShortFractTy; 13447 case BuiltinType::UFract: 13448 return FractTy; 13449 case BuiltinType::ULongFract: 13450 return LongFractTy; 13451 case BuiltinType::SatUShortFract: 13452 return SatShortFractTy; 13453 case BuiltinType::SatUFract: 13454 
return SatFractTy; 13455 case BuiltinType::SatULongFract: 13456 return SatLongFractTy; 13457 default: 13458 llvm_unreachable("Unexpected unsigned fixed point type"); 13459 } 13460 } 13461 13462 std::vector<std::string> ASTContext::filterFunctionTargetVersionAttrs( 13463 const TargetVersionAttr *TV) const { 13464 assert(TV != nullptr); 13465 llvm::SmallVector<StringRef, 8> Feats; 13466 std::vector<std::string> ResFeats; 13467 TV->getFeatures(Feats); 13468 for (auto &Feature : Feats) 13469 if (Target->validateCpuSupports(Feature.str())) 13470 // Use '?' to mark features that came from TargetVersion. 13471 ResFeats.push_back("?" + Feature.str()); 13472 return ResFeats; 13473 } 13474 13475 ParsedTargetAttr 13476 ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const { 13477 assert(TD != nullptr); 13478 ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(TD->getFeaturesStr()); 13479 13480 llvm::erase_if(ParsedAttr.Features, [&](const std::string &Feat) { 13481 return !Target->isValidFeatureName(StringRef{Feat}.substr(1)); 13482 }); 13483 return ParsedAttr; 13484 } 13485 13486 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, 13487 const FunctionDecl *FD) const { 13488 if (FD) 13489 getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD)); 13490 else 13491 Target->initFeatureMap(FeatureMap, getDiagnostics(), 13492 Target->getTargetOpts().CPU, 13493 Target->getTargetOpts().Features); 13494 } 13495 13496 // Fills in the supplied string map with the set of target features for the 13497 // passed in function. 13498 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, 13499 GlobalDecl GD) const { 13500 StringRef TargetCPU = Target->getTargetOpts().CPU; 13501 const FunctionDecl *FD = GD.getDecl()->getAsFunction(); 13502 if (const auto *TD = FD->getAttr<TargetAttr>()) { 13503 ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD); 13504 13505 // Make a copy of the features as passed on the command line into the 13506 // beginning of the additional features from the function to override. 13507 ParsedAttr.Features.insert( 13508 ParsedAttr.Features.begin(), 13509 Target->getTargetOpts().FeaturesAsWritten.begin(), 13510 Target->getTargetOpts().FeaturesAsWritten.end()); 13511 13512 if (ParsedAttr.CPU != "" && Target->isValidCPUName(ParsedAttr.CPU)) 13513 TargetCPU = ParsedAttr.CPU; 13514 13515 // Now populate the feature map, first with the TargetCPU which is either 13516 // the default or a new one from the target attribute string. Then we'll use 13517 // the passed in features (FeaturesAsWritten) along with the new ones from 13518 // the attribute. 
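// For example, with `-target-feature +sse4.2` on the command line and
// `__attribute__((target("avx")))` on the function, the resulting map would
// contain both sse4.2 and avx; the attribute's features appear later in the
// list and therefore take precedence on any conflict.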
13519 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, 13520 ParsedAttr.Features); 13521 } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) { 13522 llvm::SmallVector<StringRef, 32> FeaturesTmp; 13523 Target->getCPUSpecificCPUDispatchFeatures( 13524 SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp); 13525 std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end()); 13526 Features.insert(Features.begin(), 13527 Target->getTargetOpts().FeaturesAsWritten.begin(), 13528 Target->getTargetOpts().FeaturesAsWritten.end()); 13529 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features); 13530 } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) { 13531 std::vector<std::string> Features; 13532 StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex()); 13533 if (Target->getTriple().isAArch64()) { 13534 // TargetClones for AArch64 13535 if (VersionStr != "default") { 13536 SmallVector<StringRef, 1> VersionFeatures; 13537 VersionStr.split(VersionFeatures, "+"); 13538 for (auto &VFeature : VersionFeatures) { 13539 VFeature = VFeature.trim(); 13540 // Use '?' to mark features that came from AArch64 TargetClones. 13541 Features.push_back((StringRef{"?"} + VFeature).str()); 13542 } 13543 } 13544 Features.insert(Features.begin(), 13545 Target->getTargetOpts().FeaturesAsWritten.begin(), 13546 Target->getTargetOpts().FeaturesAsWritten.end()); 13547 } else { 13548 if (VersionStr.startswith("arch=")) 13549 TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1); 13550 else if (VersionStr != "default") 13551 Features.push_back((StringRef{"+"} + VersionStr).str()); 13552 } 13553 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features); 13554 } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) { 13555 std::vector<std::string> Feats = filterFunctionTargetVersionAttrs(TV); 13556 Feats.insert(Feats.begin(), 13557 Target->getTargetOpts().FeaturesAsWritten.begin(), 13558 Target->getTargetOpts().FeaturesAsWritten.end()); 13559 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Feats); 13560 } else { 13561 FeatureMap = Target->getTargetOpts().FeatureMap; 13562 } 13563 } 13564 13565 OMPTraitInfo &ASTContext::getNewOMPTraitInfo() { 13566 OMPTraitInfoVector.emplace_back(new OMPTraitInfo()); 13567 return *OMPTraitInfoVector.back(); 13568 } 13569 13570 const StreamingDiagnostic &clang:: 13571 operator<<(const StreamingDiagnostic &DB, 13572 const ASTContext::SectionInfo &Section) { 13573 if (Section.Decl) 13574 return DB << Section.Decl; 13575 return DB << "a prior #pragma section"; 13576 } 13577 13578 bool ASTContext::mayExternalize(const Decl *D) const { 13579 bool IsInternalVar = 13580 isa<VarDecl>(D) && 13581 basicGVALinkageForVariable(*this, cast<VarDecl>(D)) == GVA_Internal; 13582 bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() && 13583 !D->getAttr<CUDADeviceAttr>()->isImplicit()) || 13584 (D->hasAttr<CUDAConstantAttr>() && 13585 !D->getAttr<CUDAConstantAttr>()->isImplicit()); 13586 // CUDA/HIP: managed variables need to be externalized since they are 13587 // declarations in IR and therefore cannot have internal linkage. Kernels in 13588 // an anonymous namespace need to be externalized to avoid duplicate symbols.
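// Note that this only determines whether externalization is possible;
// shouldExternalize() below additionally requires a managed variable, a
// kernel, or a device variable that is ODR-used by the host.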
13589 return (IsInternalVar && 13590 (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) || 13591 (D->hasAttr<CUDAGlobalAttr>() && 13592 basicGVALinkageForFunction(*this, cast<FunctionDecl>(D)) == 13593 GVA_Internal); 13594 } 13595 13596 bool ASTContext::shouldExternalize(const Decl *D) const { 13597 return mayExternalize(D) && 13598 (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() || 13599 CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D))); 13600 } 13601 13602 StringRef ASTContext::getCUIDHash() const { 13603 if (!CUIDHash.empty()) 13604 return CUIDHash; 13605 if (LangOpts.CUID.empty()) 13606 return StringRef(); 13607 CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true); 13608 return CUIDHash; 13609 } 13610