1 //===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the ASTContext interface. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "clang/AST/ASTContext.h" 14 #include "CXXABI.h" 15 #include "Interp/Context.h" 16 #include "clang/AST/APValue.h" 17 #include "clang/AST/ASTConcept.h" 18 #include "clang/AST/ASTMutationListener.h" 19 #include "clang/AST/ASTTypeTraits.h" 20 #include "clang/AST/Attr.h" 21 #include "clang/AST/AttrIterator.h" 22 #include "clang/AST/CharUnits.h" 23 #include "clang/AST/Comment.h" 24 #include "clang/AST/Decl.h" 25 #include "clang/AST/DeclBase.h" 26 #include "clang/AST/DeclCXX.h" 27 #include "clang/AST/DeclContextInternals.h" 28 #include "clang/AST/DeclObjC.h" 29 #include "clang/AST/DeclOpenMP.h" 30 #include "clang/AST/DeclTemplate.h" 31 #include "clang/AST/DeclarationName.h" 32 #include "clang/AST/DependenceFlags.h" 33 #include "clang/AST/Expr.h" 34 #include "clang/AST/ExprCXX.h" 35 #include "clang/AST/ExprConcepts.h" 36 #include "clang/AST/ExternalASTSource.h" 37 #include "clang/AST/Mangle.h" 38 #include "clang/AST/MangleNumberingContext.h" 39 #include "clang/AST/NestedNameSpecifier.h" 40 #include "clang/AST/ParentMapContext.h" 41 #include "clang/AST/RawCommentList.h" 42 #include "clang/AST/RecordLayout.h" 43 #include "clang/AST/Stmt.h" 44 #include "clang/AST/StmtOpenACC.h" 45 #include "clang/AST/TemplateBase.h" 46 #include "clang/AST/TemplateName.h" 47 #include "clang/AST/Type.h" 48 #include "clang/AST/TypeLoc.h" 49 #include "clang/AST/UnresolvedSet.h" 50 #include "clang/AST/VTableBuilder.h" 51 #include "clang/Basic/AddressSpaces.h" 52 
#include "clang/Basic/Builtins.h" 53 #include "clang/Basic/CommentOptions.h" 54 #include "clang/Basic/ExceptionSpecificationType.h" 55 #include "clang/Basic/IdentifierTable.h" 56 #include "clang/Basic/LLVM.h" 57 #include "clang/Basic/LangOptions.h" 58 #include "clang/Basic/Linkage.h" 59 #include "clang/Basic/Module.h" 60 #include "clang/Basic/NoSanitizeList.h" 61 #include "clang/Basic/ObjCRuntime.h" 62 #include "clang/Basic/ProfileList.h" 63 #include "clang/Basic/SourceLocation.h" 64 #include "clang/Basic/SourceManager.h" 65 #include "clang/Basic/Specifiers.h" 66 #include "clang/Basic/TargetCXXABI.h" 67 #include "clang/Basic/TargetInfo.h" 68 #include "clang/Basic/XRayLists.h" 69 #include "llvm/ADT/APFixedPoint.h" 70 #include "llvm/ADT/APInt.h" 71 #include "llvm/ADT/APSInt.h" 72 #include "llvm/ADT/ArrayRef.h" 73 #include "llvm/ADT/DenseMap.h" 74 #include "llvm/ADT/DenseSet.h" 75 #include "llvm/ADT/FoldingSet.h" 76 #include "llvm/ADT/PointerUnion.h" 77 #include "llvm/ADT/STLExtras.h" 78 #include "llvm/ADT/SmallPtrSet.h" 79 #include "llvm/ADT/SmallVector.h" 80 #include "llvm/ADT/StringExtras.h" 81 #include "llvm/ADT/StringRef.h" 82 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" 83 #include "llvm/Support/Capacity.h" 84 #include "llvm/Support/Casting.h" 85 #include "llvm/Support/Compiler.h" 86 #include "llvm/Support/ErrorHandling.h" 87 #include "llvm/Support/MD5.h" 88 #include "llvm/Support/MathExtras.h" 89 #include "llvm/Support/SipHash.h" 90 #include "llvm/Support/raw_ostream.h" 91 #include "llvm/TargetParser/AArch64TargetParser.h" 92 #include "llvm/TargetParser/Triple.h" 93 #include <algorithm> 94 #include <cassert> 95 #include <cstddef> 96 #include <cstdint> 97 #include <cstdlib> 98 #include <map> 99 #include <memory> 100 #include <optional> 101 #include <string> 102 #include <tuple> 103 #include <utility> 104 105 using namespace clang; 106 107 enum FloatingRank { 108 BFloat16Rank, 109 Float16Rank, 110 HalfRank, 111 FloatRank, 112 DoubleRank, 113 LongDoubleRank, 114 
  Float128Rank,
  Ibm128Rank
};

/// \returns The locations that are relevant when searching for Doc comments
/// related to \p D.
static SmallVector<SourceLocation, 2>
getDeclLocsForCommentSearch(const Decl *D, SourceManager &SourceMgr) {
  assert(D);

  // User can not attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // User can not attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  // Likewise for implicitly-instantiated static data members.
  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation ||
        TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }
  if (const auto *TD = dyn_cast<TagDecl>(D)) {
    // When tag declaration (but not definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get comment
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(D) ||
      isa<NonTypeTemplateParmDecl>(D) ||
      isa<TemplateTemplateParmDecl>(D))
    return {};

  SmallVector<SourceLocation, 2> Locations;
  // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use declaration starting location as the "declaration
  // location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  SourceLocation BaseLocation;
  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
      isa<ObjCPropertyDecl>(D) || isa<RedeclarableTemplateDecl>(D) ||
      isa<ClassTemplateSpecializationDecl>(D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(D))
    BaseLocation = D->getBeginLoc();
  else
    BaseLocation = D->getLocation();

  if (!D->getLocation().isMacroID()) {
    Locations.emplace_back(BaseLocation);
  } else {
    const auto *DeclCtx = D->getDeclContext();

    // When encountering definitions generated from a macro (that are not
    // contained by another declaration in the macro) we need to try and find
    // the comment at the location of the expansion but if there is no comment
    // there we should retry to see if there is a comment inside the macro as
    // well. To this end we return first BaseLocation to first look at the
    // expansion site, the second value is the spelling location of the
    // beginning of the declaration defined inside the macro.
    if (!(DeclCtx &&
          Decl::castFromDeclContext(DeclCtx)->getLocation().isMacroID())) {
      Locations.emplace_back(SourceMgr.getExpansionLoc(BaseLocation));
    }

    // We use Decl::getBeginLoc() and not just BaseLocation here to ensure that
    // we don't refer to the macro argument location at the expansion site (this
    // can happen if the name's spelling is provided via macro argument), and
    // always to the declaration itself.
    Locations.emplace_back(SourceMgr.getSpellingLoc(D->getBeginLoc()));
  }

  return Locations;
}

/// Looks for a raw doc comment for \p D among \p CommentsInTheFile (a map of
/// file offset -> comment for a single FileID), given one representative
/// location for \p D.  Returns a trailing comment on the same line, or the
/// nearest preceding non-trailing doc comment with nothing but whitespace
/// between it and the declaration; otherwise nullptr.
RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const std::pair<FileID, unsigned> DeclLocDecomp =
      SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);

  // Slow path: binary search for the first comment at or after the decl.
  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        // Trailing comments are only attached to these declaration kinds.
        (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
         isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {

      // Check that Doxygen trailing comment comes after the declaration, starts
      // on the same line and in the same file as the declaration.
      if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
                                       OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
                                               &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract text between the comment and declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // comment and declaration.
  if (Text.find_last_of(";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}

/// Searches for a doc comment attached to \p D without consulting the
/// per-decl caches; tries each candidate location produced by
/// getDeclLocsForCommentSearch in order.
RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

  for (const auto DeclLoc : DeclLocs) {
    // If the declaration doesn't map directly to a location in a file, we
    // can't find the comment.
    if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
      continue;

    // Lazily pull in comments from the external AST source (e.g. a PCH/module)
    // exactly once.
    if (ExternalSource && !CommentsLoaded) {
      ExternalSource->ReadComments();
      CommentsLoaded = true;
    }

    if (Comments.empty())
      continue;

    const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
    if (!File.isValid())
      continue;

    const auto CommentsInThisFile = Comments.getCommentsInFile(File);
    if (!CommentsInThisFile || CommentsInThisFile->empty())
      continue;

    if (RawComment *Comment =
            getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile))
      return Comment;
  }

  return nullptr;
}

/// Registers a raw comment with the comment list owned by this context.
void ASTContext::addComment(const RawComment &RC) {
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
}

/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to template.
static const Decl &adjustDeclToTemplate(const Decl &D) {
  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
    // Is this function declaration part of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
      return *FTD;

    // Nothing to do if function is not an implicit instantiation.
344 if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation) 345 return D; 346 347 // Function is an implicit instantiation of a function template? 348 if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate()) 349 return *FTD; 350 351 // Function is instantiated from a member definition of a class template? 352 if (const FunctionDecl *MemberDecl = 353 FD->getInstantiatedFromMemberFunction()) 354 return *MemberDecl; 355 356 return D; 357 } 358 if (const auto *VD = dyn_cast<VarDecl>(&D)) { 359 // Static data member is instantiated from a member definition of a class 360 // template? 361 if (VD->isStaticDataMember()) 362 if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember()) 363 return *MemberDecl; 364 365 return D; 366 } 367 if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) { 368 // Is this class declaration part of a class template? 369 if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate()) 370 return *CTD; 371 372 // Class is an implicit instantiation of a class template or partial 373 // specialization? 374 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) { 375 if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation) 376 return D; 377 llvm::PointerUnion<ClassTemplateDecl *, 378 ClassTemplatePartialSpecializationDecl *> 379 PU = CTSD->getSpecializedTemplateOrPartial(); 380 return PU.is<ClassTemplateDecl *>() 381 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>()) 382 : *static_cast<const Decl *>( 383 PU.get<ClassTemplatePartialSpecializationDecl *>()); 384 } 385 386 // Class is instantiated from a member definition of a class template? 387 if (const MemberSpecializationInfo *Info = 388 CRD->getMemberSpecializationInfo()) 389 return *Info->getInstantiatedFrom(); 390 391 return D; 392 } 393 if (const auto *ED = dyn_cast<EnumDecl>(&D)) { 394 // Enum is instantiated from a member definition of a class template? 
395 if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum()) 396 return *MemberDecl; 397 398 return D; 399 } 400 // FIXME: Adjust alias templates? 401 return D; 402 } 403 404 const RawComment *ASTContext::getRawCommentForAnyRedecl( 405 const Decl *D, 406 const Decl **OriginalDecl) const { 407 if (!D) { 408 if (OriginalDecl) 409 OriginalDecl = nullptr; 410 return nullptr; 411 } 412 413 D = &adjustDeclToTemplate(*D); 414 415 // Any comment directly attached to D? 416 { 417 auto DeclComment = DeclRawComments.find(D); 418 if (DeclComment != DeclRawComments.end()) { 419 if (OriginalDecl) 420 *OriginalDecl = D; 421 return DeclComment->second; 422 } 423 } 424 425 // Any comment attached to any redeclaration of D? 426 const Decl *CanonicalD = D->getCanonicalDecl(); 427 if (!CanonicalD) 428 return nullptr; 429 430 { 431 auto RedeclComment = RedeclChainComments.find(CanonicalD); 432 if (RedeclComment != RedeclChainComments.end()) { 433 if (OriginalDecl) 434 *OriginalDecl = RedeclComment->second; 435 auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second); 436 assert(CommentAtRedecl != DeclRawComments.end() && 437 "This decl is supposed to have comment attached."); 438 return CommentAtRedecl->second; 439 } 440 } 441 442 // Any redeclarations of D that we haven't checked for comments yet? 443 // We can't use DenseMap::iterator directly since it'd get invalid. 444 auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * { 445 return CommentlessRedeclChains.lookup(CanonicalD); 446 }(); 447 448 for (const auto Redecl : D->redecls()) { 449 assert(Redecl); 450 // Skip all redeclarations that have been checked previously. 
451 if (LastCheckedRedecl) { 452 if (LastCheckedRedecl == Redecl) { 453 LastCheckedRedecl = nullptr; 454 } 455 continue; 456 } 457 const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl); 458 if (RedeclComment) { 459 cacheRawCommentForDecl(*Redecl, *RedeclComment); 460 if (OriginalDecl) 461 *OriginalDecl = Redecl; 462 return RedeclComment; 463 } 464 CommentlessRedeclChains[CanonicalD] = Redecl; 465 } 466 467 if (OriginalDecl) 468 *OriginalDecl = nullptr; 469 return nullptr; 470 } 471 472 void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD, 473 const RawComment &Comment) const { 474 assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments); 475 DeclRawComments.try_emplace(&OriginalD, &Comment); 476 const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl(); 477 RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD); 478 CommentlessRedeclChains.erase(CanonicalDecl); 479 } 480 481 static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod, 482 SmallVectorImpl<const NamedDecl *> &Redeclared) { 483 const DeclContext *DC = ObjCMethod->getDeclContext(); 484 if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) { 485 const ObjCInterfaceDecl *ID = IMD->getClassInterface(); 486 if (!ID) 487 return; 488 // Add redeclared method here. 489 for (const auto *Ext : ID->known_extensions()) { 490 if (ObjCMethodDecl *RedeclaredMethod = 491 Ext->getMethod(ObjCMethod->getSelector(), 492 ObjCMethod->isInstanceMethod())) 493 Redeclared.push_back(RedeclaredMethod); 494 } 495 } 496 } 497 498 void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls, 499 const Preprocessor *PP) { 500 if (Comments.empty() || Decls.empty()) 501 return; 502 503 FileID File; 504 for (const Decl *D : Decls) { 505 if (D->isInvalidDecl()) 506 continue; 507 508 D = &adjustDeclToTemplate(*D); 509 SourceLocation Loc = D->getLocation(); 510 if (Loc.isValid()) { 511 // See if there are any new comments that are not attached to a decl. 
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.
  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(*D);

    // Skip decls that already have a comment attached.
    if (DeclRawComments.count(D) > 0)
      continue;

    const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

    for (const auto DeclLoc : DeclLocs) {
      if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
        continue;

      if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl(
              D, DeclLoc, *CommentsInThisFile)) {
        cacheRawCommentForDecl(*D, *DocComment);
        // Eagerly parse the comment in the context of D.
        comments::FullComment *FC = DocComment->parse(*this, PP, D);
        ParsedComments[D->getCanonicalDecl()] = FC;
        break;
      }
    }
  }
}

/// Makes a copy of \p FC whose DeclInfo is re-filled for \p D, so a comment
/// parsed for one declaration can be reported against another (e.g. an
/// overridden method or a redeclaration).
comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
                                                    const Decl *D) const {
  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
  ThisDeclInfo->CommentDecl = D;
  ThisDeclInfo->IsFilled = false;
  ThisDeclInfo->fill();
  // After filling, point back at the decl the comment was written for.
  ThisDeclInfo->CommentDecl = FC->getDecl();
  if (!ThisDeclInfo->TemplateParameters)
    ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
  comments::FullComment *CFC =
      new (*this) comments::FullComment(FC->getBlocks(),
                                        ThisDeclInfo);
  return CFC;
}

/// Parses (without caching) the raw comment found for \p D, if any.
comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
  const RawComment *RC = getRawCommentForDeclNoCache(D);
  return RC ? RC->parse(*this, nullptr, D) : nullptr;
}

/// Returns the parsed doc comment for \p D, consulting the ParsedComments
/// cache and falling back through redeclarations, overridden methods,
/// underlying tag types, Objective-C superclasses/categories, and public
/// C++ bases when D itself has no comment.
comments::FullComment *ASTContext::getCommentForDecl(
    const Decl *D,
    const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(*D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Canonical);

  if (Pos != ParsedComments.end()) {
    if (Canonical != D) {
      // The cache entry belongs to another redeclaration; clone it for D.
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
  if (!RC) {
    // No comment on D or any redeclaration; try to inherit one.
    if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(OMD, Overridden);
      getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
      // Attach any tag type's documentation to its typedef if latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(TD, PP))
            return cloneFullComment(FC, D);
    }
    else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
      // Walk up the superclass chain.
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase= VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to other redeclaration of this Decl, we
  // should parse the comment in context of that other Decl. This is important
  // because comments can contain references to parameter names which can be
  // different across redeclarations.
  if (D != OriginalDecl && OriginalDecl)
    return getCommentForDecl(OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(*this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}

/// Profiles a template template parameter for the canonicalization
/// FoldingSet: depth, position, pack-ness, and a recursive encoding of its
/// template parameter list (0 = type, 1 = non-type, 2 = template parameter).
void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
                                                   const ASTContext &C,
                                                   TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(Parm->getDepth());
  ID.AddInteger(Parm->getPosition());
  ID.AddBoolean(Parm->isParameterPack());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      ID.AddInteger(0);
      ID.AddBoolean(TTP->isParameterPack());
      if (TTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(TTP->getNumExpansionParameters());
      } else
        ID.AddBoolean(false);
      continue;
    }

    if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      ID.AddInteger(1);
      ID.AddBoolean(NTTP->isParameterPack());
      // Profile the canonical, constraint-stripped type of the parameter.
      ID.AddPointer(C.getUnconstrainedType(C.getCanonicalType(NTTP->getType()))
                        .getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(false);
      continue;
    }

    auto *TTP = cast<TemplateTemplateParmDecl>(*P);
    ID.AddInteger(2);
    Profile(ID, C, TTP);
  }
}

TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
    TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    // Note that, per C++20 [temp.over.link]/6, when determining whether
    // template-parameters are equivalent, constraints are ignored.
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      // Rebuild the type parameter with no name, location, or constraint.
      TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(
          *this, getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
          TTP->getDepth(), TTP->getIndex(), nullptr, false,
          TTP->isParameterPack(), /*HasTypeConstraint=*/false,
          TTP->isExpandedParameterPack()
              ? std::optional<unsigned>(TTP->getNumExpansionParameters())
              : std::nullopt);
      CanonParams.push_back(NewTTP);
    } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      // Canonicalize the parameter's type, dropping any constraints.
      QualType T = getUnconstrainedType(getCanonicalType(NTTP->getType()));
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
              getTrivialTypeSourceInfo(ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                NTTP->isParameterPack(),
                                                TInfo);
      }
      CanonParams.push_back(Param);
    } else
      // Nested template template parameters are canonicalized recursively.
      CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
          cast<TemplateTemplateParmDecl>(*P)));
  }

  TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create(
      *this, getTranslationUnitDecl(), SourceLocation(), TTP->getDepth(),
      TTP->getPosition(), TTP->isParameterPack(), nullptr, /*Typename=*/false,
      TemplateParameterList::Create(*this, SourceLocation(), SourceLocation(),
                                    CanonParams, SourceLocation(),
                                    /*RequiresClause=*/nullptr));

  // Get the new insert position for the node we care about.
  // Re-query: building CanonTTP above may have inserted nodes and invalidated
  // the earlier InsertPos.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
  return CanonTTP;
}

/// Returns the C++ ABI kind in use: the -fc++-abi override if given,
/// otherwise the target's default.
TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
  auto Kind = getTargetInfo().getCXXABI().getKind();
  return getLangOpts().CXXABI.value_or(Kind);
}

/// Creates the CXXABI object for this context, or nullptr outside C++.
CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return nullptr;

  switch (getCXXABIKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericARM: // Same as Itanium at this level
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
    return CreateItaniumCXXABI(*this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(*this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}

/// Lazily creates and returns the constant-expression interpreter context.
interp::Context &ASTContext::getInterpContext() {
  if (!InterpContext) {
    InterpContext.reset(new interp::Context(*this));
  }
  return *InterpContext.get();
}

/// Lazily creates and returns the parent-map context.
ParentMapContext &ASTContext::getParentMapContext() {
  if (!ParentMapCtx)
    ParentMapCtx.reset(new ParentMapContext(*this));
  return *ParentMapCtx.get();
}

/// Decides whether address-space map mangling is on, per the language
/// option, deferring to the target for ASMM_Target.
static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
                                          const LangOptions &LangOpts) {
  switch (LangOpts.getAddressSpaceMapMangling()) {
  case LangOptions::ASMM_Target:
    return TI.useAddressSpaceMapMangling();
  case LangOptions::ASMM_On:
    return true;
  case LangOptions::ASMM_Off:
    return false;
  }
  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
}

ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    // The this_() helpers pass the partially-constructed context to the
    // folding sets / maps that need it for profiling.
    : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
      DependentSizedArrayTypes(this_()), DependentSizedExtVectorTypes(this_()),
      DependentAddressSpaceTypes(this_()), DependentVectorTypes(this_()),
      DependentSizedMatrixTypes(this_()),
      FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
      DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()),
      TemplateSpecializationTypes(this_()),
      DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
      DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()),
      ArrayParameterTypes(this_()), CanonTemplateTemplateParms(this_()),
      SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  addTranslationUnitDecl();
}

/// Tears down context-owned side tables; invoked from the destructor.
void ASTContext::cleanup() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);
  Deallocations.clear();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCContainerDecl*,
                      const ASTRecordLayout*>::iterator
       I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  ObjCLayouts.clear();

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  }
  ASTRecordLayouts.clear();

  // AttrVecs are allocated in the bump allocator; run their destructors
  // explicitly.
  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
                                                       AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
  DeclAttrs.clear();

  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
  ModuleInitializers.clear();
}

ASTContext::~ASTContext() { cleanup(); }

void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
  TraversalScope = TopLevelDecls;
  // Cached parent maps are relative to the old scope; drop them.
  getParentMapContext().clear();
}

/// Registers a callback to run (with \p Data) when the context is destroyed.
void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
  Deallocations.push_back({Callback, Data});
}

void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
  ExternalSource = std::move(Source);
}

void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  // One counter per concrete Type class, generated from TypeNodes.inc.
  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
    0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define
TYPE(Name, Parent) \ 972 if (counts[Idx]) \ 973 llvm::errs() << " " << counts[Idx] << " " << #Name \ 974 << " types, " << sizeof(Name##Type) << " each " \ 975 << "(" << counts[Idx] * sizeof(Name##Type) \ 976 << " bytes)\n"; \ 977 TotalBytes += counts[Idx] * sizeof(Name##Type); \ 978 ++Idx; 979 #define ABSTRACT_TYPE(Name, Parent) 980 #include "clang/AST/TypeNodes.inc" 981 982 llvm::errs() << "Total bytes = " << TotalBytes << "\n"; 983 984 // Implicit special member functions. 985 llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/" 986 << NumImplicitDefaultConstructors 987 << " implicit default constructors created\n"; 988 llvm::errs() << NumImplicitCopyConstructorsDeclared << "/" 989 << NumImplicitCopyConstructors 990 << " implicit copy constructors created\n"; 991 if (getLangOpts().CPlusPlus) 992 llvm::errs() << NumImplicitMoveConstructorsDeclared << "/" 993 << NumImplicitMoveConstructors 994 << " implicit move constructors created\n"; 995 llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/" 996 << NumImplicitCopyAssignmentOperators 997 << " implicit copy assignment operators created\n"; 998 if (getLangOpts().CPlusPlus) 999 llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/" 1000 << NumImplicitMoveAssignmentOperators 1001 << " implicit move assignment operators created\n"; 1002 llvm::errs() << NumImplicitDestructorsDeclared << "/" 1003 << NumImplicitDestructors 1004 << " implicit destructors created\n"; 1005 1006 if (ExternalSource) { 1007 llvm::errs() << "\n"; 1008 ExternalSource->PrintStats(); 1009 } 1010 1011 BumpAlloc.PrintStats(); 1012 } 1013 1014 void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M, 1015 bool NotifyListeners) { 1016 if (NotifyListeners) 1017 if (auto *Listener = getASTMutationListener()) 1018 Listener->RedefinedHiddenDefinition(ND, M); 1019 1020 MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M); 1021 } 1022 1023 void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl 
*ND) { 1024 auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl())); 1025 if (It == MergedDefModules.end()) 1026 return; 1027 1028 auto &Merged = It->second; 1029 llvm::DenseSet<Module*> Found; 1030 for (Module *&M : Merged) 1031 if (!Found.insert(M).second) 1032 M = nullptr; 1033 llvm::erase(Merged, nullptr); 1034 } 1035 1036 ArrayRef<Module *> 1037 ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) { 1038 auto MergedIt = 1039 MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl())); 1040 if (MergedIt == MergedDefModules.end()) 1041 return std::nullopt; 1042 return MergedIt->second; 1043 } 1044 1045 void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) { 1046 if (LazyInitializers.empty()) 1047 return; 1048 1049 auto *Source = Ctx.getExternalSource(); 1050 assert(Source && "lazy initializers but no external source"); 1051 1052 auto LazyInits = std::move(LazyInitializers); 1053 LazyInitializers.clear(); 1054 1055 for (auto ID : LazyInits) 1056 Initializers.push_back(Source->GetExternalDecl(ID)); 1057 1058 assert(LazyInitializers.empty() && 1059 "GetExternalDecl for lazy module initializer added more inits"); 1060 } 1061 1062 void ASTContext::addModuleInitializer(Module *M, Decl *D) { 1063 // One special case: if we add a module initializer that imports another 1064 // module, and that module's only initializer is an ImportDecl, simplify. 1065 if (const auto *ID = dyn_cast<ImportDecl>(D)) { 1066 auto It = ModuleInitializers.find(ID->getImportedModule()); 1067 1068 // Maybe the ImportDecl does nothing at all. (Common case.) 1069 if (It == ModuleInitializers.end()) 1070 return; 1071 1072 // Maybe the ImportDecl only imports another ImportDecl. 
1073 auto &Imported = *It->second; 1074 if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) { 1075 Imported.resolve(*this); 1076 auto *OnlyDecl = Imported.Initializers.front(); 1077 if (isa<ImportDecl>(OnlyDecl)) 1078 D = OnlyDecl; 1079 } 1080 } 1081 1082 auto *&Inits = ModuleInitializers[M]; 1083 if (!Inits) 1084 Inits = new (*this) PerModuleInitializers; 1085 Inits->Initializers.push_back(D); 1086 } 1087 1088 void ASTContext::addLazyModuleInitializers(Module *M, 1089 ArrayRef<GlobalDeclID> IDs) { 1090 auto *&Inits = ModuleInitializers[M]; 1091 if (!Inits) 1092 Inits = new (*this) PerModuleInitializers; 1093 Inits->LazyInitializers.insert(Inits->LazyInitializers.end(), 1094 IDs.begin(), IDs.end()); 1095 } 1096 1097 ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) { 1098 auto It = ModuleInitializers.find(M); 1099 if (It == ModuleInitializers.end()) 1100 return std::nullopt; 1101 1102 auto *Inits = It->second; 1103 Inits->resolve(*this); 1104 return Inits->Initializers; 1105 } 1106 1107 void ASTContext::setCurrentNamedModule(Module *M) { 1108 assert(M->isNamedModule()); 1109 assert(!CurrentCXXNamedModule && 1110 "We should set named module for ASTContext for only once"); 1111 CurrentCXXNamedModule = M; 1112 } 1113 1114 bool ASTContext::isInSameModule(const Module *M1, const Module *M2) { 1115 if (!M1 != !M2) 1116 return false; 1117 1118 /// Get the representative module for M. The representative module is the 1119 /// first module unit for a specific primary module name. So that the module 1120 /// units have the same representative module belongs to the same module. 1121 /// 1122 /// The process is helpful to reduce the expensive string operations. 
1123 auto GetRepresentativeModule = [this](const Module *M) { 1124 auto Iter = SameModuleLookupSet.find(M); 1125 if (Iter != SameModuleLookupSet.end()) 1126 return Iter->second; 1127 1128 const Module *RepresentativeModule = 1129 PrimaryModuleNameMap.try_emplace(M->getPrimaryModuleInterfaceName(), M) 1130 .first->second; 1131 SameModuleLookupSet[M] = RepresentativeModule; 1132 return RepresentativeModule; 1133 }; 1134 1135 assert(M1 && "Shouldn't call `isInSameModule` if both M1 and M2 are none."); 1136 return GetRepresentativeModule(M1) == GetRepresentativeModule(M2); 1137 } 1138 1139 ExternCContextDecl *ASTContext::getExternCContextDecl() const { 1140 if (!ExternCContext) 1141 ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl()); 1142 1143 return ExternCContext; 1144 } 1145 1146 BuiltinTemplateDecl * 1147 ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK, 1148 const IdentifierInfo *II) const { 1149 auto *BuiltinTemplate = 1150 BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK); 1151 BuiltinTemplate->setImplicit(); 1152 getTranslationUnitDecl()->addDecl(BuiltinTemplate); 1153 1154 return BuiltinTemplate; 1155 } 1156 1157 BuiltinTemplateDecl * 1158 ASTContext::getMakeIntegerSeqDecl() const { 1159 if (!MakeIntegerSeqDecl) 1160 MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq, 1161 getMakeIntegerSeqName()); 1162 return MakeIntegerSeqDecl; 1163 } 1164 1165 BuiltinTemplateDecl * 1166 ASTContext::getTypePackElementDecl() const { 1167 if (!TypePackElementDecl) 1168 TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element, 1169 getTypePackElementName()); 1170 return TypePackElementDecl; 1171 } 1172 1173 RecordDecl *ASTContext::buildImplicitRecord(StringRef Name, 1174 RecordDecl::TagKind TK) const { 1175 SourceLocation Loc; 1176 RecordDecl *NewDecl; 1177 if (getLangOpts().CPlusPlus) 1178 NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, 1179 Loc, 
&Idents.get(Name)); 1180 else 1181 NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc, 1182 &Idents.get(Name)); 1183 NewDecl->setImplicit(); 1184 NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit( 1185 const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default)); 1186 return NewDecl; 1187 } 1188 1189 TypedefDecl *ASTContext::buildImplicitTypedef(QualType T, 1190 StringRef Name) const { 1191 TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T); 1192 TypedefDecl *NewDecl = TypedefDecl::Create( 1193 const_cast<ASTContext &>(*this), getTranslationUnitDecl(), 1194 SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo); 1195 NewDecl->setImplicit(); 1196 return NewDecl; 1197 } 1198 1199 TypedefDecl *ASTContext::getInt128Decl() const { 1200 if (!Int128Decl) 1201 Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t"); 1202 return Int128Decl; 1203 } 1204 1205 TypedefDecl *ASTContext::getUInt128Decl() const { 1206 if (!UInt128Decl) 1207 UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t"); 1208 return UInt128Decl; 1209 } 1210 1211 void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) { 1212 auto *Ty = new (*this, alignof(BuiltinType)) BuiltinType(K); 1213 R = CanQualType::CreateUnsafe(QualType(Ty, 0)); 1214 Types.push_back(Ty); 1215 } 1216 1217 void ASTContext::InitBuiltinTypes(const TargetInfo &Target, 1218 const TargetInfo *AuxTarget) { 1219 assert((!this->Target || this->Target == &Target) && 1220 "Incorrect target reinitialization"); 1221 assert(VoidTy.isNull() && "Context reinitialized?"); 1222 1223 this->Target = &Target; 1224 this->AuxTarget = AuxTarget; 1225 1226 ABI.reset(createCXXABI(Target)); 1227 AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts); 1228 1229 // C99 6.2.5p19. 1230 InitBuiltinType(VoidTy, BuiltinType::Void); 1231 1232 // C99 6.2.5p2. 1233 InitBuiltinType(BoolTy, BuiltinType::Bool); 1234 // C99 6.2.5p3. 
1235 if (LangOpts.CharIsSigned) 1236 InitBuiltinType(CharTy, BuiltinType::Char_S); 1237 else 1238 InitBuiltinType(CharTy, BuiltinType::Char_U); 1239 // C99 6.2.5p4. 1240 InitBuiltinType(SignedCharTy, BuiltinType::SChar); 1241 InitBuiltinType(ShortTy, BuiltinType::Short); 1242 InitBuiltinType(IntTy, BuiltinType::Int); 1243 InitBuiltinType(LongTy, BuiltinType::Long); 1244 InitBuiltinType(LongLongTy, BuiltinType::LongLong); 1245 1246 // C99 6.2.5p6. 1247 InitBuiltinType(UnsignedCharTy, BuiltinType::UChar); 1248 InitBuiltinType(UnsignedShortTy, BuiltinType::UShort); 1249 InitBuiltinType(UnsignedIntTy, BuiltinType::UInt); 1250 InitBuiltinType(UnsignedLongTy, BuiltinType::ULong); 1251 InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong); 1252 1253 // C99 6.2.5p10. 1254 InitBuiltinType(FloatTy, BuiltinType::Float); 1255 InitBuiltinType(DoubleTy, BuiltinType::Double); 1256 InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble); 1257 1258 // GNU extension, __float128 for IEEE quadruple precision 1259 InitBuiltinType(Float128Ty, BuiltinType::Float128); 1260 1261 // __ibm128 for IBM extended precision 1262 InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128); 1263 1264 // C11 extension ISO/IEC TS 18661-3 1265 InitBuiltinType(Float16Ty, BuiltinType::Float16); 1266 1267 // ISO/IEC JTC1 SC22 WG14 N1169 Extension 1268 InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum); 1269 InitBuiltinType(AccumTy, BuiltinType::Accum); 1270 InitBuiltinType(LongAccumTy, BuiltinType::LongAccum); 1271 InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum); 1272 InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum); 1273 InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum); 1274 InitBuiltinType(ShortFractTy, BuiltinType::ShortFract); 1275 InitBuiltinType(FractTy, BuiltinType::Fract); 1276 InitBuiltinType(LongFractTy, BuiltinType::LongFract); 1277 InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract); 1278 InitBuiltinType(UnsignedFractTy, BuiltinType::UFract); 1279 
InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract); 1280 InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum); 1281 InitBuiltinType(SatAccumTy, BuiltinType::SatAccum); 1282 InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum); 1283 InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum); 1284 InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum); 1285 InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum); 1286 InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract); 1287 InitBuiltinType(SatFractTy, BuiltinType::SatFract); 1288 InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract); 1289 InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract); 1290 InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract); 1291 InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract); 1292 1293 // GNU extension, 128-bit integers. 1294 InitBuiltinType(Int128Ty, BuiltinType::Int128); 1295 InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128); 1296 1297 // C++ 3.9.1p5 1298 if (TargetInfo::isTypeSigned(Target.getWCharType())) 1299 InitBuiltinType(WCharTy, BuiltinType::WChar_S); 1300 else // -fshort-wchar makes wchar_t be unsigned. 1301 InitBuiltinType(WCharTy, BuiltinType::WChar_U); 1302 if (LangOpts.CPlusPlus && LangOpts.WChar) 1303 WideCharTy = WCharTy; 1304 else { 1305 // C99 (or C++ using -fno-wchar). 
1306 WideCharTy = getFromTargetType(Target.getWCharType()); 1307 } 1308 1309 WIntTy = getFromTargetType(Target.getWIntType()); 1310 1311 // C++20 (proposed) 1312 InitBuiltinType(Char8Ty, BuiltinType::Char8); 1313 1314 if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++ 1315 InitBuiltinType(Char16Ty, BuiltinType::Char16); 1316 else // C99 1317 Char16Ty = getFromTargetType(Target.getChar16Type()); 1318 1319 if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++ 1320 InitBuiltinType(Char32Ty, BuiltinType::Char32); 1321 else // C99 1322 Char32Ty = getFromTargetType(Target.getChar32Type()); 1323 1324 // Placeholder type for type-dependent expressions whose type is 1325 // completely unknown. No code should ever check a type against 1326 // DependentTy and users should never see it; however, it is here to 1327 // help diagnose failures to properly check for type-dependent 1328 // expressions. 1329 InitBuiltinType(DependentTy, BuiltinType::Dependent); 1330 1331 // Placeholder type for functions. 1332 InitBuiltinType(OverloadTy, BuiltinType::Overload); 1333 1334 // Placeholder type for bound members. 1335 InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember); 1336 1337 // Placeholder type for unresolved templates. 1338 InitBuiltinType(UnresolvedTemplateTy, BuiltinType::UnresolvedTemplate); 1339 1340 // Placeholder type for pseudo-objects. 1341 InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject); 1342 1343 // "any" type; useful for debugger-like clients. 1344 InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny); 1345 1346 // Placeholder type for unbridged ARC casts. 1347 InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast); 1348 1349 // Placeholder type for builtin functions. 1350 InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn); 1351 1352 // Placeholder type for OMP array sections. 
1353 if (LangOpts.OpenMP) { 1354 InitBuiltinType(ArraySectionTy, BuiltinType::ArraySection); 1355 InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping); 1356 InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator); 1357 } 1358 // Placeholder type for OpenACC array sections, if we are ALSO in OMP mode, 1359 // don't bother, as we're just using the same type as OMP. 1360 if (LangOpts.OpenACC && !LangOpts.OpenMP) { 1361 InitBuiltinType(ArraySectionTy, BuiltinType::ArraySection); 1362 } 1363 if (LangOpts.MatrixTypes) 1364 InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx); 1365 1366 // Builtin types for 'id', 'Class', and 'SEL'. 1367 InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId); 1368 InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass); 1369 InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel); 1370 1371 if (LangOpts.OpenCL) { 1372 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 1373 InitBuiltinType(SingletonId, BuiltinType::Id); 1374 #include "clang/Basic/OpenCLImageTypes.def" 1375 1376 InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler); 1377 InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent); 1378 InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent); 1379 InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue); 1380 InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID); 1381 1382 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 1383 InitBuiltinType(Id##Ty, BuiltinType::Id); 1384 #include "clang/Basic/OpenCLExtensionTypes.def" 1385 } 1386 1387 if (Target.hasAArch64SVETypes() || 1388 (AuxTarget && AuxTarget->hasAArch64SVETypes())) { 1389 #define SVE_TYPE(Name, Id, SingletonId) \ 1390 InitBuiltinType(SingletonId, BuiltinType::Id); 1391 #include "clang/Basic/AArch64SVEACLETypes.def" 1392 } 1393 1394 if (Target.getTriple().isPPC64()) { 1395 #define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \ 1396 InitBuiltinType(Id##Ty, BuiltinType::Id); 1397 #include "clang/Basic/PPCTypes.def" 1398 #define 
PPC_VECTOR_VSX_TYPE(Name, Id, Size) \ 1399 InitBuiltinType(Id##Ty, BuiltinType::Id); 1400 #include "clang/Basic/PPCTypes.def" 1401 } 1402 1403 if (Target.hasRISCVVTypes()) { 1404 #define RVV_TYPE(Name, Id, SingletonId) \ 1405 InitBuiltinType(SingletonId, BuiltinType::Id); 1406 #include "clang/Basic/RISCVVTypes.def" 1407 } 1408 1409 if (Target.getTriple().isWasm() && Target.hasFeature("reference-types")) { 1410 #define WASM_TYPE(Name, Id, SingletonId) \ 1411 InitBuiltinType(SingletonId, BuiltinType::Id); 1412 #include "clang/Basic/WebAssemblyReferenceTypes.def" 1413 } 1414 1415 if (Target.getTriple().isAMDGPU() || 1416 (AuxTarget && AuxTarget->getTriple().isAMDGPU())) { 1417 #define AMDGPU_TYPE(Name, Id, SingletonId) \ 1418 InitBuiltinType(SingletonId, BuiltinType::Id); 1419 #include "clang/Basic/AMDGPUTypes.def" 1420 } 1421 1422 // Builtin type for __objc_yes and __objc_no 1423 ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ? 1424 SignedCharTy : BoolTy); 1425 1426 ObjCConstantStringType = QualType(); 1427 1428 ObjCSuperType = QualType(); 1429 1430 // void * type 1431 if (LangOpts.OpenCLGenericAddressSpace) { 1432 auto Q = VoidTy.getQualifiers(); 1433 Q.setAddressSpace(LangAS::opencl_generic); 1434 VoidPtrTy = getPointerType(getCanonicalType( 1435 getQualifiedType(VoidTy.getUnqualifiedType(), Q))); 1436 } else { 1437 VoidPtrTy = getPointerType(VoidTy); 1438 } 1439 1440 // nullptr type (C++0x 2.14.7) 1441 InitBuiltinType(NullPtrTy, BuiltinType::NullPtr); 1442 1443 // half type (OpenCL 6.1.1.1) / ARM NEON __fp16 1444 InitBuiltinType(HalfTy, BuiltinType::Half); 1445 1446 InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16); 1447 1448 // Builtin type used to help define __builtin_va_list. 1449 VaListTagDecl = nullptr; 1450 1451 // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls. 
1452 if (LangOpts.MicrosoftExt || LangOpts.Borland) { 1453 MSGuidTagDecl = buildImplicitRecord("_GUID"); 1454 getTranslationUnitDecl()->addDecl(MSGuidTagDecl); 1455 } 1456 } 1457 1458 DiagnosticsEngine &ASTContext::getDiagnostics() const { 1459 return SourceMgr.getDiagnostics(); 1460 } 1461 1462 AttrVec& ASTContext::getDeclAttrs(const Decl *D) { 1463 AttrVec *&Result = DeclAttrs[D]; 1464 if (!Result) { 1465 void *Mem = Allocate(sizeof(AttrVec)); 1466 Result = new (Mem) AttrVec; 1467 } 1468 1469 return *Result; 1470 } 1471 1472 /// Erase the attributes corresponding to the given declaration. 1473 void ASTContext::eraseDeclAttrs(const Decl *D) { 1474 llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D); 1475 if (Pos != DeclAttrs.end()) { 1476 Pos->second->~AttrVec(); 1477 DeclAttrs.erase(Pos); 1478 } 1479 } 1480 1481 // FIXME: Remove ? 1482 MemberSpecializationInfo * 1483 ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) { 1484 assert(Var->isStaticDataMember() && "Not a static data member"); 1485 return getTemplateOrSpecializationInfo(Var) 1486 .dyn_cast<MemberSpecializationInfo *>(); 1487 } 1488 1489 ASTContext::TemplateOrSpecializationInfo 1490 ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) { 1491 llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos = 1492 TemplateOrInstantiation.find(Var); 1493 if (Pos == TemplateOrInstantiation.end()) 1494 return {}; 1495 1496 return Pos->second; 1497 } 1498 1499 void 1500 ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl, 1501 TemplateSpecializationKind TSK, 1502 SourceLocation PointOfInstantiation) { 1503 assert(Inst->isStaticDataMember() && "Not a static data member"); 1504 assert(Tmpl->isStaticDataMember() && "Not a static data member"); 1505 setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo( 1506 Tmpl, TSK, PointOfInstantiation)); 1507 } 1508 1509 void 1510 
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst, 1511 TemplateOrSpecializationInfo TSI) { 1512 assert(!TemplateOrInstantiation[Inst] && 1513 "Already noted what the variable was instantiated from"); 1514 TemplateOrInstantiation[Inst] = TSI; 1515 } 1516 1517 NamedDecl * 1518 ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) { 1519 return InstantiatedFromUsingDecl.lookup(UUD); 1520 } 1521 1522 void 1523 ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) { 1524 assert((isa<UsingDecl>(Pattern) || 1525 isa<UnresolvedUsingValueDecl>(Pattern) || 1526 isa<UnresolvedUsingTypenameDecl>(Pattern)) && 1527 "pattern decl is not a using decl"); 1528 assert((isa<UsingDecl>(Inst) || 1529 isa<UnresolvedUsingValueDecl>(Inst) || 1530 isa<UnresolvedUsingTypenameDecl>(Inst)) && 1531 "instantiation did not produce a using decl"); 1532 assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists"); 1533 InstantiatedFromUsingDecl[Inst] = Pattern; 1534 } 1535 1536 UsingEnumDecl * 1537 ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) { 1538 return InstantiatedFromUsingEnumDecl.lookup(UUD); 1539 } 1540 1541 void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst, 1542 UsingEnumDecl *Pattern) { 1543 assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists"); 1544 InstantiatedFromUsingEnumDecl[Inst] = Pattern; 1545 } 1546 1547 UsingShadowDecl * 1548 ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) { 1549 return InstantiatedFromUsingShadowDecl.lookup(Inst); 1550 } 1551 1552 void 1553 ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst, 1554 UsingShadowDecl *Pattern) { 1555 assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists"); 1556 InstantiatedFromUsingShadowDecl[Inst] = Pattern; 1557 } 1558 1559 FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) { 1560 return InstantiatedFromUnnamedFieldDecl.lookup(Field); 1561 } 1562 
void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
                                                     FieldDecl *Tmpl) {
  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
         "Already noted what unnamed field was instantiated from");

  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).begin();
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).end();
}

unsigned
ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
  auto Range = overridden_methods(Method);
  return Range.end() - Range.begin();
}

/// Return the methods overridden by Method; the table is keyed on the
/// canonical declaration, so an empty range means "overrides nothing".
ASTContext::overridden_method_range
ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
      OverriddenMethods.find(Method->getCanonicalDecl());
  if (Pos == OverriddenMethods.end())
    return overridden_method_range(nullptr, nullptr);
  return overridden_method_range(Pos->second.begin(), Pos->second.end());
}

/// Record that (canonical) Method overrides (canonical) Overridden.
void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
                                     const CXXMethodDecl *Overridden) {
  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
  OverriddenMethods[Method].push_back(Overridden);
}

/// Collect methods D overrides: C++ methods from the side table, ObjC methods
/// via ObjCMethodDecl's own lookup; other decls contribute nothing.
void ASTContext::getOverriddenMethods(
    const NamedDecl *D,
    SmallVectorImpl<const NamedDecl *> &Overridden) const {
  assert(D);

  if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
    Overridden.append(overridden_methods_begin(CXXMethod),
                      overridden_methods_end(CXXMethod));
    return;
  }

  const auto *Method = dyn_cast<ObjCMethodDecl>(D);
  if (!Method)
    return;

  SmallVector<const ObjCMethodDecl *, 8> OverDecls;
  Method->getOverriddenMethods(OverDecls);
  Overridden.append(OverDecls.begin(), OverDecls.end());
}

/// Append a locally-parsed ImportDecl to the singly-linked chain of local
/// imports (FirstLocalImport/LastLocalImport).
void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
  assert(!Import->getNextLocalImport() &&
         "Import declaration already in the chain");
  assert(!Import->isFromASTFile() && "Non-local import declaration");
  if (!FirstLocalImport) {
    FirstLocalImport = Import;
    LastLocalImport = Import;
    return;
  }

  LastLocalImport->setNextLocalImport(Import);
  LastLocalImport = Import;
}

//===----------------------------------------------------------------------===//
//                         Type Sizing and Analysis
//===----------------------------------------------------------------------===//

/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
/// scalar floating point type.
const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
  switch (T->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a floating point type!");
  case BuiltinType::BFloat16:
    return Target->getBFloat16Format();
  case BuiltinType::Float16:
    return Target->getHalfFormat();
  case BuiltinType::Half:
    return Target->getHalfFormat();
  case BuiltinType::Float: return Target->getFloatFormat();
  case BuiltinType::Double: return Target->getDoubleFormat();
  case BuiltinType::Ibm128:
    return Target->getIbm128Format();
  case BuiltinType::LongDouble:
    // On OpenMP target devices, long double/__float128 take the host
    // (aux target) format.
    if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
      return AuxTarget->getLongDoubleFormat();
    return Target->getLongDoubleFormat();
  case BuiltinType::Float128:
    if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
      return AuxTarget->getFloat128Format();
    return Target->getFloat128Format();
  }
}

/// Return the alignment of declaration D in bits, converted to CharUnits.
/// ForAlignof selects C++ alignof semantics (e.g. references report the
/// referee's alignment, and large-array bumping is skipped).
CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  unsigned Align = Target->getCharWidth();

  const unsigned AlignFromAttr = D->getMaxAlignment();
  if (AlignFromAttr)
    Align = AlignFromAttr;

  // __attribute__((aligned)) can increase or decrease alignment
  // *except* on a struct or struct member, where it only increases
  // alignment unless 'packed' is also specified.
  //
  // It is an error for alignas to decrease alignment, so we can
  // ignore that possibility; Sema should diagnose it.
  bool UseAlignAttrOnly;
  if (const FieldDecl *FD = dyn_cast<FieldDecl>(D))
    UseAlignAttrOnly =
        FD->hasAttr<PackedAttr>() || FD->getParent()->hasAttr<PackedAttr>();
  else
    UseAlignAttrOnly = AlignFromAttr != 0;
  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing
  } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
    QualType T = VD->getType();
    if (const auto *RT = T->getAs<ReferenceType>()) {
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(T);
    if (T->isFunctionType())
      Align = getTypeInfoImpl(T.getTypePtr()).Align;
    else if (!BaseT->isIncompleteType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          if (isa<VariableArrayType>(arrayType))
            Align = std::max(Align, Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(arrayType) &&
                   MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
            Align = std::max(Align, Target->getLargeArrayAlign());
        }
      }
      Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
      if (BaseT.getQualifiers().hasUnaligned())
        Align = Target->getCharWidth();
    }

    // Ensure minimum alignment for global variables.
    if (const auto *VD = dyn_cast<VarDecl>(D))
      if (VD->hasGlobalStorage() && !ForAlignof) {
        uint64_t TypeSize =
            !BaseT->isIncompleteType() ? getTypeSize(T.getTypePtr()) : 0;
        Align = std::max(Align, getMinGlobalAlignOfVar(TypeSize, VD));
      }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        Align = std::min(Align, FieldAlign);
      }
    }
  }

  // Some targets have hard limitation on the maximum requestable alignment in
  // aligned attribute for static variables.
  const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
  const auto *VD = dyn_cast<VarDecl>(D);
  if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
    Align = std::min(Align, MaxAlignedAttr);

  return toCharUnitsFromBits(Align);
}

/// Alignment of the memory used for an exception object, per the target ABI.
CharUnits ASTContext::getExnObjectAlignment() const {
  return toCharUnitsFromBits(Target->getExnObjectAlignment());
}

// getTypeInfoDataSizeInChars - Return the size of a type, in
// chars. If the type is a record, its data size is returned. This is
// the size of the memcpy that's performed when assigning this type
// using a trivial copy/move assignment operator.
1776 TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const { 1777 TypeInfoChars Info = getTypeInfoInChars(T); 1778 1779 // In C++, objects can sometimes be allocated into the tail padding 1780 // of a base-class subobject. We decide whether that's possible 1781 // during class layout, so here we can just trust the layout results. 1782 if (getLangOpts().CPlusPlus) { 1783 if (const auto *RT = T->getAs<RecordType>(); 1784 RT && !RT->getDecl()->isInvalidDecl()) { 1785 const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl()); 1786 Info.Width = layout.getDataSize(); 1787 } 1788 } 1789 1790 return Info; 1791 } 1792 1793 /// getConstantArrayInfoInChars - Performing the computation in CharUnits 1794 /// instead of in bits prevents overflowing the uint64_t for some large arrays. 1795 TypeInfoChars 1796 static getConstantArrayInfoInChars(const ASTContext &Context, 1797 const ConstantArrayType *CAT) { 1798 TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType()); 1799 uint64_t Size = CAT->getZExtSize(); 1800 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <= 1801 (uint64_t)(-1)/Size) && 1802 "Overflow in array type char size evaluation"); 1803 uint64_t Width = EltInfo.Width.getQuantity() * Size; 1804 unsigned Align = EltInfo.Align.getQuantity(); 1805 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || 1806 Context.getTargetInfo().getPointerWidth(LangAS::Default) == 64) 1807 Width = llvm::alignTo(Width, Align); 1808 return TypeInfoChars(CharUnits::fromQuantity(Width), 1809 CharUnits::fromQuantity(Align), 1810 EltInfo.AlignRequirement); 1811 } 1812 1813 TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const { 1814 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1815 return getConstantArrayInfoInChars(*this, CAT); 1816 TypeInfo Info = getTypeInfo(T); 1817 return TypeInfoChars(toCharUnitsFromBits(Info.Width), 1818 toCharUnitsFromBits(Info.Align), Info.AlignRequirement); 1819 } 1820 1821 
TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const {
  return getTypeInfoInChars(T.getTypePtr());
}

bool ASTContext::isPromotableIntegerType(QualType T) const {
  // HLSL doesn't promote all small integer types to int, it
  // just uses the rank-based promotion rules for all types.
  if (getLangOpts().HLSL)
    return false;

  // Builtin types narrower than 'int' (and the char/bool family) promote.
  if (const auto *BT = T->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Bool:
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
      return true;
    default:
      return false;
    }

  // Enumerated types are promotable to their compatible integer types
  // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
  if (const auto *ET = T->getAs<EnumType>()) {
    // Scoped enums, dependent enums, and enums without a computed promotion
    // type are not promotable.
    if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() ||
        ET->getDecl()->isScoped())
      return false;

    return true;
  }

  return false;
}

bool ASTContext::isAlignmentRequired(const Type *T) const {
  return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None;
}

bool ASTContext::isAlignmentRequired(QualType T) const {
  return isAlignmentRequired(T.getTypePtr());
}

unsigned ASTContext::getTypeAlignIfKnown(QualType T,
                                         bool NeedsPreferredAlignment) const {
  // An alignment on a typedef overrides anything else.
  if (const auto *TT = T->getAs<TypedefType>())
    if (unsigned Align = TT->getDecl()->getMaxAlignment())
      return Align;

  // If we have an (array of) complete type, we're done.
  T = getBaseElementType(T);
  if (!T->isIncompleteType())
    return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T);

  // If we had an array type, its element type might be a typedef
  // type with an alignment attribute.
  if (const auto *TT = T->getAs<TypedefType>())
    if (unsigned Align = TT->getDecl()->getMaxAlignment())
      return Align;

  // Otherwise, see if the declaration of the type had an attribute.
  if (const auto *TT = T->getAs<TagType>())
    return TT->getDecl()->getMaxAlignment();

  // Alignment is unknown.
  return 0;
}

TypeInfo ASTContext::getTypeInfo(const Type *T) const {
  // Memoized: type size/alignment queries are hot.
  TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
  if (I != MemoizedTypeInfo.end())
    return I->second;

  // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
  TypeInfo TI = getTypeInfoImpl(T);
  MemoizedTypeInfo[T] = TI;
  return TI;
}

/// getTypeInfoImpl - Return the size of the specified type, in bits. This
/// method does not work on incomplete types.
///
/// FIXME: Pointers into different addr spaces could have different sizes and
/// alignment requirements: getPointerInfo should take an AddrSpace, this
/// should take a QualType, &c.
TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
  // Defaults; most cases below overwrite these.
  uint64_t Width = 0;
  unsigned Align = 8;
  AlignRequirementKind AlignRequirement = AlignRequirementKind::None;
  LangAS AS = LangAS::Default;
  switch (T->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)                       \
  case Type::Class:                                                            \
    assert(!T->isDependentType() && "should not see dependent types here");    \
    return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Should not see dependent types");

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // GCC extension: alignof(function) = 32 bits
    Width = 0;
    Align = 32;
    break;

  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::ConstantArray:
  case Type::ArrayParameter: {
    // Model non-constant sized arrays as size zero, but track the alignment.
    uint64_t Size = 0;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
      Size = CAT->getZExtSize();

    TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType());
    assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
           "Overflow in array type bit size evaluation");
    Width = EltInfo.Width * Size;
    Align = EltInfo.Align;
    AlignRequirement = EltInfo.AlignRequirement;
    // Round the size up to the element alignment, except under the 32-bit
    // Microsoft ABI (mirrors getConstantArrayInfoInChars).
    if (!getTargetInfo().getCXXABI().isMicrosoft() ||
        getTargetInfo().getPointerWidth(LangAS::Default) == 64)
      Width = llvm::alignTo(Width, Align);
    break;
  }

  case Type::ExtVector:
  case Type::Vector: {
    const auto *VT = cast<VectorType>(T);
    TypeInfo EltInfo = getTypeInfo(VT->getElementType());
    // ext_vector_type bool vectors are packed one bit per element.
    Width = VT->isExtVectorBoolType() ? VT->getNumElements()
                                      : EltInfo.Width * VT->getNumElements();
    // Enforce at least byte size and alignment.
    Width = std::max<unsigned>(8, Width);
    Align = std::max<unsigned>(8, Width);

    // If the alignment is not a power of 2, round up to the next power of 2.
    // This happens for non-power-of-2 length vectors.
    if (Align & (Align - 1)) {
      Align = llvm::bit_ceil(Align);
      Width = llvm::alignTo(Width, Align);
    }
    // Adjust the alignment based on the target max.
    uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
    if (TargetVectorAlign && TargetVectorAlign < Align)
      Align = TargetVectorAlign;
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
      // Adjust the alignment for fixed-length SVE vectors. This is important
      // for non-power-of-2 vector lengths.
      Align = 128;
    else if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
      // Adjust the alignment for fixed-length SVE predicates.
      Align = 16;
    else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
             VT->getVectorKind() == VectorKind::RVVFixedLengthMask)
      // Adjust the alignment for fixed-length RVV vectors.
      Align = std::min<unsigned>(64, Width);
    break;
  }

  case Type::ConstantMatrix: {
    const auto *MT = cast<ConstantMatrixType>(T);
    TypeInfo ElementInfo = getTypeInfo(MT->getElementType());
    // The internal layout of a matrix value is implementation defined.
    // Initially be ABI compatible with arrays with respect to alignment and
    // size.
    Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns();
    Align = ElementInfo.Align;
    break;
  }

  case Type::Builtin:
    switch (cast<BuiltinType>(T)->getKind()) {
    default: llvm_unreachable("Unknown builtin type!");
    case BuiltinType::Void:
      // GCC extension: alignof(void) = 8 bits.
      Width = 0;
      Align = 8;
      break;
    case BuiltinType::Bool:
      Width = Target->getBoolWidth();
      Align = Target->getBoolAlign();
      break;
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::UChar:
    case BuiltinType::SChar:
    case BuiltinType::Char8:
      Width = Target->getCharWidth();
      Align = Target->getCharAlign();
      break;
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
      Width = Target->getWCharWidth();
      Align = Target->getWCharAlign();
      break;
    case BuiltinType::Char16:
      Width = Target->getChar16Width();
      Align = Target->getChar16Align();
      break;
    case BuiltinType::Char32:
      Width = Target->getChar32Width();
      Align = Target->getChar32Align();
      break;
    case BuiltinType::UShort:
    case BuiltinType::Short:
      Width = Target->getShortWidth();
      Align = Target->getShortAlign();
      break;
    case BuiltinType::UInt:
    case BuiltinType::Int:
      Width = Target->getIntWidth();
      Align = Target->getIntAlign();
      break;
    case BuiltinType::ULong:
    case BuiltinType::Long:
      Width = Target->getLongWidth();
      Align = Target->getLongAlign();
      break;
    case BuiltinType::ULongLong:
    case BuiltinType::LongLong:
      Width = Target->getLongLongWidth();
      Align = Target->getLongLongAlign();
      break;
    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      Width = 128;
      Align = Target->getInt128Align();
      break;
    case BuiltinType::ShortAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatUShortAccum:
      Width = Target->getShortAccumWidth();
      Align = Target->getShortAccumAlign();
      break;
    case BuiltinType::Accum:
    case BuiltinType::UAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatUAccum:
      Width = Target->getAccumWidth();
      Align = Target->getAccumAlign();
      break;
    case BuiltinType::LongAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatULongAccum:
      Width = Target->getLongAccumWidth();
      Align = Target->getLongAccumAlign();
      break;
    case BuiltinType::ShortFract:
    case BuiltinType::UShortFract:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatUShortFract:
      Width = Target->getShortFractWidth();
      Align = Target->getShortFractAlign();
      break;
    case BuiltinType::Fract:
    case BuiltinType::UFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatUFract:
      Width = Target->getFractWidth();
      Align = Target->getFractAlign();
      break;
    case BuiltinType::LongFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatULongFract:
      Width = Target->getLongFractWidth();
      Align = Target->getLongFractAlign();
      break;
    case BuiltinType::BFloat16:
      // Fall back to the aux target for SYCL/OpenMP device compiles when the
      // device target lacks the type; otherwise Width/Align stay 0/8.
      if (Target->hasBFloat16Type()) {
        Width = Target->getBFloat16Width();
        Align = Target->getBFloat16Align();
      } else if ((getLangOpts().SYCLIsDevice ||
                  (getLangOpts().OpenMP &&
                   getLangOpts().OpenMPIsTargetDevice)) &&
                 AuxTarget->hasBFloat16Type()) {
        Width = AuxTarget->getBFloat16Width();
        Align = AuxTarget->getBFloat16Align();
      }
      break;
    case BuiltinType::Float16:
    case BuiltinType::Half:
      if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
          !getLangOpts().OpenMPIsTargetDevice) {
        Width = Target->getHalfWidth();
        Align = Target->getHalfAlign();
      } else {
        assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
               "Expected OpenMP device compilation.");
        Width = AuxTarget->getHalfWidth();
        Align = AuxTarget->getHalfAlign();
      }
      break;
    case BuiltinType::Float:
      Width = Target->getFloatWidth();
      Align = Target->getFloatAlign();
      break;
    case BuiltinType::Double:
      Width = Target->getDoubleWidth();
      Align = Target->getDoubleAlign();
      break;
    case BuiltinType::Ibm128:
      Width = Target->getIbm128Width();
      Align = Target->getIbm128Align();
      break;
    case BuiltinType::LongDouble:
      // For OpenMP device compiles, use the host's long double layout when it
      // differs from the device's, so host and device agree.
      if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
          (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
           Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
        Width = AuxTarget->getLongDoubleWidth();
        Align = AuxTarget->getLongDoubleAlign();
      } else {
        Width = Target->getLongDoubleWidth();
        Align = Target->getLongDoubleAlign();
      }
      break;
    case BuiltinType::Float128:
      if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
          !getLangOpts().OpenMPIsTargetDevice) {
        Width = Target->getFloat128Width();
        Align = Target->getFloat128Align();
      } else {
        assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
               "Expected OpenMP device compilation.");
        Width = AuxTarget->getFloat128Width();
        Align = AuxTarget->getFloat128Align();
      }
      break;
    case BuiltinType::NullPtr:
      // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*)
      Width = Target->getPointerWidth(LangAS::Default);
      Align = Target->getPointerAlign(LangAS::Default);
      break;
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      Width = Target->getPointerWidth(LangAS::Default);
      Align = Target->getPointerAlign(LangAS::Default);
      break;
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
      // OpenCL opaque types are modeled as pointers in their type's address
      // space.
      AS = Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T));
      Width = Target->getPointerWidth(AS);
      Align = Target->getPointerAlign(AS);
      break;
    // The SVE types are effectively target-specific. The length of an
    // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple
    // of 128 bits. There is one predicate bit for each vector byte, so the
    // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits.
    //
    // Because the length is only known at runtime, we use a dummy value
    // of 0 for the static length. The alignment values are those defined
    // by the Procedure Call Standard for the Arm Architecture.
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits,    \
                        IsSigned, IsFP, IsBF)                                  \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 128;                                                               \
    break;
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls)         \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 16;                                                                \
    break;
#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId)                    \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 16;                                                                \
    break;
#include "clang/Basic/AArch64SVEACLETypes.def"
#define PPC_VECTOR_TYPE(Name, Id, Size)                                        \
  case BuiltinType::Id:                                                        \
    Width = Size;                                                              \
    Align = Size;                                                              \
    break;
#include "clang/Basic/PPCTypes.def"
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned,   \
                        IsFP, IsBF)                                            \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = ElBits;                                                            \
    break;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind)                      \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 8;                                                                 \
    break;
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId)                                       \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 8;                                                                 \
    break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_OPAQUE_PTR_TYPE(NAME, MANGLEDNAME, AS, WIDTH, ALIGN, ID,        \
                               SINGLETONID)                                    \
  case BuiltinType::ID:                                                        \
    Width = WIDTH;                                                             \
    Align = ALIGN;                                                             \
    break;
#include "clang/Basic/AMDGPUTypes.def"
    }
    break;
  case Type::ObjCObjectPointer:
    Width = Target->getPointerWidth(LangAS::Default);
    Align = Target->getPointerAlign(LangAS::Default);
    break;
  case Type::BlockPointer:
    AS = cast<BlockPointerType>(T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AS);
    Align = Target->getPointerAlign(AS);
    break;
  case Type::LValueReference:
  case Type::RValueReference:
    // alignof and sizeof should never enter this code path here, so we go
    // the pointer route.
    AS = cast<ReferenceType>(T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AS);
    Align = Target->getPointerAlign(AS);
    break;
  case Type::Pointer:
    AS = cast<PointerType>(T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AS);
    Align = Target->getPointerAlign(AS);
    break;
  case Type::MemberPointer: {
    // Member pointer layout is ABI-specific; delegate to the C++ ABI object.
    const auto *MPT = cast<MemberPointerType>(T);
    CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
    Width = MPI.Width;
    Align = MPI.Align;
    break;
  }
  case Type::Complex: {
    // Complex types have the same alignment as their elements, but twice the
    // size.
    TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
    Width = EltInfo.Width * 2;
    Align = EltInfo.Align;
    break;
  }
  case Type::ObjCObject:
    return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
  case Type::Adjusted:
  case Type::Decayed:
    return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
  case Type::ObjCInterface: {
    const auto *ObjCI = cast<ObjCInterfaceType>(T);
    // Give invalid declarations a dummy 1-byte size/alignment.
    if (ObjCI->getDecl()->isInvalidDecl()) {
      Width = 8;
      Align = 8;
      break;
    }
    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
    Width = toBits(Layout.getSize());
    Align = toBits(Layout.getAlignment());
    break;
  }
  case Type::BitInt: {
    const auto *EIT = cast<BitIntType>(T);
    Align = Target->getBitIntAlign(EIT->getNumBits());
    Width = Target->getBitIntWidth(EIT->getNumBits());
    break;
  }
  case Type::Record:
  case Type::Enum: {
    const auto *TT = cast<TagType>(T);

    // Give invalid declarations a dummy 1-byte size/alignment.
    if (TT->getDecl()->isInvalidDecl()) {
      Width = 8;
      Align = 8;
      break;
    }

    if (const auto *ET = dyn_cast<EnumType>(TT)) {
      // Enums have the layout of their underlying integer type, except that
      // an aligned attribute on the enum overrides the alignment.
      const EnumDecl *ED = ET->getDecl();
      TypeInfo Info =
          getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType());
      if (unsigned AttrAlign = ED->getMaxAlignment()) {
        Info.Align = AttrAlign;
        Info.AlignRequirement = AlignRequirementKind::RequiredByEnum;
      }
      return Info;
    }

    const auto *RT = cast<RecordType>(TT);
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getASTRecordLayout(RD);
    Width = toBits(Layout.getSize());
    Align = toBits(Layout.getAlignment());
    AlignRequirement = RD->hasAttr<AlignedAttr>()
                           ? AlignRequirementKind::RequiredByRecord
                           : AlignRequirementKind::None;
    break;
  }

  case Type::SubstTemplateTypeParm:
    return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
                       getReplacementType().getTypePtr());

  case Type::Auto:
  case Type::DeducedTemplateSpecialization: {
    const auto *A = cast<DeducedType>(T);
    assert(!A->getDeducedType().isNull() &&
           "cannot request the size of an undeduced or dependent auto type");
    return getTypeInfo(A->getDeducedType().getTypePtr());
  }

  case Type::Paren:
    return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());

  case Type::MacroQualified:
    return getTypeInfo(
        cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr());

  case Type::ObjCTypeParam:
    return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());

  case Type::Using:
    return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr());

  case Type::Typedef: {
    const auto *TT = cast<TypedefType>(T);
    TypeInfo Info = getTypeInfo(TT->desugar().getTypePtr());
    // If the typedef has an aligned attribute on it, it overrides any computed
    // alignment we have. This violates the GCC documentation (which says that
    // attribute(aligned) can only round up) but matches its implementation.
    if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
      Align = AttrAlign;
      AlignRequirement = AlignRequirementKind::RequiredByTypedef;
    } else {
      Align = Info.Align;
      AlignRequirement = Info.AlignRequirement;
    }
    Width = Info.Width;
    break;
  }

  case Type::Elaborated:
    return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());

  case Type::Attributed:
    return getTypeInfo(
        cast<AttributedType>(T)->getEquivalentType().getTypePtr());

  case Type::CountAttributed:
    return getTypeInfo(cast<CountAttributedType>(T)->desugar().getTypePtr());

  case Type::BTFTagAttributed:
    return getTypeInfo(
        cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());

  case Type::Atomic: {
    // Start with the base type information.
    TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
    Width = Info.Width;
    Align = Info.Align;

    if (!Width) {
      // An otherwise zero-sized type should still generate an
      // atomic operation.
      Width = Target->getCharWidth();
      assert(Align);
    } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
      // If the size of the type doesn't exceed the platform's max
      // atomic promotion width, make the size and alignment more
      // favorable to atomic operations:

      // Round the size up to a power of 2.
      Width = llvm::bit_ceil(Width);

      // Set the alignment equal to the size.
      Align = static_cast<unsigned>(Width);
    }
  }
  break;

  case Type::Pipe:
    // OpenCL pipes are modeled as global pointers.
    Width = Target->getPointerWidth(LangAS::opencl_global);
    Align = Target->getPointerAlign(LangAS::opencl_global);
    break;
  }

  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
  return TypeInfo(Width, Align, AlignRequirement);
}

/// Return the alignment of a type, in bits, before any adjustments made by
/// record layout (memoized). Records and ObjC interfaces report their
/// unadjusted layout alignment; everything else falls back to getTypeAlign.
unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
  UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
  if (I != MemoizedUnadjustedAlign.end())
    return I->second;

  unsigned UnadjustedAlign;
  if (const auto *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getASTRecordLayout(RD);
    UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
  } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
    UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
  } else {
    UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
  }

  MemoizedUnadjustedAlign[T] = UnadjustedAlign;
  return UnadjustedAlign;
}

unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
  // Target-triple-determined default; the type itself is currently unused.
  unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
      getTargetInfo().getTriple(), Target->getTargetOpts().FeatureMap);
  return SimdAlign;
}

/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
  return CharUnits::fromQuantity(BitSize / getCharWidth());
}

/// toBits - Convert a size in characters to a size in bits.
2463 int64_t ASTContext::toBits(CharUnits CharSize) const { 2464 return CharSize.getQuantity() * getCharWidth(); 2465 } 2466 2467 /// getTypeSizeInChars - Return the size of the specified type, in characters. 2468 /// This method does not work on incomplete types. 2469 CharUnits ASTContext::getTypeSizeInChars(QualType T) const { 2470 return getTypeInfoInChars(T).Width; 2471 } 2472 CharUnits ASTContext::getTypeSizeInChars(const Type *T) const { 2473 return getTypeInfoInChars(T).Width; 2474 } 2475 2476 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in 2477 /// characters. This method does not work on incomplete types. 2478 CharUnits ASTContext::getTypeAlignInChars(QualType T) const { 2479 return toCharUnitsFromBits(getTypeAlign(T)); 2480 } 2481 CharUnits ASTContext::getTypeAlignInChars(const Type *T) const { 2482 return toCharUnitsFromBits(getTypeAlign(T)); 2483 } 2484 2485 /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a 2486 /// type, in characters, before alignment adjustments. This method does 2487 /// not work on incomplete types. 2488 CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const { 2489 return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); 2490 } 2491 CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const { 2492 return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); 2493 } 2494 2495 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified 2496 /// type for the current target in bits. This can be different than the ABI 2497 /// alignment in cases where it is beneficial for performance or backwards 2498 /// compatibility preserving to overalign a data type. (Note: despite the name, 2499 /// the preferred alignment is ABI-impacting, and not an optimization.) 
unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
  TypeInfo TI = getTypeInfo(T);
  unsigned ABIAlign = TI.Align;

  // Arrays prefer the alignment of their element type.
  T = T->getBaseElementTypeUnsafe();

  // The preferred alignment of member pointers is that of a pointer.
  if (T->isMemberPointerType())
    return getPreferredTypeAlign(getPointerDiffType().getTypePtr());

  if (!Target->allowsLargerPreferedTypeAlignment())
    return ABIAlign;

  if (const auto *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();

    // When used as part of a typedef, or together with a 'packed' attribute,
    // the 'aligned' attribute can be used to decrease alignment. Note that the
    // 'packed' case is already taken into consideration when computing the
    // alignment, we only need to handle the typedef case here.
    if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef ||
        RD->isInvalidDecl())
      return ABIAlign;

    unsigned PreferredAlign = static_cast<unsigned>(
        toBits(getASTRecordLayout(RD).PreferredAlignment));
    assert(PreferredAlign >= ABIAlign &&
           "PreferredAlign should be at least as large as ABIAlign.");
    return PreferredAlign;
  }

  // Double (and, for targets supporting AIX `power` alignment, long double) and
  // long long should be naturally aligned (despite requiring less alignment) if
  // possible.
  if (const auto *CT = T->getAs<ComplexType>())
    T = CT->getElementType().getTypePtr();
  if (const auto *ET = T->getAs<EnumType>())
    T = ET->getDecl()->getIntegerType().getTypePtr();
  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
      T->isSpecificBuiltinType(BuiltinType::LongLong) ||
      T->isSpecificBuiltinType(BuiltinType::ULongLong) ||
      (T->isSpecificBuiltinType(BuiltinType::LongDouble) &&
       Target->defaultsToAIXPowerAlignment()))
    // Don't increase the alignment if an alignment attribute was specified on a
    // typedef declaration.
    if (!TI.isAlignRequired())
      return std::max(ABIAlign, (unsigned)getTypeSize(T));

  return ABIAlign;
}

/// getTargetDefaultAlignForAttributeAligned - Return the default alignment
/// for __attribute__((aligned)) on this target, to be used if no alignment
/// value is specified.
unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
  return getTargetInfo().getDefaultAlignForAttributeAligned();
}

/// getAlignOfGlobalVar - Return the alignment in bits that should be given
/// to a global variable of the specified type.
unsigned ASTContext::getAlignOfGlobalVar(QualType T, const VarDecl *VD) const {
  uint64_t TypeSize = getTypeSize(T.getTypePtr());
  return std::max(getPreferredTypeAlign(T),
                  getMinGlobalAlignOfVar(TypeSize, VD));
}

/// getAlignOfGlobalVarInChars - Return the alignment in characters that
/// should be given to a global variable of the specified type.
CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T,
                                                 const VarDecl *VD) const {
  return toCharUnitsFromBits(getAlignOfGlobalVar(T, VD));
}

/// Return the target's minimum alignment, in bits, for a global variable of
/// the given size. \p VD may be null.
unsigned ASTContext::getMinGlobalAlignOfVar(uint64_t Size,
                                            const VarDecl *VD) const {
  // Make the default handling as that of a non-weak definition in the
  // current translation unit.
  bool HasNonWeakDef = !VD || (VD->hasDefinition() && !VD->isWeak());
  return getTargetInfo().getMinGlobalAlign(Size, HasNonWeakDef);
}

/// Return the accumulated offset, in characters, of the chain of base-class
/// subobjects that share a vbptr with \p RD (Microsoft ABI layout).
CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
  CharUnits Offset = CharUnits::Zero();
  const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
  while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
    Offset += Layout->getBaseClassOffset(Base);
    Layout = &getASTRecordLayout(Base);
  }
  return Offset;
}

/// Compute the 'this'-pointer adjustment, in characters, implied by the
/// derived-to-base (or base-to-derived) path stored in the member pointer
/// constant \p MP.
CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const {
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  CharUnits ThisAdjustment = CharUnits::Zero();
  ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
  bool DerivedMember = MP.isMemberPointerToDerivedMember();
  const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
  // Walk the path, accumulating each base-class offset. For a pointer to a
  // derived member the direction of each step is reversed.
  for (unsigned I = 0, N = Path.size(); I != N; ++I) {
    const CXXRecordDecl *Base = RD;
    const CXXRecordDecl *Derived = Path[I];
    if (DerivedMember)
      std::swap(Base, Derived);
    ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base);
    RD = Path[I];
  }
  if (DerivedMember)
    ThisAdjustment = -ThisAdjustment;
  return ThisAdjustment;
}

/// DeepCollectObjCIvars -
/// This routine first collects all declared, but not synthesized, ivars in
/// super class and then collects all ivars, including those synthesized for
/// current class. This routine is used for implementation of current class
/// when all ivars, declared and synthesized are known.
void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
                                      bool leafClass,
                            SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
  // Recurse into superclasses first so superclass ivars precede ours;
  // only the outermost (leaf) class contributes synthesized ivars.
  if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
    DeepCollectObjCIvars(SuperClass, false, Ivars);
  if (!leafClass) {
    // Non-leaf classes: only explicitly declared ivars.
    llvm::append_range(Ivars, OI->ivars());
  } else {
    // Leaf class: walk the full declared-ivar list, which includes
    // synthesized ivars.
    auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
    for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
         Iv = Iv->getNextIvar())
      Ivars.push_back(Iv);
  }
}

/// CollectInheritedProtocols - Collect all protocols in current class and
/// those inherited by it.
void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
                          llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
  if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
    // We can use protocol_iterator here instead of
    // all_referenced_protocol_iterator since we are walking all categories.
    for (auto *Proto : OI->all_referenced_protocols()) {
      CollectInheritedProtocols(Proto, Protocols);
    }

    // Categories of this Interface.
    for (const auto *Cat : OI->visible_categories())
      CollectInheritedProtocols(Cat, Protocols);

    // Walk the whole superclass chain, not just the immediate superclass.
    if (ObjCInterfaceDecl *SD = OI->getSuperClass())
      while (SD) {
        CollectInheritedProtocols(SD, Protocols);
        SD = SD->getSuperClass();
      }
  } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
    for (auto *Proto : OC->protocols()) {
      CollectInheritedProtocols(Proto, Protocols);
    }
  } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
    // Insert the protocol. Inserting the canonical decl also serves as the
    // visited-set check that terminates recursion on protocol cycles.
    if (!Protocols.insert(
          const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
      return;

    for (auto *Proto : OP->protocols())
      CollectInheritedProtocols(Proto, Protocols);
  }
}

/// A union has unique object representations iff it is non-empty and every
/// member both has unique representations itself and exactly fills the
/// union's size (so no member leaves padding).
static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
                                                const RecordDecl *RD,
                                                bool CheckIfTriviallyCopyable) {
  assert(RD->isUnion() && "Must be union type");
  CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());

  for (const auto *Field : RD->fields()) {
    if (!Context.hasUniqueObjectRepresentations(Field->getType(),
                                                CheckIfTriviallyCopyable))
      return false;
    CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType());
    if (FieldSize != UnionSize)
      return false;
  }
  // An empty union does not qualify.
  return !RD->field_empty();
}

// Bit offset of a field subobject within its record.
static int64_t getSubobjectOffset(const FieldDecl *Field,
                                  const ASTContext &Context,
                                  const clang::ASTRecordLayout & /*Layout*/) {
  return Context.getFieldOffset(Field);
}

// Bit offset of a base-class subobject within its record.
static int64_t getSubobjectOffset(const CXXRecordDecl *RD,
                                  const ASTContext &Context,
                                  const clang::ASTRecordLayout &Layout) {
  return Context.toBits(Layout.getBaseClassOffset(RD));
}

static std::optional<int64_t>
structHasUniqueObjectRepresentations(const ASTContext &Context,
                                     const RecordDecl *RD,
                                     bool CheckIfTriviallyCopyable);

/// Returns the number of bits a field subobject actually occupies in the
/// object representation, or std::nullopt if the field's representation is
/// not unique (padding, reference, oversized bitfield, ...).
static std::optional<int64_t>
getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context,
                       bool CheckIfTriviallyCopyable) {
  // Non-union record fields are handled structurally, member by member.
  if (Field->getType()->isRecordType()) {
    const RecordDecl *RD = Field->getType()->getAsRecordDecl();
    if (!RD->isUnion())
      return structHasUniqueObjectRepresentations(Context, RD,
                                                  CheckIfTriviallyCopyable);
  }

  // A _BitInt type may not be unique if it has padding bits
  // but if it is a bitfield the padding bits are not used.
  bool IsBitIntType = Field->getType()->isBitIntType();
  if (!Field->getType()->isReferenceType() && !IsBitIntType &&
      !Context.hasUniqueObjectRepresentations(Field->getType(),
                                              CheckIfTriviallyCopyable))
    return std::nullopt;

  int64_t FieldSizeInBits =
      Context.toBits(Context.getTypeSizeInChars(Field->getType()));
  if (Field->isBitField()) {
    // If we have explicit padding bits, they don't contribute bits
    // to the actual object representation, so return 0.
    if (Field->isUnnamedBitField())
      return 0;

    int64_t BitfieldSize = Field->getBitWidthValue(Context);
    if (IsBitIntType) {
      // A bitfield wider than the _BitInt's value bits exposes padding.
      if ((unsigned)BitfieldSize >
          cast<BitIntType>(Field->getType())->getNumBits())
        return std::nullopt;
    } else if (BitfieldSize > FieldSizeInBits) {
      return std::nullopt;
    }
    FieldSizeInBits = BitfieldSize;
  } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations(
                                 Field->getType(), CheckIfTriviallyCopyable)) {
    // Non-bitfield _BitInt: its own padding bits matter after all.
    return std::nullopt;
  }
  return FieldSizeInBits;
}

// Base-class subobjects are always checked structurally.
static std::optional<int64_t>
getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context,
                       bool CheckIfTriviallyCopyable) {
  return structHasUniqueObjectRepresentations(Context, RD,
                                              CheckIfTriviallyCopyable);
}

/// Checks that a sequence of subobjects (fields or bases) tiles the record
/// with no gaps starting at CurOffsetInBits. Returns the bit offset just past
/// the last subobject, or std::nullopt if any subobject is non-unique or a
/// gap (padding) is found. Zero-sized subobjects are ignored, so they may
/// live at any offset.
template <typename RangeT>
static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations(
    const RangeT &Subobjects, int64_t CurOffsetInBits,
    const ASTContext &Context, const clang::ASTRecordLayout &Layout,
    bool CheckIfTriviallyCopyable) {
  for (const auto *Subobject : Subobjects) {
    std::optional<int64_t> SizeInBits =
        getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable);
    if (!SizeInBits)
      return std::nullopt;
    if (*SizeInBits != 0) {
      int64_t Offset = getSubobjectOffset(Subobject, Context, Layout);
      if (Offset != CurOffsetInBits)
        return std::nullopt;
      CurOffsetInBits += *SizeInBits;
    }
  }
  return CurOffsetInBits;
}

/// Returns the number of bits of RD's object representation that carry value
/// (i.e. the offset after laying out bases then fields with no padding), or
/// std::nullopt if the representation is not unique. The caller compares the
/// result against the full type size to detect tail padding.
static std::optional<int64_t>
structHasUniqueObjectRepresentations(const ASTContext &Context,
                                     const RecordDecl *RD,
                                     bool CheckIfTriviallyCopyable) {
  assert(!RD->isUnion() && "Must be struct/class type");
  const auto &Layout = Context.getASTRecordLayout(RD);

  int64_t CurOffsetInBits = 0;
  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) {
    // A vptr is not part of the value representation.
    if (ClassDecl->isDynamicClass())
      return std::nullopt;

    SmallVector<CXXRecordDecl *, 4> Bases;
    for (const auto &Base : ClassDecl->bases()) {
      // Empty types can be inherited from, and non-empty types can potentially
      // have tail padding, so just make sure there isn't an error.
      Bases.emplace_back(Base.getType()->getAsCXXRecordDecl());
    }

    // Bases are checked in layout order, not declaration order.
    llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
      return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R);
    });

    std::optional<int64_t> OffsetAfterBases =
        structSubobjectsHaveUniqueObjectRepresentations(
            Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable);
    if (!OffsetAfterBases)
      return std::nullopt;
    CurOffsetInBits = *OffsetAfterBases;
  }

  std::optional<int64_t> OffsetAfterFields =
      structSubobjectsHaveUniqueObjectRepresentations(
          RD->fields(), CurOffsetInBits, Context, Layout,
          CheckIfTriviallyCopyable);
  if (!OffsetAfterFields)
    return std::nullopt;
  CurOffsetInBits = *OffsetAfterFields;

  return CurOffsetInBits;
}

bool ASTContext::hasUniqueObjectRepresentations(
    QualType Ty, bool CheckIfTriviallyCopyable) const {
  // C++17 [meta.unary.prop]:
  //   The predicate condition for a template specialization
  //   has_unique_object_representations<T> shall be satisfied if and only if:
  //     (9.1) - T is trivially copyable, and
  //     (9.2) - any two objects of type T with the same value have the same
  //     object representation, where:
  //     - two objects of array or non-union class type are considered to have
  //       the same value if their respective sequences of direct subobjects
  //       have the same values, and
  //     - two objects of union type are considered to have the same value if
  //       they have the same active member and the corresponding members have
  //       the same value.
  //   The set of scalar types for which this condition holds is
  //   implementation-defined. [ Note: If a type has padding bits, the condition
  //   does not hold; otherwise, the condition holds true for unsigned integral
  //   types. -- end note ]
  assert(!Ty.isNull() && "Null QualType sent to unique object rep check");

  // Arrays are unique only if their element type is unique.
  if (Ty->isArrayType())
    return hasUniqueObjectRepresentations(getBaseElementType(Ty),
                                          CheckIfTriviallyCopyable);

  assert((Ty->isVoidType() || !Ty->isIncompleteType()) &&
         "hasUniqueObjectRepresentations should not be called with an "
         "incomplete type");

  // (9.1) - T is trivially copyable...
  if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(*this))
    return false;

  // All integrals and enums are unique.
  if (Ty->isIntegralOrEnumerationType()) {
    // Except _BitInt types that have padding bits.
    if (const auto *BIT = Ty->getAs<BitIntType>())
      return getTypeSize(BIT) == BIT->getNumBits();

    return true;
  }

  // All other pointers are unique.
  if (Ty->isPointerType())
    return true;

  // Member pointers are unique only when the ABI leaves no padding in them.
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return !ABI->getMemberPointerInfo(MPT).HasPadding;

  if (Ty->isRecordType()) {
    const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl();

    if (Record->isInvalidDecl())
      return false;

    if (Record->isUnion())
      return unionHasUniqueObjectRepresentations(*this, Record,
                                                 CheckIfTriviallyCopyable);

    std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations(
        *this, Record, CheckIfTriviallyCopyable);

    // Any tail padding (value bits < type size) disqualifies the struct.
    return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty));
  }

  // FIXME: More cases to handle here (list by rsmith):
  // vectors (careful about, eg, vector of 3 foo)
  // _Complex int and friends
  // _Atomic T
  // Obj-C block pointers
  // Obj-C object pointers
  // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
  // clk_event_t, queue_t, reserve_id_t)
  // There're also Obj-C class types and the Obj-C selector type, but I think it
  // makes sense for those to return false here.

  return false;
}

/// Count the ivars attached to \p OI from outside its primary @interface:
/// those declared in class extensions plus those (including synthesized
/// ones) defined in the @implementation.
unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
  unsigned count = 0;
  // Count ivars declared in class extension.
  for (const auto *Ext : OI->known_extensions())
    count += Ext->ivar_size();

  // Count ivar defined in this class's implementation.  This
  // includes synthesized ivars.
  if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
    count += ImplDecl->ivar_size();

  return count;
}

/// Returns true if \p E is a null sentinel: a nullptr_t expression, a null
/// pointer constant of pointer type, or GNU __null.
bool ASTContext::isSentinelNullExpr(const Expr *E) {
  if (!E)
    return false;

  // nullptr_t is always treated as null.
  if (E->getType()->isNullPtrType()) return true;

  if (E->getType()->isAnyPointerType() &&
      E->IgnoreParenCasts()->isNullPointerConstant(*this,
                                            Expr::NPC_ValueDependentIsNull))
    return true;

  // Unfortunately, __null has type 'int'.
  if (isa<GNUNullExpr>(E)) return true;

  return false;
}

/// Get the implementation of ObjCInterfaceDecl, or nullptr if none
/// exists.
ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
    I = ObjCImpls.find(D);
  if (I != ObjCImpls.end())
    return cast<ObjCImplementationDecl>(I->second);
  return nullptr;
}

/// Get the implementation of ObjCCategoryDecl, or nullptr if none
/// exists.
ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
    I = ObjCImpls.find(D);
  if (I != ObjCImpls.end())
    return cast<ObjCCategoryImplDecl>(I->second);
  return nullptr;
}

/// Set the implementation of ObjCInterfaceDecl.
void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
                                       ObjCImplementationDecl *ImplD) {
  assert(IFaceD && ImplD && "Passed null params");
  ObjCImpls[IFaceD] = ImplD;
}

/// Set the implementation of ObjCCategoryDecl.
void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
                                       ObjCCategoryImplDecl *ImplD) {
  assert(CatD && ImplD && "Passed null params");
  ObjCImpls[CatD] = ImplD;
}

/// Look up the recorded redeclaration of \p MD, or nullptr if none.
const ObjCMethodDecl *
ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const {
  return ObjCMethodRedecls.lookup(MD);
}

/// Record \p Redecl as the redeclaration of \p MD; at most one redeclaration
/// may be recorded per method.
void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
                                            const ObjCMethodDecl *Redecl) {
  assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
  ObjCMethodRedecls[MD] = Redecl;
}

/// Return the interface that lexically contains \p ND: the interface itself,
/// or the interface behind the category/implementation the decl lives in.
const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
                                              const NamedDecl *ND) const {
  if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
    return ID;
  if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
    return CD->getClassInterface();
  if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
    return IMD->getClassInterface();

  return nullptr;
}

/// Get the copy initialization expression of VarDecl, or nullptr if
/// none exists.
BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const {
  assert(VD && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "getBlockVarCopyInits - not __block var");
  auto I = BlockVarCopyInits.find(VD);
  if (I != BlockVarCopyInits.end())
    return I->second;
  // No recorded copy-init: null expression, can't-throw flag false.
  return {nullptr, false};
}

/// Set the copy initialization expression of a block var decl.
void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr,
                                     bool CanThrow) {
  assert(VD && CopyExpr && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "setBlockVarCopyInits - not __block var");
  BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
}

/// Allocate a TypeSourceInfo for \p T with \p DataSize bytes of trailing
/// TypeLoc storage (computed from T when 0). The TypeLoc data is left
/// uninitialized; callers must fill it in.
TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
                                                 unsigned DataSize) const {
  if (!DataSize)
    DataSize = TypeLoc::getFullDataSizeForType(T);
  else
    assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
           "incorrect data size provided to CreateTypeSourceInfo!");

  // TypeLoc data is allocated immediately after the TypeSourceInfo object.
  auto *TInfo =
    (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
  new (TInfo) TypeSourceInfo(T, DataSize);
  return TInfo;
}

/// Build a TypeSourceInfo for \p T whose every location is the single
/// location \p L ("trivial" source info).
TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
                                                     SourceLocation L) const {
  TypeSourceInfo *DI = CreateTypeSourceInfo(T);
  DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
  return DI;
}

const ASTRecordLayout &
ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
  // Layout of the interface alone, without an @implementation.
  return getObjCLayout(D, nullptr);
}

const ASTRecordLayout &
ASTContext::getASTObjCImplementationLayout(
                                        const ObjCImplementationDecl *D) const {
  return getObjCLayout(D->getClassInterface(), D);
}

/// Canonicalize a template argument list. Sets \p AnyNonCanonArgs if any
/// argument changed (structurally) during canonicalization.
static auto getCanonicalTemplateArguments(const ASTContext &C,
                                          ArrayRef<TemplateArgument> Args,
                                          bool &AnyNonCanonArgs) {
  SmallVector<TemplateArgument, 16> CanonArgs(Args);
  for (auto &Arg : CanonArgs) {
    TemplateArgument OrigArg = Arg;
    Arg = C.getCanonicalTemplateArgument(Arg);
    AnyNonCanonArgs |= !Arg.structurallyEquals(OrigArg);
  }
  return CanonArgs;
}

//===----------------------------------------------------------------------===//
//                   Type creation/memoization methods
//===----------------------------------------------------------------------===//

/// Return \p baseType qualified with \p quals, uniquing the resulting
/// ExtQuals node. Fast qualifiers are peeled off first and re-applied to the
/// returned QualType rather than stored in the node.
QualType
ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
  unsigned fastQuals = quals.getFastQualifiers();
  quals.removeFastQualifiers();

  // Check if we've already instantiated this type.
  llvm::FoldingSetNodeID ID;
  ExtQuals::Profile(ID, baseType, quals);
  void *insertPos = nullptr;
  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
    assert(eq->getQualifiers() == quals);
    return QualType(eq, fastQuals);
  }

  // If the base type is not canonical, make the appropriate canonical type.
  QualType canon;
  if (!baseType->isCanonicalUnqualified()) {
    SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
    canonSplit.Quals.addConsistentQualifiers(quals);
    canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);

    // Re-find the insert position: the recursive call above may have
    // inserted nodes and invalidated insertPos.
    (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
  }

  auto *eq = new (*this, alignof(ExtQuals)) ExtQuals(baseType, canon, quals);
  ExtQualNodes.InsertNode(eq, insertPos);
  return QualType(eq, fastQuals);
}

/// Return \p T qualified with \p AddressSpace. T must not already carry an
/// address space (asserted); if it already has exactly this one, T is
/// returned unchanged.
QualType ASTContext::getAddrSpaceQualType(QualType T,
                                          LangAS AddressSpace) const {
  QualType CanT = getCanonicalType(T);
  if (CanT.getAddressSpace() == AddressSpace)
    return T;

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode = Quals.strip(T);

  // If this type already has an address space specified, it cannot get
  // another one.
  assert(!Quals.hasAddressSpace() &&
         "Type cannot be in multiple addr spaces!");
  Quals.addAddressSpace(AddressSpace);

  return getExtQualType(TypeNode, Quals);
}

/// Return \p T with its address-space qualifier removed. For arrays the
/// qualifier is stripped from the element type; otherwise sugar is peeled
/// one step at a time until the address space no longer appears.
QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
  // If the type is not qualified with an address space, just return it
  // immediately.
  if (!T.hasAddressSpace())
    return T;

  QualifierCollector Quals;
  const Type *TypeNode;
  // For arrays, strip the qualifier off the element type, then reconstruct the
  // array type
  if (T.getTypePtr()->isArrayType()) {
    T = getUnqualifiedArrayType(T, Quals);
    TypeNode = T.getTypePtr();
  } else {
    // If we are composing extended qualifiers together, merge together
    // into one ExtQuals node.
    while (T.hasAddressSpace()) {
      TypeNode = Quals.strip(T);

      // If the type no longer has an address space after stripping qualifiers,
      // jump out.
      if (!QualType(TypeNode, 0).hasAddressSpace())
        break;

      // There might be sugar in the way. Strip it and try again.
      T = T.getSingleStepDesugaredType(*this);
    }
  }

  Quals.removeAddressSpace();

  // Removal of the address space can mean there are no longer any
  // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
  // or required.
  if (Quals.hasNonFastQualifiers())
    return getExtQualType(TypeNode, Quals);
  else
    return QualType(TypeNode, Quals.getFastQualifiers());
}

/// Compute a pointer-auth discriminator for \p RD's vtable pointer: a stable
/// SipHash of the mangled vtable name.
uint16_t
ASTContext::getPointerAuthVTablePointerDiscriminator(const CXXRecordDecl *RD) {
  assert(RD->isPolymorphic() &&
         "Attempted to get vtable pointer discriminator on a monomorphic type");
  std::unique_ptr<MangleContext> MC(createMangleContext());
  SmallString<256> Str;
  llvm::raw_svector_ostream Out(Str);
  MC->mangleCXXVTable(RD, Out);
  return llvm::getPointerAuthStableSipHash(Str);
}

/// Encode a function type for use in the discriminator of a function pointer
/// type. We can't use the itanium scheme for this since C has quite permissive
/// rules for type compatibility that we need to be compatible with.
///
/// Formally, this function associates every function pointer type T with an
/// encoded string E(T). Let the equivalence relation T1 ~ T2 be defined as
/// E(T1) == E(T2). E(T) is part of the ABI of values of type T. C type
/// compatibility requires equivalent treatment under the ABI, so
/// CCompatible(T1, T2) must imply E(T1) == E(T2), that is, CCompatible must be
/// a subset of ~. Crucially, however, it must be a proper subset because
/// CCompatible is not an equivalence relation: for example, int[] is compatible
/// with both int[1] and int[2], but the latter are not compatible with each
/// other. Therefore this encoding function must be careful to only distinguish
/// types if there is no third type with which they are both required to be
/// compatible.
static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx,
                                             raw_ostream &OS, QualType QT) {
  // FIXME: Consider address space qualifiers.
  const Type *T = QT.getCanonicalType().getTypePtr();

  // FIXME: Consider using the C++ type mangling when we encounter a construct
  // that is incompatible with C.

  switch (T->getTypeClass()) {
  case Type::Atomic:
    // _Atomic T is encoded as T itself.
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, cast<AtomicType>(T)->getValueType());

  case Type::LValueReference:
    OS << "R";
    encodeTypeForFunctionPointerAuth(Ctx, OS,
                                     cast<ReferenceType>(T)->getPointeeType());
    return;
  case Type::RValueReference:
    OS << "O";
    encodeTypeForFunctionPointerAuth(Ctx, OS,
                                     cast<ReferenceType>(T)->getPointeeType());
    return;

  case Type::Pointer:
    // C11 6.7.6.1p2:
    //   For two pointer types to be compatible, both shall be identically
    //   qualified and both shall be pointers to compatible types.
    // FIXME: we should also consider pointee types.
    OS << "P";
    return;

  case Type::ObjCObjectPointer:
  case Type::BlockPointer:
    OS << "P";
    return;

  case Type::Complex:
    OS << "C";
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, cast<ComplexType>(T)->getElementType());

  case Type::VariableArray:
  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::ArrayParameter:
    // C11 6.7.6.2p6:
    //   For two array types to be compatible, both shall have compatible
    //   element types, and if both size specifiers are present, and are integer
    //   constant expressions, then both size specifiers shall have the same
    //   constant value [...]
    //
    // So since ElemType[N] has to be compatible ElemType[], we can't encode the
    // width of the array.
    OS << "A";
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, cast<ArrayType>(T)->getElementType());

  case Type::ObjCInterface:
  case Type::ObjCObject:
    OS << "<objc_object>";
    return;

  case Type::Enum: {
    // C11 6.7.2.2p4:
    //   Each enumerated type shall be compatible with char, a signed integer
    //   type, or an unsigned integer type.
    //
    // So we have to treat enum types as integers.
    QualType UnderlyingType = cast<EnumType>(T)->getDecl()->getIntegerType();
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, UnderlyingType.isNull() ? Ctx.IntTy : UnderlyingType);
  }

  case Type::FunctionNoProto:
  case Type::FunctionProto: {
    // C11 6.7.6.3p15:
    //   For two function types to be compatible, both shall specify compatible
    //   return types. Moreover, the parameter type lists, if both are present,
    //   shall agree in the number of parameters and in the use of the ellipsis
    //   terminator; corresponding parameters shall have compatible types.
    //
    // That paragraph goes on to describe how unprototyped functions are to be
    // handled, which we ignore here. Unprototyped function pointers are hashed
    // as though they were prototyped nullary functions since thats probably
    // what the user meant. This behavior is non-conforming.
    // FIXME: If we add a "custom discriminator" function type attribute we
    // should encode functions as their discriminators.
    OS << "F";
    const auto *FuncType = cast<FunctionType>(T);
    encodeTypeForFunctionPointerAuth(Ctx, OS, FuncType->getReturnType());
    if (const auto *FPT = dyn_cast<FunctionProtoType>(FuncType)) {
      for (QualType Param : FPT->param_types()) {
        Param = Ctx.getSignatureParameterType(Param);
        encodeTypeForFunctionPointerAuth(Ctx, OS, Param);
      }
      if (FPT->isVariadic())
        OS << "z";
    }
    OS << "E";
    return;
  }

  case Type::MemberPointer: {
    OS << "M";
    const auto *MPT = T->getAs<MemberPointerType>();
    encodeTypeForFunctionPointerAuth(Ctx, OS, QualType(MPT->getClass(), 0));
    encodeTypeForFunctionPointerAuth(Ctx, OS, MPT->getPointeeType());
    return;
  }
  case Type::ExtVector:
  case Type::Vector:
    // Vectors are discriminated only by their total size, not element type.
    OS << "Dv" << Ctx.getTypeSizeInChars(T).getQuantity();
    break;

  // Don't bother discriminating based on these types.
  case Type::Pipe:
  case Type::BitInt:
  case Type::ConstantMatrix:
    OS << "?";
    return;

  case Type::Builtin: {
    const auto *BTy = T->getAs<BuiltinType>();
    switch (BTy->getKind()) {
    // NOTE(review): signed and unsigned integer types are deliberately given
    // the same encoding ("i") here — presumably to tolerate C's permissive
    // integer compatibility; confirm against the pointer-auth ABI spec.
#define SIGNED_TYPE(Id, SingletonId)                                           \
  case BuiltinType::Id:                                                        \
    OS << "i";                                                                 \
    return;
#define UNSIGNED_TYPE(Id, SingletonId)                                         \
  case BuiltinType::Id:                                                        \
    OS << "i";                                                                 \
    return;
#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
#define BUILTIN_TYPE(Id, SingletonId)
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("placeholder types should not appear here.");

    case BuiltinType::Half:
      OS << "Dh";
      return;
    case BuiltinType::Float:
      OS << "f";
      return;
    case BuiltinType::Double:
      OS << "d";
      return;
    case BuiltinType::LongDouble:
      OS << "e";
      return;
    case BuiltinType::Float16:
      OS << "DF16_";
      return;
    case BuiltinType::Float128:
      OS << "g";
      return;

    case BuiltinType::Void:
      OS << "v";
      return;

    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
    case BuiltinType::NullPtr:
      OS << "P";
      return;

    // Don't bother discriminating based on OpenCL types.
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
    case BuiltinType::BFloat16:
    case BuiltinType::VectorQuad:
    case BuiltinType::VectorPair:
      OS << "?";
      return;

    // Don't bother discriminating based on these seldom-used types.
    case BuiltinType::Ibm128:
      return;
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
  case BuiltinType::Id:                                                        \
    return;
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
  case BuiltinType::Id:                                                        \
    return;
#include "clang/Basic/OpenCLExtensionTypes.def"
#define SVE_TYPE(Name, Id, SingletonId)                                        \
  case BuiltinType::Id:                                                        \
    return;
#include "clang/Basic/AArch64SVEACLETypes.def"
    case BuiltinType::Dependent:
      llvm_unreachable("should never get here");
    case BuiltinType::AMDGPUBufferRsrc:
    case BuiltinType::WasmExternRef:
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
      llvm_unreachable("not yet implemented");
    }
  }
  case Type::Record: {
    const RecordDecl *RD = T->getAs<RecordType>()->getDecl();
    const IdentifierInfo *II = RD->getIdentifier();

    // In C++, an immediate typedef of an anonymous struct or union
    // is considered to name it for ODR purposes, but C's specification
    // of type compatibility does not have a similar rule.  Using the typedef
    // name in function type discriminators anyway, as we do here,
    // therefore technically violates the C standard: two function pointer
    // types defined in terms of two typedef'd anonymous structs with
    // different names are formally still compatible, but we are assigning
    // them different discriminators and therefore incompatible ABIs.
    //
    // This is a relatively minor violation that significantly improves
    // discrimination in some cases and has not caused problems in
    // practice.  Regardless, it is now part of the ABI in places where
    // function type discrimination is used, and it can no longer be
    // changed except on new platforms.

    if (!II)
      if (const TypedefNameDecl *Typedef = RD->getTypedefNameForAnonDecl())
        II = Typedef->getDeclName().getAsIdentifierInfo();

    if (!II) {
      OS << "<anonymous_record>";
      return;
    }
    // Length-prefixed name, as in Itanium mangling.
    OS << II->getLength() << II->getName();
    return;
  }
  case Type::DeducedTemplateSpecialization:
  case Type::Auto:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define ABSTRACT_TYPE(Class, Base)
#define TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("unexpected non-canonical or dependent type!");
    return;
  }
}

/// Compute the pointer-auth discriminator for \p T: function (pointer) types
/// use the C-compatible encoding above, everything else uses the canonical
/// C++ mangling; either way the result is a stable SipHash of that string.
uint16_t ASTContext::getPointerAuthTypeDiscriminator(QualType T) {
  assert(!T->isDependentType() &&
         "cannot compute type discriminator of a dependent type");

  SmallString<256> Str;
  llvm::raw_svector_ostream Out(Str);

  if (T->isFunctionPointerType() || T->isFunctionReferenceType())
    T = T->getPointeeType();

  if (T->isFunctionType()) {
    encodeTypeForFunctionPointerAuth(*this, Out, T);
  } else {
    T = T.getUnqualifiedType();
    std::unique_ptr<MangleContext> MC(createMangleContext());
    MC->mangleCanonicalTypeName(T, Out);
  }

  return llvm::getPointerAuthStableSipHash(Str);
}

/// Return \p T qualified with the given ObjC GC attribute. For pointers to
/// pointers, the attribute is applied to the pointee instead. T must not
/// already carry a (different) GC attribute.
QualType ASTContext::getObjCGCQualType(QualType T,
                                       Qualifiers::GC GCAttr) const {
  QualType CanT = getCanonicalType(T);
  if (CanT.getObjCGCAttr() == GCAttr)
    return T;

  if (const auto *ptr = T->getAs<PointerType>()) {
    QualType Pointee = ptr->getPointeeType();
    if (Pointee->isAnyPointerType()) {
      QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
      return getPointerType(ResultType);
    }
  }

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode = Quals.strip(T);

  // If this type already has an ObjCGC specified, it cannot get
  // another one.
  assert(!Quals.hasObjCGCAttr() &&
         "Type cannot have multiple ObjCGCs!");
  Quals.addObjCGCAttr(GCAttr);

  return getExtQualType(TypeNode, Quals);
}

/// If \p T is a pointer into a pointer-size address space, return the same
/// pointer type with that address space removed; otherwise return T.
QualType ASTContext::removePtrSizeAddrSpace(QualType T) const {
  if (const PointerType *Ptr = T->getAs<PointerType>()) {
    QualType Pointee = Ptr->getPointeeType();
    if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) {
      return getPointerType(removeAddrSpaceQualType(Pointee));
    }
  }
  return T;
}

/// Get or create the uniqued CountAttributedType wrapping \p WrappedTy with
/// the given __counted_by/__sized_by information and its dependent decls.
QualType ASTContext::getCountAttributedType(
    QualType WrappedTy, Expr *CountExpr, bool CountInBytes, bool OrNull,
    ArrayRef<TypeCoupledDeclRefInfo> DependentDecls) const {
  assert(WrappedTy->isPointerType() || WrappedTy->isArrayType());

  llvm::FoldingSetNodeID ID;
  CountAttributedType::Profile(ID, WrappedTy, CountExpr, CountInBytes, OrNull);

  void *InsertPos = nullptr;
  CountAttributedType *CATy =
      CountAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (CATy)
    return QualType(CATy, 0);

  QualType CanonTy = getCanonicalType(WrappedTy);
  // Trailing storage holds the dependent-decl references.
  size_t Size = CountAttributedType::totalSizeToAlloc<TypeCoupledDeclRefInfo>(
      DependentDecls.size());
  CATy = (CountAttributedType *)Allocate(Size, TypeAlignment);
  new (CATy) CountAttributedType(WrappedTy, CanonTy, CountExpr, CountInBytes,
                                 OrNull, DependentDecls);
  Types.push_back(CATy);
  CountAttributedTypes.InsertNode(CATy, InsertPos);

  return QualType(CATy, 0);
}

/// Return \p T rebuilt with the given ExtInfo (calling convention etc.),
/// preserving proto/no-proto-ness; returns T itself if nothing changes.
const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
                                                   FunctionType::ExtInfo Info) {
  if (T->getExtInfo() == Info)
    return T;

  QualType Result;
  if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
    Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
  } else {
    const auto *FPT = cast<FunctionProtoType>(T);
    FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
    EPI.ExtInfo = Info;
    Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
  }

  return cast<FunctionType>(Result.getTypePtr());
}

/// Rewrite the deduced return type of \p FD (and every prior redeclaration)
/// to \p ResultType, then notify the mutation listener.
void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
                                                 QualType ResultType) {
  // Walk from the most recent declaration back through all previous ones.
  FD = FD->getMostRecentDecl();
  while (true) {
    const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
    FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
    FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
    if (FunctionDecl *Next = FD->getPreviousDecl())
      FD = Next;
    else
      break;
  }
  if (ASTMutationListener *L = getASTMutationListener())
    L->DeducedReturnType(FD, ResultType);
}

/// Get a function type and produce the equivalent function type with the
/// specified exception specification. Type sugar that can be present on a
/// declaration of a function with an exception specification is permitted
/// and preserved. Other type sugar (for instance, typedefs) is not.
QualType ASTContext::getFunctionTypeWithExceptionSpec(
    QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const {
  // Might have some parens.
  if (const auto *PT = dyn_cast<ParenType>(Orig))
    return getParenType(
        getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI));

  // Might be wrapped in a macro qualified type.
  if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig))
    return getMacroQualifiedType(
        getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI),
        MQT->getMacroIdentifier());

  // Might have a calling-convention attribute.
  if (const auto *AT = dyn_cast<AttributedType>(Orig))
    return getAttributedType(
        AT->getAttrKind(),
        getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI),
        getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI));

  // Anything else must be a function type. Rebuild it with the new exception
  // specification.
  const auto *Proto = Orig->castAs<FunctionProtoType>();
  return getFunctionType(
      Proto->getReturnType(), Proto->getParamTypes(),
      Proto->getExtProtoInfo().withExceptionSpec(ESI));
}

/// Determine whether \p T and \p U denote the same function type, ignoring
/// any difference in exception specification. The second comparison (with
/// both exception specifications replaced by EST_None) is only performed in
/// C++17 mode, where the exception specification is part of the type.
bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
                                                          QualType U) const {
  return hasSameType(T, U) ||
         (getLangOpts().CPlusPlus17 &&
          hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None),
                      getFunctionTypeWithExceptionSpec(U, EST_None)));
}

/// Rebuild the given function type with pointer-size address spaces removed
/// from the return type and every parameter type (see
/// removePtrSizeAddrSpace). Non-function types are returned unchanged.
QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) {
  if (const auto *Proto = T->getAs<FunctionProtoType>()) {
    QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
    SmallVector<QualType, 16> Args(Proto->param_types().size());
    for (unsigned i = 0, n = Args.size(); i != n; ++i)
      Args[i] = removePtrSizeAddrSpace(Proto->param_types()[i]);
    return getFunctionType(RetTy, Args, Proto->getExtProtoInfo());
  }

  if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) {
    QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
    return getFunctionNoProtoType(RetTy, Proto->getExtInfo());
  }

  return T;
}

/// Determine whether \p T and \p U denote the same function type, ignoring
/// pointer-size address spaces on return and parameter types.
bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) {
  return hasSameType(T, U) ||
         hasSameType(getFunctionTypeWithoutPtrSizes(T),
                     getFunctionTypeWithoutPtrSizes(U));
}

/// Replace the exception specification on \p FD's type. If \p AsWritten is
/// true, the type-as-written in the declaration's TypeSourceInfo is patched
/// as well.
void ASTContext::adjustExceptionSpec(
    FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
    bool AsWritten) {
  // Update the type.
  QualType Updated =
      getFunctionTypeWithExceptionSpec(FD->getType(), ESI);
  FD->setType(Updated);

  if (!AsWritten)
    return;

  // Update the type in the type source information too.
  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
    // If the type and the type-as-written differ, we may need to update
    // the type-as-written too.
    if (TSInfo->getType() != FD->getType())
      Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);

    // FIXME: When we get proper type location information for exceptions,
    // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
    // up the TypeSourceInfo; this requires the updated type to occupy
    // exactly the same amount of TypeLoc data as the old one.
    assert(TypeLoc::getFullDataSizeForType(Updated) ==
               TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
           "TypeLoc size mismatch from updating exception specification");
    TSInfo->overrideType(Updated);
  }
}

/// getComplexType - Return the uniqued reference to the type for a complex
/// number with the specified element type.
QualType ASTContext::getComplexType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ComplexType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(CT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getComplexType(getCanonicalType(T));

    // Get the new insert position for the node we care about: the recursive
    // call above may have inserted nodes into the folding set, invalidating
    // the previously computed InsertPos.
    ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
    (void)NewIP;
  }
  auto *New = new (*this, alignof(ComplexType)) ComplexType(T, Canonical);
  Types.push_back(New);
  ComplexTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getPointerType - Return the uniqued reference to the type for a pointer to
/// the specified type.
QualType ASTContext::getPointerType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  PointerType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getPointerType(getCanonicalType(T));

    // Get the new insert position for the node we care about; the recursive
    // call may have changed the folding set.
    PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
    (void)NewIP;
  }
  auto *New = new (*this, alignof(PointerType)) PointerType(T, Canonical);
  Types.push_back(New);
  PointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// Return the uniqued AdjustedType node recording that \p Orig was adjusted
/// to \p New.
QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, Orig, New);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  QualType Canonical = getCanonicalType(New);

  // Get the new insert position for the node we care about.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, alignof(AdjustedType))
      AdjustedType(Type::Adjusted, Orig, New, Canonical);
  Types.push_back(AT);
  AdjustedTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}

/// Return the uniqued DecayedType node recording that \p Orig decayed to
/// \p Decayed. Note this profiles into (and is uniqued in) the same
/// AdjustedTypes folding set as getAdjustedType.
QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const {
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, Orig, Decayed);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  QualType Canonical = getCanonicalType(Decayed);

  // Get the new insert position for the node we care about.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, alignof(DecayedType)) DecayedType(Orig, Decayed, Canonical);
  Types.push_back(AT);
  AdjustedTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}

/// Compute the decayed form of \p T (array-to-pointer or
/// function-to-pointer) and return the corresponding DecayedType.
QualType ASTContext::getDecayedType(QualType T) const {
  assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");

  QualType Decayed;

  // C99 6.7.5.3p7:
  //   A declaration of a parameter as "array of type" shall be
  //   adjusted to "qualified pointer to type", where the type
  //   qualifiers (if any) are those specified within the [ and ] of
  //   the array type derivation.
  if (T->isArrayType())
    Decayed = getArrayDecayedType(T);

  // C99 6.7.5.3p8:
  //   A declaration of a parameter as "function returning type"
  //   shall be adjusted to "pointer to function returning type", as
  //   in 6.3.2.1.
  if (T->isFunctionType())
    Decayed = getPointerType(T);

  return getDecayedType(T, Decayed);
}

/// Return the uniqued ArrayParameterType corresponding to the given constant
/// array type \p Ty; \p Ty is returned unchanged if it is already an
/// ArrayParameterType.
QualType ASTContext::getArrayParameterType(QualType Ty) const {
  if (Ty->isArrayParameterType())
    return Ty;
  assert(Ty->isConstantArrayType() && "Ty must be an array type.");
  const auto *ATy = cast<ConstantArrayType>(Ty);
  llvm::FoldingSetNodeID ID;
  ATy->Profile(ID, *this, ATy->getElementType(), ATy->getZExtSize(),
               ATy->getSizeExpr(), ATy->getSizeModifier(),
               ATy->getIndexTypeQualifiers().getAsOpaqueValue());
  void *InsertPos = nullptr;
  ArrayParameterType *AT =
      ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  QualType Canonical;
  if (!Ty.isCanonical()) {
    Canonical = getArrayParameterType(getCanonicalType(Ty));

    // Get the new insert position for the node we care about; the recursive
    // call may have changed the folding set.
    AT = ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!AT && "Shouldn't be in the map!");
  }

  AT = new (*this, alignof(ArrayParameterType))
      ArrayParameterType(ATy, Canonical);
  Types.push_back(AT);
  ArrayParameterTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}

/// getBlockPointerType - Return the uniqued reference to the type for
/// a pointer to the specified block.
QualType ASTContext::getBlockPointerType(QualType T) const {
  assert(T->isFunctionType() && "block of function types only");
  // Unique pointers, to guarantee there is only one block of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  BlockPointerType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (BlockPointerType *PT =
          BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the block pointee type isn't canonical, this won't be a canonical
  // type either so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getBlockPointerType(getCanonicalType(T));

    // Get the new insert position for the node we care about; the recursive
    // call may have changed the folding set.
    BlockPointerType *NewIP =
        BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
    (void)NewIP;
  }
  auto *New =
      new (*this, alignof(BlockPointerType)) BlockPointerType(T, Canonical);
  Types.push_back(New);
  BlockPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getLValueReferenceType - Return the uniqued reference to the type for an
/// lvalue reference to the specified type.
QualType
ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, SpelledAsLValue);

  void *InsertPos = nullptr;
  if (LValueReferenceType *RT =
          LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field. The canonical form is a
  // spelled-as-lvalue reference to the (collapsed) non-reference pointee.
  QualType Canonical;
  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getLValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about; the recursive
    // call may have changed the folding set.
    LValueReferenceType *NewIP =
        LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
    (void)NewIP;
  }

  auto *New = new (*this, alignof(LValueReferenceType))
      LValueReferenceType(T, Canonical, SpelledAsLValue);
  Types.push_back(New);
  LValueReferenceTypes.InsertNode(New, InsertPos);

  return QualType(New, 0);
}

/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
QualType ASTContext::getRValueReferenceType(QualType T) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, false);

  void *InsertPos = nullptr;
  if (RValueReferenceType *RT =
          RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getRValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about; the recursive
    // call may have changed the folding set.
    RValueReferenceType *NewIP =
        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
    (void)NewIP;
  }

  auto *New = new (*this, alignof(RValueReferenceType))
      RValueReferenceType(T, Canonical);
  Types.push_back(New);
  RValueReferenceTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getMemberPointerType - Return the uniqued reference to the type for a
/// member pointer to the specified type, in the specified class.
QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  MemberPointerType::Profile(ID, T, Cls);

  void *InsertPos = nullptr;
  if (MemberPointerType *PT =
          MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee or class type isn't canonical, this won't be a canonical
  // type either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
    Canonical =
        getMemberPointerType(getCanonicalType(T), getCanonicalType(Cls));

    // Get the new insert position for the node we care about; the recursive
    // call may have changed the folding set.
    MemberPointerType *NewIP =
        MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
    (void)NewIP;
  }
  auto *New = new (*this, alignof(MemberPointerType))
      MemberPointerType(T, Cls, Canonical);
  Types.push_back(New);
  MemberPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getConstantArrayType - Return the unique reference to the type for an
/// array of the specified element type.
QualType ASTContext::getConstantArrayType(QualType EltTy,
                                          const llvm::APInt &ArySizeIn,
                                          const Expr *SizeExpr,
                                          ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals) const {
  assert((EltTy->isDependentType() ||
          EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
         "Constant array of VLAs is illegal!");

  // We only need the size as part of the type if it's instantiation-dependent.
  if (SizeExpr && !SizeExpr->isInstantiationDependent())
    SizeExpr = nullptr;

  // Convert the array size into a canonical width matching the pointer size for
  // the target.
  llvm::APInt ArySize(ArySizeIn);
  ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());

  llvm::FoldingSetNodeID ID;
  ConstantArrayType::Profile(ID, *this, EltTy, ArySize.getZExtValue(), SizeExpr,
                             ASM, IndexTypeQuals);

  void *InsertPos = nullptr;
  if (ConstantArrayType *ATP =
          ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(ATP, 0);

  // If the element type isn't canonical or has qualifiers, or the array bound
  // is instantiation-dependent, this won't be a canonical type either, so fill
  // in the canonical type field.
  QualType Canon;
  // FIXME: Check below should look for qualifiers behind sugar.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
    // Qualifiers are hoisted off the element type and re-applied to the
    // array as a whole for the canonical form.
    SplitQualType canonSplit = getCanonicalType(EltTy).split();
    Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr,
                                 ASM, IndexTypeQuals);
    Canon = getQualifiedType(Canon, canonSplit.Quals);

    // Get the new insert position for the node we care about; the recursive
    // call may have changed the folding set.
    ConstantArrayType *NewIP =
        ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
    (void)NewIP;
  }

  auto *New = ConstantArrayType::Create(*this, EltTy, Canon, ArySize, SizeExpr,
                                        ASM, IndexTypeQuals);
  ConstantArrayTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}

/// getVariableArrayDecayedType - Turns the given type, which may be
/// variably-modified, into the corresponding type with all the known
/// sizes replaced with [*].
QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
  // Vastly most common case.
  if (!type->isVariablyModifiedType()) return type;

  QualType result;

  SplitQualType split = type.getSplitDesugaredType();
  const Type *ty = split.Ty;
  switch (ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("didn't desugar past all non-canonical types?");

  // These types should never be variably-modified.
  case Type::Builtin:
  case Type::Complex:
  case Type::Vector:
  case Type::DependentVector:
  case Type::ExtVector:
  case Type::DependentSizedExtVector:
  case Type::ConstantMatrix:
  case Type::DependentSizedMatrix:
  case Type::DependentAddressSpace:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
  case Type::Record:
  case Type::Enum:
  case Type::UnresolvedUsing:
  case Type::TypeOfExpr:
  case Type::TypeOf:
  case Type::Decltype:
  case Type::UnaryTransform:
  case Type::DependentName:
  case Type::InjectedClassName:
  case Type::TemplateSpecialization:
  case Type::DependentTemplateSpecialization:
  case Type::TemplateTypeParm:
  case Type::SubstTemplateTypeParmPack:
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
  case Type::PackExpansion:
  case Type::PackIndexing:
  case Type::BitInt:
  case Type::DependentBitInt:
  case Type::ArrayParameter:
    llvm_unreachable("type should never be variably-modified");

  // These types can be variably-modified but should never need to
  // further decay.
  case Type::FunctionNoProto:
  case Type::FunctionProto:
  case Type::BlockPointer:
  case Type::MemberPointer:
  case Type::Pipe:
    return type;

  // These types can be variably-modified.  All these modifications
  // preserve structure except as noted by comments.
  // TODO: if we ever care about optimizing VLAs, there are no-op
  // optimizations available here.
  case Type::Pointer:
    result = getPointerType(getVariableArrayDecayedType(
        cast<PointerType>(ty)->getPointeeType()));
    break;

  case Type::LValueReference: {
    const auto *lv = cast<LValueReferenceType>(ty);
    result = getLValueReferenceType(
        getVariableArrayDecayedType(lv->getPointeeType()),
        lv->isSpelledAsLValue());
    break;
  }

  case Type::RValueReference: {
    const auto *lv = cast<RValueReferenceType>(ty);
    result = getRValueReferenceType(
        getVariableArrayDecayedType(lv->getPointeeType()));
    break;
  }

  case Type::Atomic: {
    const auto *at = cast<AtomicType>(ty);
    result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
    break;
  }

  case Type::ConstantArray: {
    const auto *cat = cast<ConstantArrayType>(ty);
    result = getConstantArrayType(
        getVariableArrayDecayedType(cat->getElementType()),
        cat->getSize(),
        cat->getSizeExpr(),
        cat->getSizeModifier(),
        cat->getIndexTypeCVRQualifiers());
    break;
  }

  case Type::DependentSizedArray: {
    const auto *dat = cast<DependentSizedArrayType>(ty);
    result = getDependentSizedArrayType(
        getVariableArrayDecayedType(dat->getElementType()),
        dat->getSizeExpr(),
        dat->getSizeModifier(),
        dat->getIndexTypeCVRQualifiers(),
        dat->getBracketsRange());
    break;
  }

  // Turn incomplete types into [*] types.
  case Type::IncompleteArray: {
    const auto *iat = cast<IncompleteArrayType>(ty);
    result =
        getVariableArrayType(getVariableArrayDecayedType(iat->getElementType()),
                             /*size*/ nullptr, ArraySizeModifier::Normal,
                             iat->getIndexTypeCVRQualifiers(), SourceRange());
    break;
  }

  // Turn VLA types into [*] types.
  case Type::VariableArray: {
    const auto *vat = cast<VariableArrayType>(ty);
    result = getVariableArrayType(
        getVariableArrayDecayedType(vat->getElementType()),
        /*size*/ nullptr, ArraySizeModifier::Star,
        vat->getIndexTypeCVRQualifiers(), vat->getBracketsRange());
    break;
  }
  }

  // Apply the top-level qualifiers from the original.
  return getQualifiedType(result, split.Quals);
}

/// getVariableArrayType - Returns a non-unique reference to the type for a
/// variable array of the specified element type.
QualType ASTContext::getVariableArrayType(QualType EltTy, Expr *NumElts,
                                          ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals,
                                          SourceRange Brackets) const {
  // Since we don't unique expressions, it isn't possible to unique VLA's
  // that have an expression provided for their size.
  QualType Canon;

  // Be sure to pull qualifiers off the element type.
  // FIXME: Check below should look for qualifiers behind sugar.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(EltTy).split();
    Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
                                 IndexTypeQuals, Brackets);
    Canon = getQualifiedType(Canon, canonSplit.Quals);
  }

  auto *New = new (*this, alignof(VariableArrayType))
      VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);

  VariableArrayTypes.push_back(New);
  Types.push_back(New);
  return QualType(New, 0);
}

/// getDependentSizedArrayType - Returns a non-unique reference to
/// the type for a dependently-sized array of the specified element
/// type.
QualType ASTContext::getDependentSizedArrayType(QualType elementType,
                                                Expr *numElements,
                                                ArraySizeModifier ASM,
                                                unsigned elementTypeQuals,
                                                SourceRange brackets) const {
  assert((!numElements || numElements->isTypeDependent() ||
          numElements->isValueDependent()) &&
         "Size must be type- or value-dependent!");

  SplitQualType canonElementType = getCanonicalType(elementType).split();

  // Profile against the canonical element type when a size expression is
  // present; otherwise against the element type as given.
  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentSizedArrayType::Profile(
      ID, *this, numElements ? QualType(canonElementType.Ty, 0) : elementType,
      ASM, elementTypeQuals, numElements);

  // Look for an existing type with these properties.
  DependentSizedArrayType *canonTy =
      DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);

  // Dependently-sized array types that do not have a specified number
  // of elements will have their sizes deduced from a dependent
  // initializer.
  if (!numElements) {
    if (canonTy)
      return QualType(canonTy, 0);

    auto *newType = new (*this, alignof(DependentSizedArrayType))
        DependentSizedArrayType(elementType, QualType(), numElements, ASM,
                                elementTypeQuals, brackets);
    DependentSizedArrayTypes.InsertNode(newType, insertPos);
    Types.push_back(newType);
    return QualType(newType, 0);
  }

  // If we don't have one, build one.
  if (!canonTy) {
    canonTy = new (*this, alignof(DependentSizedArrayType))
        DependentSizedArrayType(QualType(canonElementType.Ty, 0), QualType(),
                                numElements, ASM, elementTypeQuals, brackets);
    DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  // Apply qualifiers from the element type to the array.
  QualType canon = getQualifiedType(QualType(canonTy, 0),
                                    canonElementType.Quals);

  // If we didn't need extra canonicalization for the element type or the size
  // expression, then just use that as our result.
  if (QualType(canonElementType.Ty, 0) == elementType &&
      canonTy->getSizeExpr() == numElements)
    return canon;

  // Otherwise, we need to build a type which follows the spelling
  // of the element type.
  auto *sugaredType = new (*this, alignof(DependentSizedArrayType))
      DependentSizedArrayType(elementType, canon, numElements, ASM,
                              elementTypeQuals, brackets);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}

/// Return the unique reference to the type for an incomplete array of the
/// specified element type.
QualType ASTContext::getIncompleteArrayType(QualType elementType,
                                            ArraySizeModifier ASM,
                                            unsigned elementTypeQuals) const {
  llvm::FoldingSetNodeID ID;
  IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);

  void *insertPos = nullptr;
  if (IncompleteArrayType *iat =
          IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
    return QualType(iat, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.  We also have to pull
  // qualifiers off the element type.
  QualType canon;

  // FIXME: Check below should look for qualifiers behind sugar.
  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(elementType).split();
    canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
                                   ASM, elementTypeQuals);
    canon = getQualifiedType(canon, canonSplit.Quals);

    // Get the new insert position for the node we care about; the recursive
    // call may have changed the folding set.
    IncompleteArrayType *existing =
        IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
    assert(!existing && "Shouldn't be in the map!");
    (void)existing;
  }

  auto *newType = new (*this, alignof(IncompleteArrayType))
      IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);

  IncompleteArrayTypes.InsertNode(newType, insertPos);
  Types.push_back(newType);
  return QualType(newType, 0);
}

/// Return the element type, element count, and number of registers for the
/// given scalable builtin vector type (AArch64 SVE or RISC-V V).
ASTContext::BuiltinVectorTypeInfo
ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
#define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS)                          \
  {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \
   NUMVECTORS};

#define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS)                                     \
  {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS};

  switch (Ty->getKind()) {
  default:
    llvm_unreachable("Unsupported builtin vector type");
  // SVE vectors: the element counts below describe one 128-bit vector
  // granule (16 x i8, 8 x i16, 4 x i32, 2 x i64, ...); the trailing
  // number is how many vectors a tuple type spans.
  case BuiltinType::SveInt8:
    return SVE_INT_ELTTY(8, 16, true, 1);
  case BuiltinType::SveUint8:
    return SVE_INT_ELTTY(8, 16, false, 1);
  case BuiltinType::SveInt8x2:
    return SVE_INT_ELTTY(8, 16, true, 2);
  case BuiltinType::SveUint8x2:
    return SVE_INT_ELTTY(8, 16, false, 2);
  case BuiltinType::SveInt8x3:
    return SVE_INT_ELTTY(8, 16, true, 3);
  case BuiltinType::SveUint8x3:
    return SVE_INT_ELTTY(8, 16, false, 3);
  case BuiltinType::SveInt8x4:
    return SVE_INT_ELTTY(8, 16, true, 4);
  case BuiltinType::SveUint8x4:
    return SVE_INT_ELTTY(8, 16, false, 4);
  case BuiltinType::SveInt16:
    return SVE_INT_ELTTY(16, 8, true, 1);
  case BuiltinType::SveUint16:
    return SVE_INT_ELTTY(16, 8, false, 1);
  case BuiltinType::SveInt16x2:
    return SVE_INT_ELTTY(16, 8, true, 2);
  case BuiltinType::SveUint16x2:
    return SVE_INT_ELTTY(16, 8, false, 2);
  case BuiltinType::SveInt16x3:
    return SVE_INT_ELTTY(16, 8, true, 3);
  case BuiltinType::SveUint16x3:
    return SVE_INT_ELTTY(16, 8, false, 3);
  case BuiltinType::SveInt16x4:
    return SVE_INT_ELTTY(16, 8, true, 4);
  case BuiltinType::SveUint16x4:
    return SVE_INT_ELTTY(16, 8, false, 4);
  case BuiltinType::SveInt32:
    return SVE_INT_ELTTY(32, 4, true, 1);
  case BuiltinType::SveUint32:
    return SVE_INT_ELTTY(32, 4, false, 1);
  case BuiltinType::SveInt32x2:
    return SVE_INT_ELTTY(32, 4, true, 2);
  case BuiltinType::SveUint32x2:
    return SVE_INT_ELTTY(32, 4, false, 2);
  case BuiltinType::SveInt32x3:
    return SVE_INT_ELTTY(32, 4, true, 3);
  case BuiltinType::SveUint32x3:
    return SVE_INT_ELTTY(32, 4, false, 3);
  case BuiltinType::SveInt32x4:
    return SVE_INT_ELTTY(32, 4, true, 4);
  case BuiltinType::SveUint32x4:
    return SVE_INT_ELTTY(32, 4, false, 4);
  case BuiltinType::SveInt64:
    return SVE_INT_ELTTY(64, 2, true, 1);
  case BuiltinType::SveUint64:
    return SVE_INT_ELTTY(64, 2, false, 1);
  case BuiltinType::SveInt64x2:
    return SVE_INT_ELTTY(64, 2, true, 2);
  case BuiltinType::SveUint64x2:
    return SVE_INT_ELTTY(64, 2, false, 2);
  case BuiltinType::SveInt64x3:
    return SVE_INT_ELTTY(64, 2, true, 3);
  case BuiltinType::SveUint64x3:
    return SVE_INT_ELTTY(64, 2, false, 3);
  case BuiltinType::SveInt64x4:
    return SVE_INT_ELTTY(64, 2, true, 4);
  case BuiltinType::SveUint64x4:
    return SVE_INT_ELTTY(64, 2, false, 4);
  // SVE predicates are modeled as vectors of BoolTy elements.
  case BuiltinType::SveBool:
    return SVE_ELTTY(BoolTy, 16, 1);
  case BuiltinType::SveBoolx2:
    return SVE_ELTTY(BoolTy, 16, 2);
  case BuiltinType::SveBoolx4:
    return SVE_ELTTY(BoolTy, 16, 4);
  case BuiltinType::SveFloat16:
    return SVE_ELTTY(HalfTy, 8, 1);
  case BuiltinType::SveFloat16x2:
    return SVE_ELTTY(HalfTy, 8, 2);
  case BuiltinType::SveFloat16x3:
    return SVE_ELTTY(HalfTy, 8, 3);
  case BuiltinType::SveFloat16x4:
    return SVE_ELTTY(HalfTy, 8, 4);
  case BuiltinType::SveFloat32:
    return SVE_ELTTY(FloatTy, 4, 1);
  case BuiltinType::SveFloat32x2:
    return SVE_ELTTY(FloatTy, 4, 2);
  case BuiltinType::SveFloat32x3:
    return SVE_ELTTY(FloatTy, 4, 3);
  case BuiltinType::SveFloat32x4:
    return SVE_ELTTY(FloatTy, 4, 4);
  case BuiltinType::SveFloat64:
    return SVE_ELTTY(DoubleTy, 2, 1);
  case BuiltinType::SveFloat64x2:
    return SVE_ELTTY(DoubleTy, 2, 2);
  case BuiltinType::SveFloat64x3:
    return SVE_ELTTY(DoubleTy, 2, 3);
  case BuiltinType::SveFloat64x4:
    return SVE_ELTTY(DoubleTy, 2, 4);
  case BuiltinType::SveBFloat16:
    return SVE_ELTTY(BFloat16Ty, 8, 1);
  case BuiltinType::SveBFloat16x2:
    return SVE_ELTTY(BFloat16Ty, 8, 2);
  case BuiltinType::SveBFloat16x3:
    return SVE_ELTTY(BFloat16Ty, 8, 3);
  case BuiltinType::SveBFloat16x4:
    return SVE_ELTTY(BFloat16Ty, 8, 4);
// RISC-V V cases are generated from the .def file below.
#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF,         \
                            IsSigned)                                          \
  case BuiltinType::Id:                                                        \
    return {getIntTypeForBitwidth(ElBits, IsSigned),                           \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF)       \
  case BuiltinType::Id:                                                        \
    return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy),    \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF)      \
  case BuiltinType::Id:                                                        \
    return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                      \
  case BuiltinType::Id:                                                        \
    return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
#include "clang/Basic/RISCVVTypes.def"
  }
}

/// getExternrefType - Return a WebAssembly externref type, which represents an
/// opaque reference to a host value.
QualType ASTContext::getWebAssemblyExternrefType() const {
  // Only valid when targeting WebAssembly with the reference-types feature;
  // the singleton is found by expanding the .def file and matching the Id.
  if (Target->getTriple().isWasm() && Target->hasFeature("reference-types")) {
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS)                  \
  if (BuiltinType::Id == BuiltinType::WasmExternRef)                          \
    return SingletonId;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }
  llvm_unreachable(
      "shouldn't try to generate type externref outside WebAssembly target");
}

/// getScalableVectorType - Return the unique reference to a scalable vector
/// type of the specified element type and size. VectorType must be a built-in
/// type.
QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts,
                                           unsigned NumFields) const {
  // The singleton types are generated from the target's .def file; we match
  // on element signedness/FP-ness, element bit-width, and lane/field counts.
  if (Target->hasAArch64SVETypes()) {
    uint64_t EltTySize = getTypeSize(EltTy);
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits,    \
                        IsSigned, IsFP, IsBF)                                  \
  if (!EltTy->isBooleanType() &&                                               \
      ((EltTy->hasIntegerRepresentation() &&                                   \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) ||                \
       (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() &&      \
        IsFP && !IsBF) ||                                                      \
       (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() &&       \
        IsBF && !IsFP)) &&                                                     \
      EltTySize == ElBits && NumElts == NumEls) {                              \
    return SingletonId;                                                        \
  }
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls)         \
  if (EltTy->isBooleanType() && NumElts == NumEls)                             \
    return SingletonId;
#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingleTonId)
#include "clang/Basic/AArch64SVEACLETypes.def"
  } else if (Target->hasRISCVVTypes()) {
    uint64_t EltTySize = getTypeSize(EltTy);
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned,   \
                        IsFP, IsBF)                                            \
  if (!EltTy->isBooleanType() &&                                               \
      ((EltTy->hasIntegerRepresentation() &&                                   \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) ||                \
       (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() &&      \
        IsFP && !IsBF) ||                                                      \
       (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() &&       \
        IsBF && !IsFP)) &&                                                     \
      EltTySize == ElBits && NumElts == NumEls && NumFields == NF)             \
    return SingletonId;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                      \
  if (EltTy->isBooleanType() && NumElts == NumEls)                             \
    return SingletonId;
#include "clang/Basic/RISCVVTypes.def"
  }
  // No matching scalable builtin type for this target/configuration.
  return QualType();
}

/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
                                   VectorKind VecKind) const {
  assert(vecType->isBuiltinType() ||
         (vecType->isBitIntType() &&
          // Only support _BitInt elements with byte-sized power of 2 NumBits.
          llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) &&
          vecType->castAs<BitIntType>()->getNumBits() >= 8));

  // Check if we've already instantiated a vector of this type.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);

  void *InsertPos = nullptr;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);

  // If the element type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);

    // Get the new insert position for the node we care about.
    // (The recursive call may have grown the FoldingSet, invalidating the
    // previously computed InsertPos.)
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(VectorType))
      VectorType(vecType, NumElts, Canonical, VecKind);
  VectorTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}

/// Return the unique reference to a vector type whose element count is a
/// dependent expression; sugared nodes share one canonical node per
/// (canonical element type, size expr, kind) triple.
QualType ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
                                            SourceLocation AttrLoc,
                                            VectorKind VecKind) const {
  llvm::FoldingSetNodeID ID;
  DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr,
                               VecKind);
  void *InsertPos = nullptr;
  DependentVectorType *Canon =
      DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentVectorType *New;

  if (Canon) {
    // A canonical node already exists: build a sugared node pointing at it.
    New = new (*this, alignof(DependentVectorType)) DependentVectorType(
        VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
  } else {
    QualType CanonVecTy = getCanonicalType(VecType);
    if (CanonVecTy == VecType) {
      // The element type is canonical: this new node IS the canonical node.
      New = new (*this, alignof(DependentVectorType))
          DependentVectorType(VecType, QualType(), SizeExpr, AttrLoc, VecKind);

      DependentVectorType *CanonCheck =
          DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck &&
             "Dependent-sized vector_size canonical type broken");
      (void)CanonCheck;
      DependentVectorTypes.InsertNode(New, InsertPos);
    } else {
      // Build (and register) the canonical node first, then sugar over it.
      QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr,
                                                SourceLocation(), VecKind);
      New = new (*this, alignof(DependentVectorType))
          DependentVectorType(VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
    }
  }

  Types.push_back(New);
  return QualType(New, 0);
}

/// getExtVectorType - Return the unique reference to an extended vector type of
/// the specified element type and size. VectorType must be a built-in type.
4503 QualType ASTContext::getExtVectorType(QualType vecType, 4504 unsigned NumElts) const { 4505 assert(vecType->isBuiltinType() || vecType->isDependentType() || 4506 (vecType->isBitIntType() && 4507 // Only support _BitInt elements with byte-sized power of 2 NumBits. 4508 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) && 4509 vecType->castAs<BitIntType>()->getNumBits() >= 8)); 4510 4511 // Check if we've already instantiated a vector of this type. 4512 llvm::FoldingSetNodeID ID; 4513 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, 4514 VectorKind::Generic); 4515 void *InsertPos = nullptr; 4516 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4517 return QualType(VTP, 0); 4518 4519 // If the element type isn't canonical, this won't be a canonical type either, 4520 // so fill in the canonical type field. 4521 QualType Canonical; 4522 if (!vecType.isCanonical()) { 4523 Canonical = getExtVectorType(getCanonicalType(vecType), NumElts); 4524 4525 // Get the new insert position for the node we care about. 
4526 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4527 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4528 } 4529 auto *New = new (*this, alignof(ExtVectorType)) 4530 ExtVectorType(vecType, NumElts, Canonical); 4531 VectorTypes.InsertNode(New, InsertPos); 4532 Types.push_back(New); 4533 return QualType(New, 0); 4534 } 4535 4536 QualType 4537 ASTContext::getDependentSizedExtVectorType(QualType vecType, 4538 Expr *SizeExpr, 4539 SourceLocation AttrLoc) const { 4540 llvm::FoldingSetNodeID ID; 4541 DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType), 4542 SizeExpr); 4543 4544 void *InsertPos = nullptr; 4545 DependentSizedExtVectorType *Canon 4546 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4547 DependentSizedExtVectorType *New; 4548 if (Canon) { 4549 // We already have a canonical version of this array type; use it as 4550 // the canonical type for a newly-built type. 4551 New = new (*this, alignof(DependentSizedExtVectorType)) 4552 DependentSizedExtVectorType(vecType, QualType(Canon, 0), SizeExpr, 4553 AttrLoc); 4554 } else { 4555 QualType CanonVecTy = getCanonicalType(vecType); 4556 if (CanonVecTy == vecType) { 4557 New = new (*this, alignof(DependentSizedExtVectorType)) 4558 DependentSizedExtVectorType(vecType, QualType(), SizeExpr, AttrLoc); 4559 4560 DependentSizedExtVectorType *CanonCheck 4561 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4562 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken"); 4563 (void)CanonCheck; 4564 DependentSizedExtVectorTypes.InsertNode(New, InsertPos); 4565 } else { 4566 QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr, 4567 SourceLocation()); 4568 New = new (*this, alignof(DependentSizedExtVectorType)) 4569 DependentSizedExtVectorType(vecType, CanonExtTy, SizeExpr, AttrLoc); 4570 } 4571 } 4572 4573 Types.push_back(New); 4574 return QualType(New, 0); 4575 } 4576 4577 QualType 
ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows, 4578 unsigned NumColumns) const { 4579 llvm::FoldingSetNodeID ID; 4580 ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns, 4581 Type::ConstantMatrix); 4582 4583 assert(MatrixType::isValidElementType(ElementTy) && 4584 "need a valid element type"); 4585 assert(ConstantMatrixType::isDimensionValid(NumRows) && 4586 ConstantMatrixType::isDimensionValid(NumColumns) && 4587 "need valid matrix dimensions"); 4588 void *InsertPos = nullptr; 4589 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos)) 4590 return QualType(MTP, 0); 4591 4592 QualType Canonical; 4593 if (!ElementTy.isCanonical()) { 4594 Canonical = 4595 getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns); 4596 4597 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4598 assert(!NewIP && "Matrix type shouldn't already exist in the map"); 4599 (void)NewIP; 4600 } 4601 4602 auto *New = new (*this, alignof(ConstantMatrixType)) 4603 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical); 4604 MatrixTypes.InsertNode(New, InsertPos); 4605 Types.push_back(New); 4606 return QualType(New, 0); 4607 } 4608 4609 QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy, 4610 Expr *RowExpr, 4611 Expr *ColumnExpr, 4612 SourceLocation AttrLoc) const { 4613 QualType CanonElementTy = getCanonicalType(ElementTy); 4614 llvm::FoldingSetNodeID ID; 4615 DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr, 4616 ColumnExpr); 4617 4618 void *InsertPos = nullptr; 4619 DependentSizedMatrixType *Canon = 4620 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4621 4622 if (!Canon) { 4623 Canon = new (*this, alignof(DependentSizedMatrixType)) 4624 DependentSizedMatrixType(CanonElementTy, QualType(), RowExpr, 4625 ColumnExpr, AttrLoc); 4626 #ifndef NDEBUG 4627 DependentSizedMatrixType *CanonCheck = 4628 
DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4629 assert(!CanonCheck && "Dependent-sized matrix canonical type broken"); 4630 #endif 4631 DependentSizedMatrixTypes.InsertNode(Canon, InsertPos); 4632 Types.push_back(Canon); 4633 } 4634 4635 // Already have a canonical version of the matrix type 4636 // 4637 // If it exactly matches the requested type, use it directly. 4638 if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr && 4639 Canon->getRowExpr() == ColumnExpr) 4640 return QualType(Canon, 0); 4641 4642 // Use Canon as the canonical type for newly-built type. 4643 DependentSizedMatrixType *New = new (*this, alignof(DependentSizedMatrixType)) 4644 DependentSizedMatrixType(ElementTy, QualType(Canon, 0), RowExpr, 4645 ColumnExpr, AttrLoc); 4646 Types.push_back(New); 4647 return QualType(New, 0); 4648 } 4649 4650 QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType, 4651 Expr *AddrSpaceExpr, 4652 SourceLocation AttrLoc) const { 4653 assert(AddrSpaceExpr->isInstantiationDependent()); 4654 4655 QualType canonPointeeType = getCanonicalType(PointeeType); 4656 4657 void *insertPos = nullptr; 4658 llvm::FoldingSetNodeID ID; 4659 DependentAddressSpaceType::Profile(ID, *this, canonPointeeType, 4660 AddrSpaceExpr); 4661 4662 DependentAddressSpaceType *canonTy = 4663 DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos); 4664 4665 if (!canonTy) { 4666 canonTy = new (*this, alignof(DependentAddressSpaceType)) 4667 DependentAddressSpaceType(canonPointeeType, QualType(), AddrSpaceExpr, 4668 AttrLoc); 4669 DependentAddressSpaceTypes.InsertNode(canonTy, insertPos); 4670 Types.push_back(canonTy); 4671 } 4672 4673 if (canonPointeeType == PointeeType && 4674 canonTy->getAddrSpaceExpr() == AddrSpaceExpr) 4675 return QualType(canonTy, 0); 4676 4677 auto *sugaredType = new (*this, alignof(DependentAddressSpaceType)) 4678 DependentAddressSpaceType(PointeeType, QualType(canonTy, 0), 4679 AddrSpaceExpr, AttrLoc); 4680 
Types.push_back(sugaredType); 4681 return QualType(sugaredType, 0); 4682 } 4683 4684 /// Determine whether \p T is canonical as the result type of a function. 4685 static bool isCanonicalResultType(QualType T) { 4686 return T.isCanonical() && 4687 (T.getObjCLifetime() == Qualifiers::OCL_None || 4688 T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone); 4689 } 4690 4691 /// getFunctionNoProtoType - Return a K&R style C function type like 'int()'. 4692 QualType 4693 ASTContext::getFunctionNoProtoType(QualType ResultTy, 4694 const FunctionType::ExtInfo &Info) const { 4695 // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter 4696 // functionality creates a function without a prototype regardless of 4697 // language mode (so it makes them even in C++). Once the rewriter has been 4698 // fixed, this assertion can be enabled again. 4699 //assert(!LangOpts.requiresStrictPrototypes() && 4700 // "strict prototypes are disabled"); 4701 4702 // Unique functions, to guarantee there is only one function of a particular 4703 // structure. 4704 llvm::FoldingSetNodeID ID; 4705 FunctionNoProtoType::Profile(ID, ResultTy, Info); 4706 4707 void *InsertPos = nullptr; 4708 if (FunctionNoProtoType *FT = 4709 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) 4710 return QualType(FT, 0); 4711 4712 QualType Canonical; 4713 if (!isCanonicalResultType(ResultTy)) { 4714 Canonical = 4715 getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info); 4716 4717 // Get the new insert position for the node we care about. 
4718 FunctionNoProtoType *NewIP = 4719 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4720 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4721 } 4722 4723 auto *New = new (*this, alignof(FunctionNoProtoType)) 4724 FunctionNoProtoType(ResultTy, Canonical, Info); 4725 Types.push_back(New); 4726 FunctionNoProtoTypes.InsertNode(New, InsertPos); 4727 return QualType(New, 0); 4728 } 4729 4730 CanQualType 4731 ASTContext::getCanonicalFunctionResultType(QualType ResultType) const { 4732 CanQualType CanResultType = getCanonicalType(ResultType); 4733 4734 // Canonical result types do not have ARC lifetime qualifiers. 4735 if (CanResultType.getQualifiers().hasObjCLifetime()) { 4736 Qualifiers Qs = CanResultType.getQualifiers(); 4737 Qs.removeObjCLifetime(); 4738 return CanQualType::CreateUnsafe( 4739 getQualifiedType(CanResultType.getUnqualifiedType(), Qs)); 4740 } 4741 4742 return CanResultType; 4743 } 4744 4745 static bool isCanonicalExceptionSpecification( 4746 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) { 4747 if (ESI.Type == EST_None) 4748 return true; 4749 if (!NoexceptInType) 4750 return false; 4751 4752 // C++17 onwards: exception specification is part of the type, as a simple 4753 // boolean "can this function type throw". 4754 if (ESI.Type == EST_BasicNoexcept) 4755 return true; 4756 4757 // A noexcept(expr) specification is (possibly) canonical if expr is 4758 // value-dependent. 4759 if (ESI.Type == EST_DependentNoexcept) 4760 return true; 4761 4762 // A dynamic exception specification is canonical if it only contains pack 4763 // expansions (so we can't tell whether it's non-throwing) and all its 4764 // contained types are canonical. 
4765 if (ESI.Type == EST_Dynamic) { 4766 bool AnyPackExpansions = false; 4767 for (QualType ET : ESI.Exceptions) { 4768 if (!ET.isCanonical()) 4769 return false; 4770 if (ET->getAs<PackExpansionType>()) 4771 AnyPackExpansions = true; 4772 } 4773 return AnyPackExpansions; 4774 } 4775 4776 return false; 4777 } 4778 4779 QualType ASTContext::getFunctionTypeInternal( 4780 QualType ResultTy, ArrayRef<QualType> ArgArray, 4781 const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const { 4782 size_t NumArgs = ArgArray.size(); 4783 4784 // Unique functions, to guarantee there is only one function of a particular 4785 // structure. 4786 llvm::FoldingSetNodeID ID; 4787 FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI, 4788 *this, true); 4789 4790 QualType Canonical; 4791 bool Unique = false; 4792 4793 void *InsertPos = nullptr; 4794 if (FunctionProtoType *FPT = 4795 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) { 4796 QualType Existing = QualType(FPT, 0); 4797 4798 // If we find a pre-existing equivalent FunctionProtoType, we can just reuse 4799 // it so long as our exception specification doesn't contain a dependent 4800 // noexcept expression, or we're just looking for a canonical type. 4801 // Otherwise, we're going to need to create a type 4802 // sugar node to hold the concrete expression. 4803 if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) || 4804 EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr()) 4805 return Existing; 4806 4807 // We need a new type sugar node for this one, to hold the new noexcept 4808 // expression. We do no canonicalization here, but that's OK since we don't 4809 // expect to see the same noexcept expression much more than once. 
4810 Canonical = getCanonicalType(Existing); 4811 Unique = true; 4812 } 4813 4814 bool NoexceptInType = getLangOpts().CPlusPlus17; 4815 bool IsCanonicalExceptionSpec = 4816 isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType); 4817 4818 // Determine whether the type being created is already canonical or not. 4819 bool isCanonical = !Unique && IsCanonicalExceptionSpec && 4820 isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn; 4821 for (unsigned i = 0; i != NumArgs && isCanonical; ++i) 4822 if (!ArgArray[i].isCanonicalAsParam()) 4823 isCanonical = false; 4824 4825 if (OnlyWantCanonical) 4826 assert(isCanonical && 4827 "given non-canonical parameters constructing canonical type"); 4828 4829 // If this type isn't canonical, get the canonical version of it if we don't 4830 // already have it. The exception spec is only partially part of the 4831 // canonical type, and only in C++17 onwards. 4832 if (!isCanonical && Canonical.isNull()) { 4833 SmallVector<QualType, 16> CanonicalArgs; 4834 CanonicalArgs.reserve(NumArgs); 4835 for (unsigned i = 0; i != NumArgs; ++i) 4836 CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i])); 4837 4838 llvm::SmallVector<QualType, 8> ExceptionTypeStorage; 4839 FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI; 4840 CanonicalEPI.HasTrailingReturn = false; 4841 4842 if (IsCanonicalExceptionSpec) { 4843 // Exception spec is already OK. 4844 } else if (NoexceptInType) { 4845 switch (EPI.ExceptionSpec.Type) { 4846 case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated: 4847 // We don't know yet. It shouldn't matter what we pick here; no-one 4848 // should ever look at this. 4849 [[fallthrough]]; 4850 case EST_None: case EST_MSAny: case EST_NoexceptFalse: 4851 CanonicalEPI.ExceptionSpec.Type = EST_None; 4852 break; 4853 4854 // A dynamic exception specification is almost always "not noexcept", 4855 // with the exception that a pack expansion might expand to no types. 
4856 case EST_Dynamic: { 4857 bool AnyPacks = false; 4858 for (QualType ET : EPI.ExceptionSpec.Exceptions) { 4859 if (ET->getAs<PackExpansionType>()) 4860 AnyPacks = true; 4861 ExceptionTypeStorage.push_back(getCanonicalType(ET)); 4862 } 4863 if (!AnyPacks) 4864 CanonicalEPI.ExceptionSpec.Type = EST_None; 4865 else { 4866 CanonicalEPI.ExceptionSpec.Type = EST_Dynamic; 4867 CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage; 4868 } 4869 break; 4870 } 4871 4872 case EST_DynamicNone: 4873 case EST_BasicNoexcept: 4874 case EST_NoexceptTrue: 4875 case EST_NoThrow: 4876 CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept; 4877 break; 4878 4879 case EST_DependentNoexcept: 4880 llvm_unreachable("dependent noexcept is already canonical"); 4881 } 4882 } else { 4883 CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo(); 4884 } 4885 4886 // Adjust the canonical function result type. 4887 CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy); 4888 Canonical = 4889 getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true); 4890 4891 // Get the new insert position for the node we care about. 4892 FunctionProtoType *NewIP = 4893 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4894 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4895 } 4896 4897 // Compute the needed size to hold this FunctionProtoType and the 4898 // various trailing objects. 
4899 auto ESH = FunctionProtoType::getExceptionSpecSize( 4900 EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size()); 4901 size_t Size = FunctionProtoType::totalSizeToAlloc< 4902 QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields, 4903 FunctionType::FunctionTypeArmAttributes, FunctionType::ExceptionType, 4904 Expr *, FunctionDecl *, FunctionProtoType::ExtParameterInfo, Qualifiers, 4905 FunctionEffect, EffectConditionExpr>( 4906 NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(), 4907 EPI.requiresFunctionProtoTypeArmAttributes(), ESH.NumExceptionType, 4908 ESH.NumExprPtr, ESH.NumFunctionDeclPtr, 4909 EPI.ExtParameterInfos ? NumArgs : 0, 4910 EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0, EPI.FunctionEffects.size(), 4911 EPI.FunctionEffects.conditions().size()); 4912 4913 auto *FTP = (FunctionProtoType *)Allocate(Size, alignof(FunctionProtoType)); 4914 FunctionProtoType::ExtProtoInfo newEPI = EPI; 4915 new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI); 4916 Types.push_back(FTP); 4917 if (!Unique) 4918 FunctionProtoTypes.InsertNode(FTP, InsertPos); 4919 if (!EPI.FunctionEffects.empty()) 4920 AnyFunctionEffects = true; 4921 return QualType(FTP, 0); 4922 } 4923 4924 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const { 4925 llvm::FoldingSetNodeID ID; 4926 PipeType::Profile(ID, T, ReadOnly); 4927 4928 void *InsertPos = nullptr; 4929 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos)) 4930 return QualType(PT, 0); 4931 4932 // If the pipe element type isn't canonical, this won't be a canonical type 4933 // either, so fill in the canonical type field. 4934 QualType Canonical; 4935 if (!T.isCanonical()) { 4936 Canonical = getPipeType(getCanonicalType(T), ReadOnly); 4937 4938 // Get the new insert position for the node we care about. 
4939 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos); 4940 assert(!NewIP && "Shouldn't be in the map!"); 4941 (void)NewIP; 4942 } 4943 auto *New = new (*this, alignof(PipeType)) PipeType(T, Canonical, ReadOnly); 4944 Types.push_back(New); 4945 PipeTypes.InsertNode(New, InsertPos); 4946 return QualType(New, 0); 4947 } 4948 4949 QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const { 4950 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space. 4951 return LangOpts.OpenCL ? getAddrSpaceQualType(Ty, LangAS::opencl_constant) 4952 : Ty; 4953 } 4954 4955 QualType ASTContext::getReadPipeType(QualType T) const { 4956 return getPipeType(T, true); 4957 } 4958 4959 QualType ASTContext::getWritePipeType(QualType T) const { 4960 return getPipeType(T, false); 4961 } 4962 4963 QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const { 4964 llvm::FoldingSetNodeID ID; 4965 BitIntType::Profile(ID, IsUnsigned, NumBits); 4966 4967 void *InsertPos = nullptr; 4968 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4969 return QualType(EIT, 0); 4970 4971 auto *New = new (*this, alignof(BitIntType)) BitIntType(IsUnsigned, NumBits); 4972 BitIntTypes.InsertNode(New, InsertPos); 4973 Types.push_back(New); 4974 return QualType(New, 0); 4975 } 4976 4977 QualType ASTContext::getDependentBitIntType(bool IsUnsigned, 4978 Expr *NumBitsExpr) const { 4979 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent"); 4980 llvm::FoldingSetNodeID ID; 4981 DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr); 4982 4983 void *InsertPos = nullptr; 4984 if (DependentBitIntType *Existing = 4985 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4986 return QualType(Existing, 0); 4987 4988 auto *New = new (*this, alignof(DependentBitIntType)) 4989 DependentBitIntType(IsUnsigned, NumBitsExpr); 4990 DependentBitIntTypes.InsertNode(New, InsertPos); 4991 4992 Types.push_back(New); 4993 return 
QualType(New, 0); 4994 } 4995 4996 #ifndef NDEBUG 4997 static bool NeedsInjectedClassNameType(const RecordDecl *D) { 4998 if (!isa<CXXRecordDecl>(D)) return false; 4999 const auto *RD = cast<CXXRecordDecl>(D); 5000 if (isa<ClassTemplatePartialSpecializationDecl>(RD)) 5001 return true; 5002 if (RD->getDescribedClassTemplate() && 5003 !isa<ClassTemplateSpecializationDecl>(RD)) 5004 return true; 5005 return false; 5006 } 5007 #endif 5008 5009 /// getInjectedClassNameType - Return the unique reference to the 5010 /// injected class name type for the specified templated declaration. 5011 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl, 5012 QualType TST) const { 5013 assert(NeedsInjectedClassNameType(Decl)); 5014 if (Decl->TypeForDecl) { 5015 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 5016 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) { 5017 assert(PrevDecl->TypeForDecl && "previous declaration has no type"); 5018 Decl->TypeForDecl = PrevDecl->TypeForDecl; 5019 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 5020 } else { 5021 Type *newType = new (*this, alignof(InjectedClassNameType)) 5022 InjectedClassNameType(Decl, TST); 5023 Decl->TypeForDecl = newType; 5024 Types.push_back(newType); 5025 } 5026 return QualType(Decl->TypeForDecl, 0); 5027 } 5028 5029 /// getTypeDeclType - Return the unique reference to the type for the 5030 /// specified type declaration. 
QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
  assert(Decl && "Passed null for Decl param");
  assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");

  // Dispatch on the concrete kind of TypeDecl; each branch delegates to the
  // corresponding uniquing accessor, which caches into Decl->TypeForDecl.
  if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
    return getTypedefType(Typedef);

  assert(!isa<TemplateTypeParmDecl>(Decl) &&
         "Template type parameter types are always available.");

  if (const auto *Record = dyn_cast<RecordDecl>(Decl)) {
    assert(Record->isFirstDecl() && "struct/union has previous declaration");
    assert(!NeedsInjectedClassNameType(Record));
    return getRecordType(Record);
  } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) {
    assert(Enum->isFirstDecl() && "enum has previous declaration");
    return getEnumType(Enum);
  } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
    return getUnresolvedUsingType(Using);
  } else
    llvm_unreachable("TypeDecl without a type?");

  // NOTE(review): unreachable — every branch above returns or aborts; kept
  // only to satisfy compilers that don't model llvm_unreachable.
  return QualType(Decl->TypeForDecl, 0);
}

/// getTypedefType - Return the unique reference to the type for the
/// specified typedef name decl.
QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl,
                                    QualType Underlying) const {
  if (!Decl->TypeForDecl) {
    // First request for this declaration: build the unique TypedefType and
    // cache it on the declaration itself.
    if (Underlying.isNull())
      Underlying = Decl->getUnderlyingType();
    auto *NewType = new (*this, alignof(TypedefType)) TypedefType(
        Type::Typedef, Decl, QualType(), getCanonicalType(Underlying));
    Decl->TypeForDecl = NewType;
    Types.push_back(NewType);
    return QualType(NewType, 0);
  }
  // Common case: the requested underlying type matches the declaration's own,
  // so the cached node can be reused directly.
  if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying)
    return QualType(Decl->TypeForDecl, 0);
  assert(hasSameType(Decl->getUnderlyingType(), Underlying));

  // "Divergent" case: same typedef declaration but a different (though
  // semantically equivalent) underlying type. These are uniqued separately
  // in the TypedefTypes folding set.
  llvm::FoldingSetNodeID ID;
  TypedefType::Profile(ID, Decl, Underlying);

  void *InsertPos = nullptr;
  if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) {
    assert(!T->typeMatchesDecl() &&
           "non-divergent case should be handled with TypeDecl");
    return QualType(T, 0);
  }

  // Allocate with a trailing QualType slot to store the divergent
  // underlying type.
  void *Mem = Allocate(TypedefType::totalSizeToAlloc<QualType>(true),
                       alignof(TypedefType));
  auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying,
                                        getCanonicalType(Underlying));
  TypedefTypes.InsertNode(NewType, InsertPos);
  Types.push_back(NewType);
  return QualType(NewType, 0);
}

/// Return the unique reference to the type denoted by a using-shadow
/// declaration (uniqued on the shadow decl plus the underlying type).
QualType ASTContext::getUsingType(const UsingShadowDecl *Found,
                                  QualType Underlying) const {
  llvm::FoldingSetNodeID ID;
  UsingType::Profile(ID, Found, Underlying);

  void *InsertPos = nullptr;
  if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  const Type *TypeForDecl =
      cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl();

  assert(!Underlying.hasLocalQualifiers());
  QualType Canon = Underlying->getCanonicalTypeInternal();
  assert(TypeForDecl->getCanonicalTypeInternal() == Canon);

  // When the underlying type is exactly the target declaration's type, drop
  // it to null so no trailing storage is allocated for it below.
  if (Underlying.getTypePtr() == TypeForDecl)
    Underlying = QualType();

  void *Mem =
      Allocate(UsingType::totalSizeToAlloc<QualType>(!Underlying.isNull()),
               alignof(UsingType));
  UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon);
  Types.push_back(NewType);
  UsingTypes.InsertNode(NewType, InsertPos);
  return QualType(NewType, 0);
}

/// Return the unique RecordType for a struct/union/class declaration,
/// sharing the node across redeclarations.
QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  // Reuse the node already built for a previous declaration, if any.
  if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
    if (PrevDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);

  auto *newType = new (*this, alignof(RecordType)) RecordType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}

/// Return the unique EnumType for an enum declaration, sharing the node
/// across redeclarations.
QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  // Reuse the node already built for a previous declaration, if any.
  if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
    if (PrevDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);

  auto *newType = new (*this, alignof(EnumType)) EnumType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}

/// Return the unique type for an unresolved using-typename declaration,
/// sharing the node with the canonical declaration.
QualType ASTContext::getUnresolvedUsingType(
    const UnresolvedUsingTypenameDecl *Decl) const {
  if (Decl->TypeForDecl)
    return QualType(Decl->TypeForDecl, 0);

  if (const UnresolvedUsingTypenameDecl *CanonicalDecl =
          Decl->getCanonicalDecl())
    if (CanonicalDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0);

  Type *newType =
      new (*this, alignof(UnresolvedUsingType)) UnresolvedUsingType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}

/// Return the uniqued AttributedType wrapping \p modifiedType with
/// \p attrKind; \p equivalentType supplies the canonical type.
QualType ASTContext::getAttributedType(attr::Kind attrKind,
                                       QualType modifiedType,
                                       QualType equivalentType) const {
  llvm::FoldingSetNodeID id;
  AttributedType::Profile(id, attrKind, modifiedType, equivalentType);

  void *insertPos = nullptr;
  AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
  if (type) return QualType(type, 0);

  QualType canon = getCanonicalType(equivalentType);
  type = new (*this, alignof(AttributedType))
      AttributedType(canon, attrKind, modifiedType, equivalentType);

  Types.push_back(type);
  AttributedTypes.InsertNode(type, insertPos);

  return QualType(type, 0);
}

/// Return the uniqued BTFTagAttributedType wrapping \p Wrapped with the
/// given btf_type_tag attribute.
QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr,
                                             QualType Wrapped) {
  llvm::FoldingSetNodeID ID;
  BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr);

  void *InsertPos = nullptr;
  BTFTagAttributedType *Ty =
      BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (Ty)
    return QualType(Ty, 0);

  QualType Canon = getCanonicalType(Wrapped);
  Ty = new (*this, alignof(BTFTagAttributedType))
      BTFTagAttributedType(Canon, Wrapped, BTFAttr);

  Types.push_back(Ty);
  BTFTagAttributedTypes.InsertNode(Ty, InsertPos);

  return QualType(Ty, 0);
}

/// Retrieve a substitution-result type.
5204 QualType ASTContext::getSubstTemplateTypeParmType( 5205 QualType Replacement, Decl *AssociatedDecl, unsigned Index, 5206 std::optional<unsigned> PackIndex) const { 5207 llvm::FoldingSetNodeID ID; 5208 SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index, 5209 PackIndex); 5210 void *InsertPos = nullptr; 5211 SubstTemplateTypeParmType *SubstParm = 5212 SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); 5213 5214 if (!SubstParm) { 5215 void *Mem = Allocate(SubstTemplateTypeParmType::totalSizeToAlloc<QualType>( 5216 !Replacement.isCanonical()), 5217 alignof(SubstTemplateTypeParmType)); 5218 SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl, 5219 Index, PackIndex); 5220 Types.push_back(SubstParm); 5221 SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos); 5222 } 5223 5224 return QualType(SubstParm, 0); 5225 } 5226 5227 /// Retrieve a 5228 QualType 5229 ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl, 5230 unsigned Index, bool Final, 5231 const TemplateArgument &ArgPack) { 5232 #ifndef NDEBUG 5233 for (const auto &P : ArgPack.pack_elements()) 5234 assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type"); 5235 #endif 5236 5237 llvm::FoldingSetNodeID ID; 5238 SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final, 5239 ArgPack); 5240 void *InsertPos = nullptr; 5241 if (SubstTemplateTypeParmPackType *SubstParm = 5242 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos)) 5243 return QualType(SubstParm, 0); 5244 5245 QualType Canon; 5246 { 5247 TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack); 5248 if (!AssociatedDecl->isCanonicalDecl() || 5249 !CanonArgPack.structurallyEquals(ArgPack)) { 5250 Canon = getSubstTemplateTypeParmPackType( 5251 AssociatedDecl->getCanonicalDecl(), Index, Final, CanonArgPack); 5252 [[maybe_unused]] const auto *Nothing = 5253 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos); 
5254 assert(!Nothing); 5255 } 5256 } 5257 5258 auto *SubstParm = new (*this, alignof(SubstTemplateTypeParmPackType)) 5259 SubstTemplateTypeParmPackType(Canon, AssociatedDecl, Index, Final, 5260 ArgPack); 5261 Types.push_back(SubstParm); 5262 SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos); 5263 return QualType(SubstParm, 0); 5264 } 5265 5266 /// Retrieve the template type parameter type for a template 5267 /// parameter or parameter pack with the given depth, index, and (optionally) 5268 /// name. 5269 QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index, 5270 bool ParameterPack, 5271 TemplateTypeParmDecl *TTPDecl) const { 5272 llvm::FoldingSetNodeID ID; 5273 TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl); 5274 void *InsertPos = nullptr; 5275 TemplateTypeParmType *TypeParm 5276 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); 5277 5278 if (TypeParm) 5279 return QualType(TypeParm, 0); 5280 5281 if (TTPDecl) { 5282 QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack); 5283 TypeParm = new (*this, alignof(TemplateTypeParmType)) 5284 TemplateTypeParmType(TTPDecl, Canon); 5285 5286 TemplateTypeParmType *TypeCheck 5287 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); 5288 assert(!TypeCheck && "Template type parameter canonical type broken"); 5289 (void)TypeCheck; 5290 } else 5291 TypeParm = new (*this, alignof(TemplateTypeParmType)) 5292 TemplateTypeParmType(Depth, Index, ParameterPack); 5293 5294 Types.push_back(TypeParm); 5295 TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos); 5296 5297 return QualType(TypeParm, 0); 5298 } 5299 5300 TypeSourceInfo * 5301 ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name, 5302 SourceLocation NameLoc, 5303 const TemplateArgumentListInfo &Args, 5304 QualType Underlying) const { 5305 assert(!Name.getAsDependentTemplateName() && 5306 "No dependent template names here!"); 5307 QualType TST = 5308 
getTemplateSpecializationType(Name, Args.arguments(), Underlying); 5309 5310 TypeSourceInfo *DI = CreateTypeSourceInfo(TST); 5311 TemplateSpecializationTypeLoc TL = 5312 DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>(); 5313 TL.setTemplateKeywordLoc(SourceLocation()); 5314 TL.setTemplateNameLoc(NameLoc); 5315 TL.setLAngleLoc(Args.getLAngleLoc()); 5316 TL.setRAngleLoc(Args.getRAngleLoc()); 5317 for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i) 5318 TL.setArgLocInfo(i, Args[i].getLocInfo()); 5319 return DI; 5320 } 5321 5322 QualType 5323 ASTContext::getTemplateSpecializationType(TemplateName Template, 5324 ArrayRef<TemplateArgumentLoc> Args, 5325 QualType Underlying) const { 5326 assert(!Template.getAsDependentTemplateName() && 5327 "No dependent template names here!"); 5328 5329 SmallVector<TemplateArgument, 4> ArgVec; 5330 ArgVec.reserve(Args.size()); 5331 for (const TemplateArgumentLoc &Arg : Args) 5332 ArgVec.push_back(Arg.getArgument()); 5333 5334 return getTemplateSpecializationType(Template, ArgVec, Underlying); 5335 } 5336 5337 #ifndef NDEBUG 5338 static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) { 5339 for (const TemplateArgument &Arg : Args) 5340 if (Arg.isPackExpansion()) 5341 return true; 5342 5343 return true; 5344 } 5345 #endif 5346 5347 QualType 5348 ASTContext::getTemplateSpecializationType(TemplateName Template, 5349 ArrayRef<TemplateArgument> Args, 5350 QualType Underlying) const { 5351 assert(!Template.getAsDependentTemplateName() && 5352 "No dependent template names here!"); 5353 5354 const auto *TD = Template.getAsTemplateDecl(); 5355 bool IsTypeAlias = TD && TD->isTypeAlias(); 5356 QualType CanonType; 5357 if (!Underlying.isNull()) 5358 CanonType = getCanonicalType(Underlying); 5359 else { 5360 // We can get here with an alias template when the specialization contains 5361 // a pack expansion that does not match up with a parameter pack. 
5362 assert((!IsTypeAlias || hasAnyPackExpansions(Args)) && 5363 "Caller must compute aliased type"); 5364 IsTypeAlias = false; 5365 CanonType = getCanonicalTemplateSpecializationType(Template, Args); 5366 } 5367 5368 // Allocate the (non-canonical) template specialization type, but don't 5369 // try to unique it: these types typically have location information that 5370 // we don't unique and don't want to lose. 5371 void *Mem = Allocate(sizeof(TemplateSpecializationType) + 5372 sizeof(TemplateArgument) * Args.size() + 5373 (IsTypeAlias ? sizeof(QualType) : 0), 5374 alignof(TemplateSpecializationType)); 5375 auto *Spec 5376 = new (Mem) TemplateSpecializationType(Template, Args, CanonType, 5377 IsTypeAlias ? Underlying : QualType()); 5378 5379 Types.push_back(Spec); 5380 return QualType(Spec, 0); 5381 } 5382 5383 QualType ASTContext::getCanonicalTemplateSpecializationType( 5384 TemplateName Template, ArrayRef<TemplateArgument> Args) const { 5385 assert(!Template.getAsDependentTemplateName() && 5386 "No dependent template names here!"); 5387 5388 // Build the canonical template specialization type. 5389 TemplateName CanonTemplate = getCanonicalTemplateName(Template); 5390 bool AnyNonCanonArgs = false; 5391 auto CanonArgs = 5392 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); 5393 5394 // Determine whether this canonical template specialization type already 5395 // exists. 5396 llvm::FoldingSetNodeID ID; 5397 TemplateSpecializationType::Profile(ID, CanonTemplate, 5398 CanonArgs, *this); 5399 5400 void *InsertPos = nullptr; 5401 TemplateSpecializationType *Spec 5402 = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5403 5404 if (!Spec) { 5405 // Allocate a new canonical template specialization type. 
5406 void *Mem = Allocate((sizeof(TemplateSpecializationType) + 5407 sizeof(TemplateArgument) * CanonArgs.size()), 5408 alignof(TemplateSpecializationType)); 5409 Spec = new (Mem) TemplateSpecializationType(CanonTemplate, 5410 CanonArgs, 5411 QualType(), QualType()); 5412 Types.push_back(Spec); 5413 TemplateSpecializationTypes.InsertNode(Spec, InsertPos); 5414 } 5415 5416 assert(Spec->isDependentType() && 5417 "Non-dependent template-id type must have a canonical type"); 5418 return QualType(Spec, 0); 5419 } 5420 5421 QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword, 5422 NestedNameSpecifier *NNS, 5423 QualType NamedType, 5424 TagDecl *OwnedTagDecl) const { 5425 llvm::FoldingSetNodeID ID; 5426 ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl); 5427 5428 void *InsertPos = nullptr; 5429 ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 5430 if (T) 5431 return QualType(T, 0); 5432 5433 QualType Canon = NamedType; 5434 if (!Canon.isCanonical()) { 5435 Canon = getCanonicalType(NamedType); 5436 ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 5437 assert(!CheckT && "Elaborated canonical type broken"); 5438 (void)CheckT; 5439 } 5440 5441 void *Mem = 5442 Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl), 5443 alignof(ElaboratedType)); 5444 T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl); 5445 5446 Types.push_back(T); 5447 ElaboratedTypes.InsertNode(T, InsertPos); 5448 return QualType(T, 0); 5449 } 5450 5451 QualType 5452 ASTContext::getParenType(QualType InnerType) const { 5453 llvm::FoldingSetNodeID ID; 5454 ParenType::Profile(ID, InnerType); 5455 5456 void *InsertPos = nullptr; 5457 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5458 if (T) 5459 return QualType(T, 0); 5460 5461 QualType Canon = InnerType; 5462 if (!Canon.isCanonical()) { 5463 Canon = getCanonicalType(InnerType); 5464 ParenType *CheckT = 
ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5465 assert(!CheckT && "Paren canonical type broken"); 5466 (void)CheckT; 5467 } 5468 5469 T = new (*this, alignof(ParenType)) ParenType(InnerType, Canon); 5470 Types.push_back(T); 5471 ParenTypes.InsertNode(T, InsertPos); 5472 return QualType(T, 0); 5473 } 5474 5475 QualType 5476 ASTContext::getMacroQualifiedType(QualType UnderlyingTy, 5477 const IdentifierInfo *MacroII) const { 5478 QualType Canon = UnderlyingTy; 5479 if (!Canon.isCanonical()) 5480 Canon = getCanonicalType(UnderlyingTy); 5481 5482 auto *newType = new (*this, alignof(MacroQualifiedType)) 5483 MacroQualifiedType(UnderlyingTy, Canon, MacroII); 5484 Types.push_back(newType); 5485 return QualType(newType, 0); 5486 } 5487 5488 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, 5489 NestedNameSpecifier *NNS, 5490 const IdentifierInfo *Name, 5491 QualType Canon) const { 5492 if (Canon.isNull()) { 5493 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5494 if (CanonNNS != NNS) 5495 Canon = getDependentNameType(Keyword, CanonNNS, Name); 5496 } 5497 5498 llvm::FoldingSetNodeID ID; 5499 DependentNameType::Profile(ID, Keyword, NNS, Name); 5500 5501 void *InsertPos = nullptr; 5502 DependentNameType *T 5503 = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); 5504 if (T) 5505 return QualType(T, 0); 5506 5507 T = new (*this, alignof(DependentNameType)) 5508 DependentNameType(Keyword, NNS, Name, Canon); 5509 Types.push_back(T); 5510 DependentNameTypes.InsertNode(T, InsertPos); 5511 return QualType(T, 0); 5512 } 5513 5514 QualType ASTContext::getDependentTemplateSpecializationType( 5515 ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, 5516 const IdentifierInfo *Name, ArrayRef<TemplateArgumentLoc> Args) const { 5517 // TODO: avoid this copy 5518 SmallVector<TemplateArgument, 16> ArgCopy; 5519 for (unsigned I = 0, E = Args.size(); I != E; ++I) 5520 ArgCopy.push_back(Args[I].getArgument()); 5521 return 
getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy); 5522 } 5523 5524 QualType 5525 ASTContext::getDependentTemplateSpecializationType( 5526 ElaboratedTypeKeyword Keyword, 5527 NestedNameSpecifier *NNS, 5528 const IdentifierInfo *Name, 5529 ArrayRef<TemplateArgument> Args) const { 5530 assert((!NNS || NNS->isDependent()) && 5531 "nested-name-specifier must be dependent"); 5532 5533 llvm::FoldingSetNodeID ID; 5534 DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS, 5535 Name, Args); 5536 5537 void *InsertPos = nullptr; 5538 DependentTemplateSpecializationType *T 5539 = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5540 if (T) 5541 return QualType(T, 0); 5542 5543 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5544 5545 ElaboratedTypeKeyword CanonKeyword = Keyword; 5546 if (Keyword == ElaboratedTypeKeyword::None) 5547 CanonKeyword = ElaboratedTypeKeyword::Typename; 5548 5549 bool AnyNonCanonArgs = false; 5550 auto CanonArgs = 5551 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); 5552 5553 QualType Canon; 5554 if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) { 5555 Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS, 5556 Name, 5557 CanonArgs); 5558 5559 // Find the insert position again. 
5560 [[maybe_unused]] auto *Nothing = 5561 DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5562 assert(!Nothing && "canonical type broken"); 5563 } 5564 5565 void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) + 5566 sizeof(TemplateArgument) * Args.size()), 5567 alignof(DependentTemplateSpecializationType)); 5568 T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS, 5569 Name, Args, Canon); 5570 Types.push_back(T); 5571 DependentTemplateSpecializationTypes.InsertNode(T, InsertPos); 5572 return QualType(T, 0); 5573 } 5574 5575 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) { 5576 TemplateArgument Arg; 5577 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) { 5578 QualType ArgType = getTypeDeclType(TTP); 5579 if (TTP->isParameterPack()) 5580 ArgType = getPackExpansionType(ArgType, std::nullopt); 5581 5582 Arg = TemplateArgument(ArgType); 5583 } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) { 5584 QualType T = 5585 NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this); 5586 // For class NTTPs, ensure we include the 'const' so the type matches that 5587 // of a real template argument. 5588 // FIXME: It would be more faithful to model this as something like an 5589 // lvalue-to-rvalue conversion applied to a const-qualified lvalue. 
5590 if (T->isRecordType()) 5591 T.addConst(); 5592 Expr *E = new (*this) DeclRefExpr( 5593 *this, NTTP, /*RefersToEnclosingVariableOrCapture*/ false, T, 5594 Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation()); 5595 5596 if (NTTP->isParameterPack()) 5597 E = new (*this) 5598 PackExpansionExpr(DependentTy, E, NTTP->getLocation(), std::nullopt); 5599 Arg = TemplateArgument(E); 5600 } else { 5601 auto *TTP = cast<TemplateTemplateParmDecl>(Param); 5602 TemplateName Name = getQualifiedTemplateName( 5603 nullptr, /*TemplateKeyword=*/false, TemplateName(TTP)); 5604 if (TTP->isParameterPack()) 5605 Arg = TemplateArgument(Name, std::optional<unsigned>()); 5606 else 5607 Arg = TemplateArgument(Name); 5608 } 5609 5610 if (Param->isTemplateParameterPack()) 5611 Arg = TemplateArgument::CreatePackCopy(*this, Arg); 5612 5613 return Arg; 5614 } 5615 5616 void 5617 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params, 5618 SmallVectorImpl<TemplateArgument> &Args) { 5619 Args.reserve(Args.size() + Params->size()); 5620 5621 for (NamedDecl *Param : *Params) 5622 Args.push_back(getInjectedTemplateArg(Param)); 5623 } 5624 5625 QualType ASTContext::getPackExpansionType(QualType Pattern, 5626 std::optional<unsigned> NumExpansions, 5627 bool ExpectPackInType) { 5628 assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) && 5629 "Pack expansions must expand one or more parameter packs"); 5630 5631 llvm::FoldingSetNodeID ID; 5632 PackExpansionType::Profile(ID, Pattern, NumExpansions); 5633 5634 void *InsertPos = nullptr; 5635 PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5636 if (T) 5637 return QualType(T, 0); 5638 5639 QualType Canon; 5640 if (!Pattern.isCanonical()) { 5641 Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions, 5642 /*ExpectPackInType=*/false); 5643 5644 // Find the insert position again, in case we inserted an element into 5645 // PackExpansionTypes and invalidated our 
insert position. 5646 PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5647 } 5648 5649 T = new (*this, alignof(PackExpansionType)) 5650 PackExpansionType(Pattern, Canon, NumExpansions); 5651 Types.push_back(T); 5652 PackExpansionTypes.InsertNode(T, InsertPos); 5653 return QualType(T, 0); 5654 } 5655 5656 /// CmpProtocolNames - Comparison predicate for sorting protocols 5657 /// alphabetically. 5658 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS, 5659 ObjCProtocolDecl *const *RHS) { 5660 return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName()); 5661 } 5662 5663 static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) { 5664 if (Protocols.empty()) return true; 5665 5666 if (Protocols[0]->getCanonicalDecl() != Protocols[0]) 5667 return false; 5668 5669 for (unsigned i = 1; i != Protocols.size(); ++i) 5670 if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 || 5671 Protocols[i]->getCanonicalDecl() != Protocols[i]) 5672 return false; 5673 return true; 5674 } 5675 5676 static void 5677 SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) { 5678 // Sort protocols, keyed by name. 5679 llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames); 5680 5681 // Canonicalize. 5682 for (ObjCProtocolDecl *&P : Protocols) 5683 P = P->getCanonicalDecl(); 5684 5685 // Remove duplicates. 
5686 auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end()); 5687 Protocols.erase(ProtocolsEnd, Protocols.end()); 5688 } 5689 5690 QualType ASTContext::getObjCObjectType(QualType BaseType, 5691 ObjCProtocolDecl * const *Protocols, 5692 unsigned NumProtocols) const { 5693 return getObjCObjectType(BaseType, {}, 5694 llvm::ArrayRef(Protocols, NumProtocols), 5695 /*isKindOf=*/false); 5696 } 5697 5698 QualType ASTContext::getObjCObjectType( 5699 QualType baseType, 5700 ArrayRef<QualType> typeArgs, 5701 ArrayRef<ObjCProtocolDecl *> protocols, 5702 bool isKindOf) const { 5703 // If the base type is an interface and there aren't any protocols or 5704 // type arguments to add, then the interface type will do just fine. 5705 if (typeArgs.empty() && protocols.empty() && !isKindOf && 5706 isa<ObjCInterfaceType>(baseType)) 5707 return baseType; 5708 5709 // Look in the folding set for an existing type. 5710 llvm::FoldingSetNodeID ID; 5711 ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf); 5712 void *InsertPos = nullptr; 5713 if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos)) 5714 return QualType(QT, 0); 5715 5716 // Determine the type arguments to be used for canonicalization, 5717 // which may be explicitly specified here or written on the base 5718 // type. 5719 ArrayRef<QualType> effectiveTypeArgs = typeArgs; 5720 if (effectiveTypeArgs.empty()) { 5721 if (const auto *baseObject = baseType->getAs<ObjCObjectType>()) 5722 effectiveTypeArgs = baseObject->getTypeArgs(); 5723 } 5724 5725 // Build the canonical type, which has the canonical base type and a 5726 // sorted-and-uniqued list of protocols and the type arguments 5727 // canonicalized. 
5728 QualType canonical; 5729 bool typeArgsAreCanonical = llvm::all_of( 5730 effectiveTypeArgs, [&](QualType type) { return type.isCanonical(); }); 5731 bool protocolsSorted = areSortedAndUniqued(protocols); 5732 if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) { 5733 // Determine the canonical type arguments. 5734 ArrayRef<QualType> canonTypeArgs; 5735 SmallVector<QualType, 4> canonTypeArgsVec; 5736 if (!typeArgsAreCanonical) { 5737 canonTypeArgsVec.reserve(effectiveTypeArgs.size()); 5738 for (auto typeArg : effectiveTypeArgs) 5739 canonTypeArgsVec.push_back(getCanonicalType(typeArg)); 5740 canonTypeArgs = canonTypeArgsVec; 5741 } else { 5742 canonTypeArgs = effectiveTypeArgs; 5743 } 5744 5745 ArrayRef<ObjCProtocolDecl *> canonProtocols; 5746 SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec; 5747 if (!protocolsSorted) { 5748 canonProtocolsVec.append(protocols.begin(), protocols.end()); 5749 SortAndUniqueProtocols(canonProtocolsVec); 5750 canonProtocols = canonProtocolsVec; 5751 } else { 5752 canonProtocols = protocols; 5753 } 5754 5755 canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs, 5756 canonProtocols, isKindOf); 5757 5758 // Regenerate InsertPos. 5759 ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos); 5760 } 5761 5762 unsigned size = sizeof(ObjCObjectTypeImpl); 5763 size += typeArgs.size() * sizeof(QualType); 5764 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5765 void *mem = Allocate(size, alignof(ObjCObjectTypeImpl)); 5766 auto *T = 5767 new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols, 5768 isKindOf); 5769 5770 Types.push_back(T); 5771 ObjCObjectTypes.InsertNode(T, InsertPos); 5772 return QualType(T, 0); 5773 } 5774 5775 /// Apply Objective-C protocol qualifiers to the given type. 5776 /// If this is for the canonical type of a type parameter, we can apply 5777 /// protocol qualifiers on the ObjCObjectPointerType. 
5778 QualType 5779 ASTContext::applyObjCProtocolQualifiers(QualType type, 5780 ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError, 5781 bool allowOnPointerType) const { 5782 hasError = false; 5783 5784 if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) { 5785 return getObjCTypeParamType(objT->getDecl(), protocols); 5786 } 5787 5788 // Apply protocol qualifiers to ObjCObjectPointerType. 5789 if (allowOnPointerType) { 5790 if (const auto *objPtr = 5791 dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) { 5792 const ObjCObjectType *objT = objPtr->getObjectType(); 5793 // Merge protocol lists and construct ObjCObjectType. 5794 SmallVector<ObjCProtocolDecl*, 8> protocolsVec; 5795 protocolsVec.append(objT->qual_begin(), 5796 objT->qual_end()); 5797 protocolsVec.append(protocols.begin(), protocols.end()); 5798 ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec; 5799 type = getObjCObjectType( 5800 objT->getBaseType(), 5801 objT->getTypeArgsAsWritten(), 5802 protocols, 5803 objT->isKindOfTypeAsWritten()); 5804 return getObjCObjectPointerType(type); 5805 } 5806 } 5807 5808 // Apply protocol qualifiers to ObjCObjectType. 5809 if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){ 5810 // FIXME: Check for protocols to which the class type is already 5811 // known to conform. 5812 5813 return getObjCObjectType(objT->getBaseType(), 5814 objT->getTypeArgsAsWritten(), 5815 protocols, 5816 objT->isKindOfTypeAsWritten()); 5817 } 5818 5819 // If the canonical type is ObjCObjectType, ... 5820 if (type->isObjCObjectType()) { 5821 // Silently overwrite any existing protocol qualifiers. 5822 // TODO: determine whether that's the right thing to do. 5823 5824 // FIXME: Check for protocols to which the class type is already 5825 // known to conform. 
5826 return getObjCObjectType(type, {}, protocols, false); 5827 } 5828 5829 // id<protocol-list> 5830 if (type->isObjCIdType()) { 5831 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5832 type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols, 5833 objPtr->isKindOfType()); 5834 return getObjCObjectPointerType(type); 5835 } 5836 5837 // Class<protocol-list> 5838 if (type->isObjCClassType()) { 5839 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5840 type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols, 5841 objPtr->isKindOfType()); 5842 return getObjCObjectPointerType(type); 5843 } 5844 5845 hasError = true; 5846 return type; 5847 } 5848 5849 QualType 5850 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl, 5851 ArrayRef<ObjCProtocolDecl *> protocols) const { 5852 // Look in the folding set for an existing type. 5853 llvm::FoldingSetNodeID ID; 5854 ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols); 5855 void *InsertPos = nullptr; 5856 if (ObjCTypeParamType *TypeParam = 5857 ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos)) 5858 return QualType(TypeParam, 0); 5859 5860 // We canonicalize to the underlying type. 5861 QualType Canonical = getCanonicalType(Decl->getUnderlyingType()); 5862 if (!protocols.empty()) { 5863 // Apply the protocol qualifers. 
5864 bool hasError; 5865 Canonical = getCanonicalType(applyObjCProtocolQualifiers( 5866 Canonical, protocols, hasError, true /*allowOnPointerType*/)); 5867 assert(!hasError && "Error when apply protocol qualifier to bound type"); 5868 } 5869 5870 unsigned size = sizeof(ObjCTypeParamType); 5871 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5872 void *mem = Allocate(size, alignof(ObjCTypeParamType)); 5873 auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols); 5874 5875 Types.push_back(newType); 5876 ObjCTypeParamTypes.InsertNode(newType, InsertPos); 5877 return QualType(newType, 0); 5878 } 5879 5880 void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig, 5881 ObjCTypeParamDecl *New) const { 5882 New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType())); 5883 // Update TypeForDecl after updating TypeSourceInfo. 5884 auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl()); 5885 SmallVector<ObjCProtocolDecl *, 8> protocols; 5886 protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end()); 5887 QualType UpdatedTy = getObjCTypeParamType(New, protocols); 5888 New->setTypeForDecl(UpdatedTy.getTypePtr()); 5889 } 5890 5891 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's 5892 /// protocol list adopt all protocols in QT's qualified-id protocol 5893 /// list. 5894 bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT, 5895 ObjCInterfaceDecl *IC) { 5896 if (!QT->isObjCQualifiedIdType()) 5897 return false; 5898 5899 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) { 5900 // If both the right and left sides have qualifiers. 
5901 for (auto *Proto : OPT->quals()) { 5902 if (!IC->ClassImplementsProtocol(Proto, false)) 5903 return false; 5904 } 5905 return true; 5906 } 5907 return false; 5908 } 5909 5910 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in 5911 /// QT's qualified-id protocol list adopt all protocols in IDecl's list 5912 /// of protocols. 5913 bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT, 5914 ObjCInterfaceDecl *IDecl) { 5915 if (!QT->isObjCQualifiedIdType()) 5916 return false; 5917 const auto *OPT = QT->getAs<ObjCObjectPointerType>(); 5918 if (!OPT) 5919 return false; 5920 if (!IDecl->hasDefinition()) 5921 return false; 5922 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols; 5923 CollectInheritedProtocols(IDecl, InheritedProtocols); 5924 if (InheritedProtocols.empty()) 5925 return false; 5926 // Check that if every protocol in list of id<plist> conforms to a protocol 5927 // of IDecl's, then bridge casting is ok. 5928 bool Conforms = false; 5929 for (auto *Proto : OPT->quals()) { 5930 Conforms = false; 5931 for (auto *PI : InheritedProtocols) { 5932 if (ProtocolCompatibleWithProtocol(Proto, PI)) { 5933 Conforms = true; 5934 break; 5935 } 5936 } 5937 if (!Conforms) 5938 break; 5939 } 5940 if (Conforms) 5941 return true; 5942 5943 for (auto *PI : InheritedProtocols) { 5944 // If both the right and left sides have qualifiers. 5945 bool Adopts = false; 5946 for (auto *Proto : OPT->quals()) { 5947 // return 'true' if 'PI' is in the inheritance hierarchy of Proto 5948 if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto))) 5949 break; 5950 } 5951 if (!Adopts) 5952 return false; 5953 } 5954 return true; 5955 } 5956 5957 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for 5958 /// the given object type. 
/// getObjCObjectPointerType - Return the uniqued reference to the
/// ObjCObjectPointerType wrapping the given object type.
QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
  llvm::FoldingSetNodeID ID;
  ObjCObjectPointerType::Profile(ID, ObjectT);

  // Fast path: an identical pointer type already exists in the folding set.
  void *InsertPos = nullptr;
  if (ObjCObjectPointerType *QT =
        ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(QT, 0);

  // Find the canonical object type.
  QualType Canonical;
  if (!ObjectT.isCanonical()) {
    Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));

    // Regenerate InsertPos. The recursive call above may have inserted into
    // the folding set, invalidating the insert position computed earlier.
    ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  // No match: allocate a new node and record it for uniquing.
  void *Mem =
      Allocate(sizeof(ObjCObjectPointerType), alignof(ObjCObjectPointerType));
  auto *QType =
      new (Mem) ObjCObjectPointerType(Canonical, ObjectT);

  Types.push_back(QType);
  ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
  return QualType(QType, 0);
}

/// getObjCInterfaceType - Return the unique reference to the type for the
/// specified ObjC interface decl. The list of protocols is optional.
QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
                                          ObjCInterfaceDecl *PrevDecl) const {
  // A type was already created for this declaration; reuse it.
  if (Decl->TypeForDecl)
    return QualType(Decl->TypeForDecl, 0);

  // If a previous declaration was supplied, share its type node.
  if (PrevDecl) {
    assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    return QualType(PrevDecl->TypeForDecl, 0);
  }

  // Prefer the definition, if there is one.
  if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
    Decl = Def;

  void *Mem = Allocate(sizeof(ObjCInterfaceType), alignof(ObjCInterfaceType));
  auto *T = new (Mem) ObjCInterfaceType(Decl);
  Decl->TypeForDecl = T;
  Types.push_back(T);
  return QualType(T, 0);
}

/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
/// TypeOfExprType ASTs (since expressions are never shared). For example,
/// multiple declarations that refer to "typeof(x)" all contain different
/// DeclRefExprs. This doesn't affect the type checker, since it operates
/// on canonical types (which are always unique).
QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const {
  TypeOfExprType *toe;
  if (tofExpr->isTypeDependent()) {
    // Dependent typeof(expr) types ARE uniqued (by profiling the expression),
    // so that equivalent dependent types share one canonical node.
    llvm::FoldingSetNodeID ID;
    DependentTypeOfExprType::Profile(ID, *this, tofExpr,
                                     Kind == TypeOfKind::Unqualified);

    void *InsertPos = nullptr;
    DependentTypeOfExprType *Canon =
        DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (Canon) {
      // We already have a "canonical" version of an identical, dependent
      // typeof(expr) type. Use that as our canonical type.
      toe = new (*this, alignof(TypeOfExprType)) TypeOfExprType(
          *this, tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0));
    } else {
      // Build a new, canonical typeof(expr) type.
      Canon = new (*this, alignof(DependentTypeOfExprType))
          DependentTypeOfExprType(*this, tofExpr, Kind);
      DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
      toe = Canon;
    }
  } else {
    // Non-dependent: the canonical type is the canonical type of the
    // expression's type.
    QualType Canonical = getCanonicalType(tofExpr->getType());
    toe = new (*this, alignof(TypeOfExprType))
        TypeOfExprType(*this, tofExpr, Kind, Canonical);
  }
  Types.push_back(toe);
  return QualType(toe, 0);
}

/// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
/// TypeOfType nodes. The only motivation to unique these nodes would be
/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
/// an issue. This doesn't affect the type checker, since it operates
/// on canonical types (which are always unique).
QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const {
  QualType Canonical = getCanonicalType(tofType);
  auto *tot = new (*this, alignof(TypeOfType))
      TypeOfType(*this, tofType, Canonical, Kind);
  Types.push_back(tot);
  return QualType(tot, 0);
}

/// getReferenceQualifiedType - Given an expr, will return the type for
/// that expression, as in [dcl.type.simple]p4 but without taking id-expressions
/// and class member access into account.
QualType ASTContext::getReferenceQualifiedType(const Expr *E) const {
  // C++11 [dcl.type.simple]p4:
  //   [...]
  QualType T = E->getType();
  switch (E->getValueKind()) {
  //     - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
  //       type of e;
  case VK_XValue:
    return getRValueReferenceType(T);
  //     - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
  //       type of e;
  case VK_LValue:
    return getLValueReferenceType(T);
  //     - otherwise, decltype(e) is the type of e.
  case VK_PRValue:
    return T;
  }
  llvm_unreachable("Unknown value kind");
}

/// Unlike many "get<Type>" functions, we don't unique DecltypeType
/// nodes. This would never be helpful, since each such type has its own
/// expression, and would not give a significant memory saving, since there
/// is an Expr tree under each such type.
QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
  DecltypeType *dt;

  // C++11 [temp.type]p2:
  //   If an expression e involves a template parameter, decltype(e) denotes a
  //   unique dependent type. Two such decltype-specifiers refer to the same
  //   type only if their expressions are equivalent (14.5.6.1).
  if (e->isInstantiationDependent()) {
    // Dependent decltype(expr) types get a uniqued canonical node, keyed by
    // a profile of the expression.
    llvm::FoldingSetNodeID ID;
    DependentDecltypeType::Profile(ID, *this, e);

    void *InsertPos = nullptr;
    DependentDecltypeType *Canon
      = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (!Canon) {
      // Build a new, canonical decltype(expr) type.
      Canon = new (*this, alignof(DependentDecltypeType))
          DependentDecltypeType(e, DependentTy);
      DependentDecltypeTypes.InsertNode(Canon, InsertPos);
    }
    dt = new (*this, alignof(DecltypeType))
        DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0));
  } else {
    dt = new (*this, alignof(DecltypeType))
        DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType));
  }
  Types.push_back(dt);
  return QualType(dt, 0);
}

/// Return a pack-indexing type (e.g. 'T...[Index]'). When the pack is fully
/// substituted and the index is known, the canonical type is the canonical
/// type of the selected expansion; otherwise a uniqued dependent
/// PackIndexingType node serves as the canonical type.
QualType ASTContext::getPackIndexingType(QualType Pattern, Expr *IndexExpr,
                                         bool FullySubstituted,
                                         ArrayRef<QualType> Expansions,
                                         int Index) const {
  QualType Canonical;
  if (FullySubstituted && Index != -1) {
    Canonical = getCanonicalType(Expansions[Index]);
  } else {
    // Dependent case: unique the canonical node by (Pattern, IndexExpr).
    llvm::FoldingSetNodeID ID;
    PackIndexingType::Profile(ID, *this, Pattern, IndexExpr);
    void *InsertPos = nullptr;
    PackIndexingType *Canon =
        DependentPackIndexingTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (!Canon) {
      void *Mem = Allocate(
          PackIndexingType::totalSizeToAlloc<QualType>(Expansions.size()),
          TypeAlignment);
      Canon = new (Mem)
          PackIndexingType(*this, QualType(), Pattern, IndexExpr, Expansions);
      DependentPackIndexingTypes.InsertNode(Canon, InsertPos);
    }
    Canonical = QualType(Canon, 0);
  }

  // The sugared node (trailing-allocated with the expansion list) is not
  // uniqued.
  void *Mem =
      Allocate(PackIndexingType::totalSizeToAlloc<QualType>(Expansions.size()),
               TypeAlignment);
  auto *T = new (Mem)
      PackIndexingType(*this, Canonical, Pattern, IndexExpr, Expansions);
  Types.push_back(T);
  return QualType(T, 0);
}

/// getUnaryTransformType - We don't unique these, since the memory
/// savings are minimal and these are rare.
QualType ASTContext::getUnaryTransformType(QualType BaseType,
                                           QualType UnderlyingType,
                                           UnaryTransformType::UTTKind Kind)
    const {
  UnaryTransformType *ut = nullptr;

  if (BaseType->isDependentType()) {
    // Look in the folding set for an existing type.
    llvm::FoldingSetNodeID ID;
    DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind);

    void *InsertPos = nullptr;
    DependentUnaryTransformType *Canon
      = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);

    if (!Canon) {
      // Build a new, canonical __underlying_type(type) type.
      Canon = new (*this, alignof(DependentUnaryTransformType))
          DependentUnaryTransformType(*this, getCanonicalType(BaseType), Kind);
      DependentUnaryTransformTypes.InsertNode(Canon, InsertPos);
    }
    // Dependent: the underlying type is not yet known, so pass QualType().
    ut = new (*this, alignof(UnaryTransformType))
        UnaryTransformType(BaseType, QualType(), Kind, QualType(Canon, 0));
  } else {
    QualType CanonType = getCanonicalType(UnderlyingType);
    ut = new (*this, alignof(UnaryTransformType))
        UnaryTransformType(BaseType, UnderlyingType, Kind, CanonType);
  }
  Types.push_back(ut);
  return QualType(ut, 0);
}

/// Build or find a uniqued AutoType. \p IsCanon is true when this call is
/// constructing the canonical node itself (which suppresses the
/// canonical-type computation below).
QualType ASTContext::getAutoTypeInternal(
    QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent,
    bool IsPack, ConceptDecl *TypeConstraintConcept,
    ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const {
  // The plain undeduced, unconstrained 'auto' has a dedicated singleton.
  if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto &&
      !TypeConstraintConcept && !IsDependent)
    return getAutoDeductType();

  // Look in the folding set for an existing type.
  void *InsertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent,
                    TypeConstraintConcept, TypeConstraintArgs);
  if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(AT, 0);

  // Compute the canonical type for the new node (skipped when we are
  // building the canonical node itself).
  QualType Canon;
  if (!IsCanon) {
    if (!DeducedType.isNull()) {
      Canon = DeducedType.getCanonicalType();
    } else if (TypeConstraintConcept) {
      bool AnyNonCanonArgs = false;
      ConceptDecl *CanonicalConcept = TypeConstraintConcept->getCanonicalDecl();
      auto CanonicalConceptArgs = ::getCanonicalTemplateArguments(
          *this, TypeConstraintArgs, AnyNonCanonArgs);
      if (CanonicalConcept != TypeConstraintConcept || AnyNonCanonArgs) {
        Canon =
            getAutoTypeInternal(QualType(), Keyword, IsDependent, IsPack,
                                CanonicalConcept, CanonicalConceptArgs, true);
        // Find the insert position again: the recursive call inserted into
        // AutoTypes, invalidating the earlier InsertPos.
        [[maybe_unused]] auto *Nothing =
            AutoTypes.FindNodeOrInsertPos(ID, InsertPos);
        assert(!Nothing && "canonical type broken");
      }
    }
  }

  // The constraint arguments are tail-allocated after the AutoType node.
  void *Mem = Allocate(sizeof(AutoType) +
                           sizeof(TemplateArgument) * TypeConstraintArgs.size(),
                       alignof(AutoType));
  auto *AT = new (Mem) AutoType(
      DeducedType, Keyword,
      (IsDependent ? TypeDependence::DependentInstantiation
                   : TypeDependence::None) |
          (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None),
      Canon, TypeConstraintConcept, TypeConstraintArgs);
  Types.push_back(AT);
  AutoTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}

/// getAutoType - Return the uniqued reference to the 'auto' type which has been
/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
/// canonical deduced-but-dependent 'auto' type.
QualType
ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
                        bool IsDependent, bool IsPack,
                        ConceptDecl *TypeConstraintConcept,
                        ArrayRef<TemplateArgument> TypeConstraintArgs) const {
  assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack");
  assert((!IsDependent || DeducedType.isNull()) &&
         "A dependent auto should be undeduced");
  return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack,
                             TypeConstraintConcept, TypeConstraintArgs);
}

/// Return \p T with any top-level 'auto'/'decltype(auto)' type-constraint
/// removed; returns \p T unchanged when there is nothing to strip.
QualType ASTContext::getUnconstrainedType(QualType T) const {
  QualType CanonT = T.getCanonicalType();

  // Remove a type-constraint from a top-level auto or decltype(auto).
  if (auto *AT = CanonT->getAs<AutoType>()) {
    if (!AT->isConstrained())
      return T;
    // Rebuild the auto without the constraint, preserving the qualifiers.
    return getQualifiedType(getAutoType(QualType(), AT->getKeyword(),
                                        AT->isDependentType(),
                                        AT->containsUnexpandedParameterPack()),
                            T.getQualifiers());
  }

  // FIXME: We only support constrained auto at the top level in the type of a
  // non-type template parameter at the moment. Once we lift that restriction,
  // we'll need to recursively build types containing auto here.
  assert(!CanonT->getContainedAutoType() ||
         !CanonT->getContainedAutoType()->isConstrained());
  return T;
}

/// Return the uniqued reference to the deduced template specialization type
/// which has been deduced to the given type, or to the canonical undeduced
/// such type, or the canonical deduced-but-dependent such type.
QualType ASTContext::getDeducedTemplateSpecializationType(
    TemplateName Template, QualType DeducedType, bool IsDependent) const {
  // Look in the folding set for an existing type.
6278 void *InsertPos = nullptr; 6279 llvm::FoldingSetNodeID ID; 6280 DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType, 6281 IsDependent); 6282 if (DeducedTemplateSpecializationType *DTST = 6283 DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos)) 6284 return QualType(DTST, 0); 6285 6286 auto *DTST = new (*this, alignof(DeducedTemplateSpecializationType)) 6287 DeducedTemplateSpecializationType(Template, DeducedType, IsDependent); 6288 llvm::FoldingSetNodeID TempID; 6289 DTST->Profile(TempID); 6290 assert(ID == TempID && "ID does not match"); 6291 Types.push_back(DTST); 6292 DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos); 6293 return QualType(DTST, 0); 6294 } 6295 6296 /// getAtomicType - Return the uniqued reference to the atomic type for 6297 /// the given value type. 6298 QualType ASTContext::getAtomicType(QualType T) const { 6299 // Unique pointers, to guarantee there is only one pointer of a particular 6300 // structure. 6301 llvm::FoldingSetNodeID ID; 6302 AtomicType::Profile(ID, T); 6303 6304 void *InsertPos = nullptr; 6305 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos)) 6306 return QualType(AT, 0); 6307 6308 // If the atomic value type isn't canonical, this won't be a canonical type 6309 // either, so fill in the canonical type field. 6310 QualType Canonical; 6311 if (!T.isCanonical()) { 6312 Canonical = getAtomicType(getCanonicalType(T)); 6313 6314 // Get the new insert position for the node we care about. 6315 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos); 6316 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 6317 } 6318 auto *New = new (*this, alignof(AtomicType)) AtomicType(T, Canonical); 6319 Types.push_back(New); 6320 AtomicTypes.InsertNode(New, InsertPos); 6321 return QualType(New, 0); 6322 } 6323 6324 /// getAutoDeductType - Get type pattern for deducing against 'auto'. 
6325 QualType ASTContext::getAutoDeductType() const { 6326 if (AutoDeductTy.isNull()) 6327 AutoDeductTy = QualType(new (*this, alignof(AutoType)) 6328 AutoType(QualType(), AutoTypeKeyword::Auto, 6329 TypeDependence::None, QualType(), 6330 /*concept*/ nullptr, /*args*/ {}), 6331 0); 6332 return AutoDeductTy; 6333 } 6334 6335 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'. 6336 QualType ASTContext::getAutoRRefDeductType() const { 6337 if (AutoRRefDeductTy.isNull()) 6338 AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType()); 6339 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern"); 6340 return AutoRRefDeductTy; 6341 } 6342 6343 /// getTagDeclType - Return the unique reference to the type for the 6344 /// specified TagDecl (struct/union/class/enum) decl. 6345 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const { 6346 assert(Decl); 6347 // FIXME: What is the design on getTagDeclType when it requires casting 6348 // away const? mutable? 6349 return getTypeDeclType(const_cast<TagDecl*>(Decl)); 6350 } 6351 6352 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result 6353 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and 6354 /// needs to agree with the definition in <stddef.h>. 6355 CanQualType ASTContext::getSizeType() const { 6356 return getFromTargetType(Target->getSizeType()); 6357 } 6358 6359 /// Return the unique signed counterpart of the integer type 6360 /// corresponding to size_t. 6361 CanQualType ASTContext::getSignedSizeType() const { 6362 return getFromTargetType(Target->getSignedSizeType()); 6363 } 6364 6365 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5). 6366 CanQualType ASTContext::getIntMaxType() const { 6367 return getFromTargetType(Target->getIntMaxType()); 6368 } 6369 6370 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5). 
6371 CanQualType ASTContext::getUIntMaxType() const { 6372 return getFromTargetType(Target->getUIntMaxType()); 6373 } 6374 6375 /// getSignedWCharType - Return the type of "signed wchar_t". 6376 /// Used when in C++, as a GCC extension. 6377 QualType ASTContext::getSignedWCharType() const { 6378 // FIXME: derive from "Target" ? 6379 return WCharTy; 6380 } 6381 6382 /// getUnsignedWCharType - Return the type of "unsigned wchar_t". 6383 /// Used when in C++, as a GCC extension. 6384 QualType ASTContext::getUnsignedWCharType() const { 6385 // FIXME: derive from "Target" ? 6386 return UnsignedIntTy; 6387 } 6388 6389 QualType ASTContext::getIntPtrType() const { 6390 return getFromTargetType(Target->getIntPtrType()); 6391 } 6392 6393 QualType ASTContext::getUIntPtrType() const { 6394 return getCorrespondingUnsignedType(getIntPtrType()); 6395 } 6396 6397 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) 6398 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). 6399 QualType ASTContext::getPointerDiffType() const { 6400 return getFromTargetType(Target->getPtrDiffType(LangAS::Default)); 6401 } 6402 6403 /// Return the unique unsigned counterpart of "ptrdiff_t" 6404 /// integer type. The standard (C11 7.21.6.1p7) refers to this type 6405 /// in the definition of %tu format specifier. 6406 QualType ASTContext::getUnsignedPointerDiffType() const { 6407 return getFromTargetType(Target->getUnsignedPtrDiffType(LangAS::Default)); 6408 } 6409 6410 /// Return the unique type for "pid_t" defined in 6411 /// <sys/types.h>. We need this to compute the correct type for vfork(). 
QualType ASTContext::getProcessIDType() const {
  return getFromTargetType(Target->getProcessIDType());
}

//===----------------------------------------------------------------------===//
//                        Type Operators
//===----------------------------------------------------------------------===//

/// Return the canonical form of \p T as it should appear as a function
/// parameter: arrays decay to pointers, functions decay to function
/// pointers, and qualifiers are discarded.
CanQualType ASTContext::getCanonicalParamType(QualType T) const {
  // Push qualifiers into arrays, and then discard any remaining
  // qualifiers.
  T = getCanonicalType(T);
  T = getVariableArrayDecayedType(T);
  const Type *Ty = T.getTypePtr();
  QualType Result;
  if (getLangOpts().HLSL && isa<ConstantArrayType>(Ty)) {
    // HLSL keeps constant arrays as a distinct parameter type.
    Result = getArrayParameterType(QualType(Ty, 0));
  } else if (isa<ArrayType>(Ty)) {
    Result = getArrayDecayedType(QualType(Ty, 0));
  } else if (isa<FunctionType>(Ty)) {
    Result = getPointerType(QualType(Ty, 0));
  } else {
    Result = QualType(Ty, 0);
  }

  return CanQualType::CreateUnsafe(Result);
}

/// Strip qualifiers from \p type, recursing through array types; the
/// qualifiers removed (from every array level) are accumulated into
/// \p quals, and the unqualified type is returned.
QualType ASTContext::getUnqualifiedArrayType(QualType type,
                                             Qualifiers &quals) const {
  SplitQualType splitType = type.getSplitUnqualifiedType();

  // FIXME: getSplitUnqualifiedType() actually walks all the way to
  // the unqualified desugared type and then drops it on the floor.
  // We then have to strip that sugar back off with
  // getUnqualifiedDesugaredType(), which is silly.
  const auto *AT =
      dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());

  // If we don't have an array, just use the results in splitType.
  if (!AT) {
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, recurse on the array's element type.
  QualType elementType = AT->getElementType();
  QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);

  // If that didn't change the element type, AT has no qualifiers, so we
  // can just use the results in splitType.
  if (elementType == unqualElementType) {
    assert(quals.empty()); // from the recursive call
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, add in the qualifiers from the outermost type, then
  // build the type back up.
  quals.addConsistentQualifiers(splitType.Quals);

  // Rebuild the array with the now-unqualified element type, preserving the
  // original array kind and size information.
  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
    return getConstantArrayType(unqualElementType, CAT->getSize(),
                                CAT->getSizeExpr(), CAT->getSizeModifier(), 0);
  }

  if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) {
    return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
  }

  if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) {
    return getVariableArrayType(unqualElementType,
                                VAT->getSizeExpr(),
                                VAT->getSizeModifier(),
                                VAT->getIndexTypeCVRQualifiers(),
                                VAT->getBracketsRange());
  }

  const auto *DSAT = cast<DependentSizedArrayType>(AT);
  return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
                                    DSAT->getSizeModifier(), 0,
                                    SourceRange());
}

/// Attempt to unwrap two types that may both be array types with the same bound
/// (or both be array types of unknown bound) for the purpose of comparing the
/// cv-decomposition of two types per C++ [conv.qual].
///
/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
///        C++20 [conv.qual], if permitted by the current language mode.
6502 void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2, 6503 bool AllowPiMismatch) { 6504 while (true) { 6505 auto *AT1 = getAsArrayType(T1); 6506 if (!AT1) 6507 return; 6508 6509 auto *AT2 = getAsArrayType(T2); 6510 if (!AT2) 6511 return; 6512 6513 // If we don't have two array types with the same constant bound nor two 6514 // incomplete array types, we've unwrapped everything we can. 6515 // C++20 also permits one type to be a constant array type and the other 6516 // to be an incomplete array type. 6517 // FIXME: Consider also unwrapping array of unknown bound and VLA. 6518 if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) { 6519 auto *CAT2 = dyn_cast<ConstantArrayType>(AT2); 6520 if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) || 6521 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6522 isa<IncompleteArrayType>(AT2)))) 6523 return; 6524 } else if (isa<IncompleteArrayType>(AT1)) { 6525 if (!(isa<IncompleteArrayType>(AT2) || 6526 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6527 isa<ConstantArrayType>(AT2)))) 6528 return; 6529 } else { 6530 return; 6531 } 6532 6533 T1 = AT1->getElementType(); 6534 T2 = AT2->getElementType(); 6535 } 6536 } 6537 6538 /// Attempt to unwrap two types that may be similar (C++ [conv.qual]). 6539 /// 6540 /// If T1 and T2 are both pointer types of the same kind, or both array types 6541 /// with the same bound, unwraps layers from T1 and T2 until a pointer type is 6542 /// unwrapped. Top-level qualifiers on T1 and T2 are ignored. 6543 /// 6544 /// This function will typically be called in a loop that successively 6545 /// "unwraps" pointer and pointer-to-member types to compare them at each 6546 /// level. 6547 /// 6548 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 6549 /// C++20 [conv.qual], if permitted by the current language mode. 6550 /// 6551 /// \return \c true if a pointer type was unwrapped, \c false if we reached a 6552 /// pair of types that can't be unwrapped further. 
bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2,
                                    bool AllowPiMismatch) {
  // First strip matching array layers from both sides.
  UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch);

  // Both object pointers: unwrap to the pointee types.
  const auto *T1PtrType = T1->getAs<PointerType>();
  const auto *T2PtrType = T2->getAs<PointerType>();
  if (T1PtrType && T2PtrType) {
    T1 = T1PtrType->getPointeeType();
    T2 = T2PtrType->getPointeeType();
    return true;
  }

  // Both member pointers into the same class: unwrap to the pointee types.
  const auto *T1MPType = T1->getAs<MemberPointerType>();
  const auto *T2MPType = T2->getAs<MemberPointerType>();
  if (T1MPType && T2MPType &&
      hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0),
                             QualType(T2MPType->getClass(), 0))) {
    T1 = T1MPType->getPointeeType();
    T2 = T2MPType->getPointeeType();
    return true;
  }

  // In Objective-C, both ObjC object pointers unwrap as well.
  if (getLangOpts().ObjC) {
    const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
    const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
    if (T1OPType && T2OPType) {
      T1 = T1OPType->getPointeeType();
      T2 = T2OPType->getPointeeType();
      return true;
    }
  }

  // FIXME: Block pointers, too?

  return false;
}

/// Determine whether two types are similar (C++ [conv.qual]): identical after
/// stripping qualifiers at every level reachable through pointers and
/// matching arrays.
bool ASTContext::hasSimilarType(QualType T1, QualType T2) {
  while (true) {
    Qualifiers Quals;
    T1 = getUnqualifiedArrayType(T1, Quals);
    T2 = getUnqualifiedArrayType(T2, Quals);
    if (hasSameType(T1, T2))
      return true;
    if (!UnwrapSimilarTypes(T1, T2))
      return false;
  }
}

/// Like hasSimilarType, but only CVR qualifiers may differ at each level;
/// any other qualifier mismatch makes the types dissimilar.
bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) {
  while (true) {
    Qualifiers Quals1, Quals2;
    T1 = getUnqualifiedArrayType(T1, Quals1);
    T2 = getUnqualifiedArrayType(T2, Quals2);

    // Only CVR qualifiers are allowed to differ.
    Quals1.removeCVRQualifiers();
    Quals2.removeCVRQualifiers();
    if (Quals1 != Quals2)
      return false;

    if (hasSameType(T1, T2))
      return true;

    if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false))
      return false;
  }
}

/// Compute the DeclarationNameInfo naming the given template, for each kind
/// of TemplateName representation.
DeclarationNameInfo
ASTContext::getNameForTemplate(TemplateName Name,
                               SourceLocation NameLoc) const {
  switch (Name.getKind()) {
  case TemplateName::QualifiedTemplate:
  case TemplateName::Template:
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
                               NameLoc);

  case TemplateName::OverloadedTemplate: {
    OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
    // DNInfo work in progress: CHECKME: what about DNLoc?
    // All overloads share the same name; use the first one.
    return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
  }

  case TemplateName::AssumedTemplate: {
    AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName();
    return DeclarationNameInfo(Storage->getDeclName(), NameLoc);
  }

  case TemplateName::DependentTemplate: {
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    DeclarationName DName;
    if (DTN->isIdentifier()) {
      DName = DeclarationNames.getIdentifier(DTN->getIdentifier());
      return DeclarationNameInfo(DName, NameLoc);
    } else {
      DName = DeclarationNames.getCXXOperatorName(DTN->getOperator());
      // DNInfo work in progress: FIXME: source locations?
      DeclarationNameLoc DNLoc =
          DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange());
      return DeclarationNameInfo(DName, NameLoc, DNLoc);
    }
  }

  case TemplateName::SubstTemplateTemplateParm: {
    SubstTemplateTemplateParmStorage *subst
      = Name.getAsSubstTemplateTemplateParm();
    return DeclarationNameInfo(subst->getParameter()->getDeclName(),
                               NameLoc);
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    SubstTemplateTemplateParmPackStorage *subst
      = Name.getAsSubstTemplateTemplateParmPack();
    return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
                               NameLoc);
  }
  case TemplateName::UsingTemplate:
    return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(),
                               NameLoc);
  }

  llvm_unreachable("bad template name kind!");
}

/// Return the canonical form of the given template name, used for comparing
/// template names for equivalence.
TemplateName
ASTContext::getCanonicalTemplateName(const TemplateName &Name) const {
  switch (Name.getKind()) {
  case TemplateName::UsingTemplate:
  case TemplateName::QualifiedTemplate:
  case TemplateName::Template: {
    TemplateDecl *Template = Name.getAsTemplateDecl();
    if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template))
      Template = getCanonicalTemplateTemplateParmDecl(TTP);

    // The canonical template name is the canonical template declaration.
    return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
  }

  case TemplateName::OverloadedTemplate:
  case TemplateName::AssumedTemplate:
    llvm_unreachable("cannot canonicalize unresolved template");

  case TemplateName::DependentTemplate: {
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    assert(DTN && "Non-dependent template names must refer to template decls.");
    return DTN->CanonicalTemplateName;
  }

  case TemplateName::SubstTemplateTemplateParm: {
    // Canonicalize through to the replacement name.
    SubstTemplateTemplateParmStorage *subst
      = Name.getAsSubstTemplateTemplateParm();
    return getCanonicalTemplateName(subst->getReplacement());
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    SubstTemplateTemplateParmPackStorage *subst =
        Name.getAsSubstTemplateTemplateParmPack();
    TemplateArgument canonArgPack =
        getCanonicalTemplateArgument(subst->getArgumentPack());
    return getSubstTemplateTemplateParmPack(
        canonArgPack, subst->getAssociatedDecl()->getCanonicalDecl(),
        subst->getFinal(), subst->getIndex());
  }
  }

  llvm_unreachable("bad template name!");
}

/// Two template names are the same when their canonical forms are
/// pointer-identical.
bool ASTContext::hasSameTemplateName(const TemplateName &X,
                                     const TemplateName &Y) const {
  return getCanonicalTemplateName(X).getAsVoidPointer() ==
         getCanonicalTemplateName(Y).getAsVoidPointer();
}

/// Compare two (possibly null) constraint expressions for equivalence by
/// canonical profiling. Two null expressions compare equal.
bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const {
  // Exactly one null -> different.
  if (!XCE != !YCE)
    return false;

  // Both null -> same.
  if (!XCE)
    return true;

  llvm::FoldingSetNodeID XCEID, YCEID;
  XCE->Profile(XCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
  YCE->Profile(YCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
  return XCEID == YCEID;
}

/// Compare two (possibly null) type-constraints for equivalence: same named
/// concept, same shape of written arguments, and equivalent immediately
/// declared constraints.
bool
ASTContext::isSameTypeConstraint(const TypeConstraint *XTC,
                                 const TypeConstraint *YTC) const {
  // Exactly one null -> different; both null -> same.
  if (!XTC != !YTC)
    return false;

  if (!XTC)
    return true;

  auto *NCX = XTC->getNamedConcept();
  auto *NCY = YTC->getNamedConcept();
  if (!NCX || !NCY || !isSameEntity(NCX, NCY))
    return false;
  if (XTC->getConceptReference()->hasExplicitTemplateArgs() !=
      YTC->getConceptReference()->hasExplicitTemplateArgs())
    return false;
  if (XTC->getConceptReference()->hasExplicitTemplateArgs())
    if (XTC->getConceptReference()
            ->getTemplateArgsAsWritten()
            ->NumTemplateArgs !=
        YTC->getConceptReference()->getTemplateArgsAsWritten()->NumTemplateArgs)
      return false;

  // Compare slowly by profiling.
  //
  // We couldn't compare the profiling result for the template
  // args here. Consider the following example in different modules:
  //
  //   template <__integer_like _Tp, C<_Tp> Sentinel>
  //   constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const {
  //     return __t;
  //   }
  //
  // When we compare the profiling result for `C<_Tp>` in different
  // modules, it will compare the type of `_Tp` in different modules.
  // However, the type of `_Tp` in different modules refer to different
  // types here naturally. So we couldn't compare the profiling result
  // for the template args directly.
6778 return isSameConstraintExpr(XTC->getImmediatelyDeclaredConstraint(), 6779 YTC->getImmediatelyDeclaredConstraint()); 6780 } 6781 6782 bool ASTContext::isSameTemplateParameter(const NamedDecl *X, 6783 const NamedDecl *Y) const { 6784 if (X->getKind() != Y->getKind()) 6785 return false; 6786 6787 if (auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) { 6788 auto *TY = cast<TemplateTypeParmDecl>(Y); 6789 if (TX->isParameterPack() != TY->isParameterPack()) 6790 return false; 6791 if (TX->hasTypeConstraint() != TY->hasTypeConstraint()) 6792 return false; 6793 return isSameTypeConstraint(TX->getTypeConstraint(), 6794 TY->getTypeConstraint()); 6795 } 6796 6797 if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) { 6798 auto *TY = cast<NonTypeTemplateParmDecl>(Y); 6799 return TX->isParameterPack() == TY->isParameterPack() && 6800 TX->getASTContext().hasSameType(TX->getType(), TY->getType()) && 6801 isSameConstraintExpr(TX->getPlaceholderTypeConstraint(), 6802 TY->getPlaceholderTypeConstraint()); 6803 } 6804 6805 auto *TX = cast<TemplateTemplateParmDecl>(X); 6806 auto *TY = cast<TemplateTemplateParmDecl>(Y); 6807 return TX->isParameterPack() == TY->isParameterPack() && 6808 isSameTemplateParameterList(TX->getTemplateParameters(), 6809 TY->getTemplateParameters()); 6810 } 6811 6812 bool ASTContext::isSameTemplateParameterList( 6813 const TemplateParameterList *X, const TemplateParameterList *Y) const { 6814 if (X->size() != Y->size()) 6815 return false; 6816 6817 for (unsigned I = 0, N = X->size(); I != N; ++I) 6818 if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I))) 6819 return false; 6820 6821 return isSameConstraintExpr(X->getRequiresClause(), Y->getRequiresClause()); 6822 } 6823 6824 bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X, 6825 const NamedDecl *Y) const { 6826 // If the type parameter isn't the same already, we don't need to check the 6827 // default argument further. 
  if (!isSameTemplateParameter(X, Y))
    return false;

  // Type parameter: default arguments are types; compare them directly.
  if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(X)) {
    auto *TTPY = cast<TemplateTypeParmDecl>(Y);
    if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
      return false;

    return hasSameType(TTPX->getDefaultArgument().getArgument().getAsType(),
                       TTPY->getDefaultArgument().getArgument().getAsType());
  }

  // Non-type parameter: default arguments are expressions; compare their
  // canonical profiles (ignoring implicit casts).
  if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
    auto *NTTPY = cast<NonTypeTemplateParmDecl>(Y);
    if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument())
      return false;

    Expr *DefaultArgumentX =
        NTTPX->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
    Expr *DefaultArgumentY =
        NTTPY->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
    llvm::FoldingSetNodeID XID, YID;
    DefaultArgumentX->Profile(XID, *this, /*Canonical=*/true);
    DefaultArgumentY->Profile(YID, *this, /*Canonical=*/true);
    return XID == YID;
  }

  // Template template parameter: default arguments are template names.
  auto *TTPX = cast<TemplateTemplateParmDecl>(X);
  auto *TTPY = cast<TemplateTemplateParmDecl>(Y);

  if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
    return false;

  const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument();
  const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument();
  return hasSameTemplateName(TAX.getAsTemplate(), TAY.getAsTemplate());
}

/// Return the namespace named by a namespace or namespace-alias specifier,
/// or null for any other specifier kind.
static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) {
  if (auto *NS = X->getAsNamespace())
    return NS;
  if (auto *NAS = X->getAsNamespaceAlias())
    return NAS->getNamespace();
  return nullptr;
}

/// Determine whether two nested-name-specifiers denote the same entity,
/// comparing each component and recursing into the prefixes.
static bool isSameQualifier(const NestedNameSpecifier *X,
                            const NestedNameSpecifier *Y) {
  // Namespaces and namespace aliases are interchangeable when they name the
  // same canonical namespace.
  if (auto *NSX = getNamespace(X)) {
    auto *NSY = getNamespace(Y);
    if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl())
      return false;
  } else if (X->getKind() != Y->getKind())
    return false;

  // FIXME: For namespaces and types, we're permitted to check that the entity
  // is named via the same tokens. We should probably do so.
  switch (X->getKind()) {
  case NestedNameSpecifier::Identifier:
    if (X->getAsIdentifier() != Y->getAsIdentifier())
      return false;
    break;
  case NestedNameSpecifier::Namespace:
  case NestedNameSpecifier::NamespaceAlias:
    // We've already checked that we named the same namespace.
    break;
  case NestedNameSpecifier::TypeSpec:
  case NestedNameSpecifier::TypeSpecWithTemplate:
    if (X->getAsType()->getCanonicalTypeInternal() !=
        Y->getAsType()->getCanonicalTypeInternal())
      return false;
    break;
  case NestedNameSpecifier::Global:
  case NestedNameSpecifier::Super:
    return true;
  }

  // Recurse into earlier portion of NNS, if any.
  auto *PX = X->getPrefix();
  auto *PY = Y->getPrefix();
  if (PX && PY)
    return isSameQualifier(PX, PY);
  return !PX && !PY;
}

/// Determine whether the attributes we can overload on are identical for A and
/// B. Will ignore any overloadable attrs represented in the type of A and B.
static bool hasSameOverloadableAttrs(const FunctionDecl *A,
                                     const FunctionDecl *B) {
  // Note that pass_object_size attributes are represented in the function's
  // ExtParameterInfo, so we don't need to check them here.

  llvm::FoldingSetNodeID Cand1ID, Cand2ID;
  auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>();
  auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>();

  // Walk both attribute lists in lock-step; zip_longest pads the shorter
  // list with empty optionals, which detects a length mismatch below.
  for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) {
    std::optional<EnableIfAttr *> Cand1A = std::get<0>(Pair);
    std::optional<EnableIfAttr *> Cand2A = std::get<1>(Pair);

    // Return false if the number of enable_if attributes is different.
    if (!Cand1A || !Cand2A)
      return false;

    Cand1ID.clear();
    Cand2ID.clear();

    (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true);
    (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true);

    // Return false if any of the enable_if expressions of A and B are
    // different.
    if (Cand1ID != Cand2ID)
      return false;
  }
  return true;
}

bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const {
  // Caution: this function is called by the AST reader during deserialization,
  // so it cannot rely on AST invariants being met. Non-trivial accessors
  // should be avoided, along with any traversal of redeclaration chains.

  if (X == Y)
    return true;

  if (X->getDeclName() != Y->getDeclName())
    return false;

  // Must be in the same context.
  //
  // Note that we can't use DeclContext::Equals here, because the DeclContexts
  // could be two different declarations of the same function. (We will fix the
  // semantic DC to refer to the primary definition after merging.)
  if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()),
                          cast<Decl>(Y->getDeclContext()->getRedeclContext())))
    return false;

  // Two typedefs refer to the same entity if they have the same underlying
  // type.
  if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X))
    if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y))
      return hasSameType(TypedefX->getUnderlyingType(),
                         TypedefY->getUnderlyingType());

  // Must have the same kind.
  if (X->getKind() != Y->getKind())
    return false;

  // Objective-C classes and protocols with the same name always match.
  if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X))
    return true;

  if (isa<ClassTemplateSpecializationDecl>(X)) {
    // No need to handle these here: we merge them when adding them to the
    // template.
    return false;
  }

  // Compatible tags match. struct/class/interface tag kinds are considered
  // interchangeable with one another.
  if (const auto *TagX = dyn_cast<TagDecl>(X)) {
    const auto *TagY = cast<TagDecl>(Y);
    return (TagX->getTagKind() == TagY->getTagKind()) ||
           ((TagX->getTagKind() == TagTypeKind::Struct ||
             TagX->getTagKind() == TagTypeKind::Class ||
             TagX->getTagKind() == TagTypeKind::Interface) &&
            (TagY->getTagKind() == TagTypeKind::Struct ||
             TagY->getTagKind() == TagTypeKind::Class ||
             TagY->getTagKind() == TagTypeKind::Interface));
  }

  // Functions with the same type and linkage match.
  // FIXME: This needs to cope with merging of prototyped/non-prototyped
  // functions, etc.
  if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) {
    const auto *FuncY = cast<FunctionDecl>(Y);
    if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) {
      const auto *CtorY = cast<CXXConstructorDecl>(Y);
      // Inheriting constructors must inherit the same underlying constructor.
      if (CtorX->getInheritedConstructor() &&
          !isSameEntity(CtorX->getInheritedConstructor().getConstructor(),
                        CtorY->getInheritedConstructor().getConstructor()))
        return false;
    }

    if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
      return false;

    // Multiversioned functions with different feature strings are represented
    // as separate declarations.
    if (FuncX->isMultiVersion()) {
      const auto *TAX = FuncX->getAttr<TargetAttr>();
      const auto *TAY = FuncY->getAttr<TargetAttr>();
      assert(TAX && TAY && "Multiversion Function without target attribute");

      if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
        return false;
    }

    // Per C++20 [temp.over.link]/4, friends in different classes are sometimes
    // not the same entity if they are constrained.
    if ((FuncX->isMemberLikeConstrainedFriend() ||
         FuncY->isMemberLikeConstrainedFriend()) &&
        !FuncX->getLexicalDeclContext()->Equals(
            FuncY->getLexicalDeclContext())) {
      return false;
    }

    if (!isSameConstraintExpr(FuncX->getTrailingRequiresClause(),
                              FuncY->getTrailingRequiresClause()))
      return false;

    auto GetTypeAsWritten = [](const FunctionDecl *FD) {
      // Map to the first declaration that we've already merged into this one.
      // The TSI of redeclarations might not match (due to calling conventions
      // being inherited onto the type but not the TSI), but the TSI type of
      // the first declaration of the function should match across modules.
      FD = FD->getCanonicalDecl();
      return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
                                     : FD->getType();
    };
    QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
    if (!hasSameType(XT, YT)) {
      // We can get functions with different types on the redecl chain in C++17
      // if they have differing exception specifications and at least one of
      // the exception specs is unresolved.
      auto *XFPT = XT->getAs<FunctionProtoType>();
      auto *YFPT = YT->getAs<FunctionProtoType>();
      if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
          (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
           isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
          hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
        return true;
      return false;
    }

    return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
           hasSameOverloadableAttrs(FuncX, FuncY);
  }

  // Variables with the same type and linkage match.
  if (const auto *VarX = dyn_cast<VarDecl>(X)) {
    const auto *VarY = cast<VarDecl>(Y);
    if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
      // During deserialization, we might compare variables before we load
      // their types. Assume the types will end up being the same.
      if (VarX->getType().isNull() || VarY->getType().isNull())
        return true;

      if (hasSameType(VarX->getType(), VarY->getType()))
        return true;

      // We can get decls with different types on the redecl chain. Eg.
      // template <typename T> struct S { static T Var[]; }; // #1
      // template <typename T> T S<T>::Var[sizeof(T)]; // #2
      // Only? happens when completing an incomplete array type. In this case
      // when comparing #1 and #2 we should go through their element type.
      const ArrayType *VarXTy = getAsArrayType(VarX->getType());
      const ArrayType *VarYTy = getAsArrayType(VarY->getType());
      if (!VarXTy || !VarYTy)
        return false;
      if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
        return hasSameType(VarXTy->getElementType(), VarYTy->getElementType());
    }
    return false;
  }

  // Namespaces with the same name and inlinedness match.
  if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
    const auto *NamespaceY = cast<NamespaceDecl>(Y);
    return NamespaceX->isInline() == NamespaceY->isInline();
  }

  // Identical template names and kinds match if their template parameter lists
  // and patterns match.
  if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) {
    const auto *TemplateY = cast<TemplateDecl>(Y);

    // ConceptDecl wouldn't be the same if their constraint expression differs.
    if (const auto *ConceptX = dyn_cast<ConceptDecl>(X)) {
      const auto *ConceptY = cast<ConceptDecl>(Y);
      if (!isSameConstraintExpr(ConceptX->getConstraintExpr(),
                                ConceptY->getConstraintExpr()))
        return false;
    }

    return isSameEntity(TemplateX->getTemplatedDecl(),
                        TemplateY->getTemplatedDecl()) &&
           isSameTemplateParameterList(TemplateX->getTemplateParameters(),
                                       TemplateY->getTemplateParameters());
  }

  // Fields with the same name and the same type match.
  if (const auto *FDX = dyn_cast<FieldDecl>(X)) {
    const auto *FDY = cast<FieldDecl>(Y);
    // FIXME: Also check the bitwidth is odr-equivalent, if any.
    return hasSameType(FDX->getType(), FDY->getType());
  }

  // Indirect fields with the same target field match.
  if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) {
    const auto *IFDY = cast<IndirectFieldDecl>(Y);
    return IFDX->getAnonField()->getCanonicalDecl() ==
           IFDY->getAnonField()->getCanonicalDecl();
  }

  // Enumerators with the same name match.
  if (isa<EnumConstantDecl>(X))
    // FIXME: Also check the value is odr-equivalent.
    return true;

  // Using shadow declarations with the same target match.
  if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) {
    const auto *USY = cast<UsingShadowDecl>(Y);
    return declaresSameEntity(USX->getTargetDecl(), USY->getTargetDecl());
  }

  // Using declarations with the same qualifier match. (We already know that
  // the name matches.)
  if (const auto *UX = dyn_cast<UsingDecl>(X)) {
    const auto *UY = cast<UsingDecl>(Y);
    return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
           UX->hasTypename() == UY->hasTypename() &&
           UX->isAccessDeclaration() == UY->isAccessDeclaration();
  }
  if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) {
    const auto *UY = cast<UnresolvedUsingValueDecl>(Y);
    return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
           UX->isAccessDeclaration() == UY->isAccessDeclaration();
  }
  if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) {
    return isSameQualifier(
        UX->getQualifier(),
        cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier());
  }

  // Using-pack declarations are only created by instantiation, and match if
  // they're instantiated from matching UnresolvedUsing...Decls.
  if (const auto *UX = dyn_cast<UsingPackDecl>(X)) {
    return declaresSameEntity(
        UX->getInstantiatedFromUsingDecl(),
        cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl());
  }

  // Namespace alias definitions with the same target match.
  if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) {
    const auto *NAY = cast<NamespaceAliasDecl>(Y);
    return NAX->getNamespace()->Equals(NAY->getNamespace());
  }

  return false;
}

/// Produce the canonical form of the given template argument, canonicalizing
/// any types, declarations, and template names it refers to.
TemplateArgument
ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
  switch (Arg.getKind()) {
  case TemplateArgument::Null:
    return Arg;

  case TemplateArgument::Expression:
    // Expressions are not canonicalized here.
    return Arg;

  case TemplateArgument::Declaration: {
    auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
    return TemplateArgument(D, getCanonicalType(Arg.getParamTypeForDecl()),
                            Arg.getIsDefaulted());
  }

  case TemplateArgument::NullPtr:
    return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
                            /*isNullPtr*/ true, Arg.getIsDefaulted());

  case TemplateArgument::Template:
    return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()),
                            Arg.getIsDefaulted());

  case TemplateArgument::TemplateExpansion:
    return TemplateArgument(
        getCanonicalTemplateName(Arg.getAsTemplateOrTemplatePattern()),
        Arg.getNumTemplateExpansions(), Arg.getIsDefaulted());

  case TemplateArgument::Integral:
    return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));

  case TemplateArgument::StructuralValue:
    return TemplateArgument(*this,
                            getCanonicalType(Arg.getStructuralValueType()),
                            Arg.getAsStructuralValue());

  case TemplateArgument::Type:
    return TemplateArgument(getCanonicalType(Arg.getAsType()),
                            /*isNullPtr*/ false, Arg.getIsDefaulted());

  case TemplateArgument::Pack: {
    // Canonicalize every element of the pack; only rebuild the pack when
    // something actually changed.
    bool AnyNonCanonArgs = false;
    auto CanonArgs = ::getCanonicalTemplateArguments(
        *this, Arg.pack_elements(), AnyNonCanonArgs);
    if (!AnyNonCanonArgs)
      return Arg;
    return TemplateArgument::CreatePackCopy(const_cast<ASTContext &>(*this),
                                            CanonArgs);
  }
  }

  // Silence GCC warning
  llvm_unreachable("Unhandled template argument kind");
}

/// Produce the canonical form of the given nested-name-specifier.
NestedNameSpecifier *
ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
  if (!NNS)
    return nullptr;

  switch (NNS->getKind()) {
  case NestedNameSpecifier::Identifier:
    // Canonicalize the prefix but keep the identifier the same.
    return NestedNameSpecifier::Create(
        *this, getCanonicalNestedNameSpecifier(NNS->getPrefix()),
        NNS->getAsIdentifier());

  case NestedNameSpecifier::Namespace:
    // A namespace is canonical; build a nested-name-specifier with
    // this namespace and no prefix.
    return NestedNameSpecifier::Create(*this, nullptr,
                                       NNS->getAsNamespace()->getFirstDecl());

  case NestedNameSpecifier::NamespaceAlias:
    // A namespace is canonical; build a nested-name-specifier with
    // this namespace and no prefix.
    return NestedNameSpecifier::Create(
        *this, nullptr,
        NNS->getAsNamespaceAlias()->getNamespace()->getFirstDecl());

  // The difference between TypeSpec and TypeSpecWithTemplate is that the
  // latter will have the 'template' keyword when printed.
  case NestedNameSpecifier::TypeSpec:
  case NestedNameSpecifier::TypeSpecWithTemplate: {
    const Type *T = getCanonicalType(NNS->getAsType());

    // If we have some kind of dependent-named type (e.g., "typename T::type"),
    // break it apart into its prefix and identifier, then reconstitute those
    // as the canonical nested-name-specifier. This is required to canonicalize
    // a dependent nested-name-specifier involving typedefs of dependent-name
    // types, e.g.,
    //   typedef typename T::type T1;
    //   typedef typename T1::type T2;
    if (const auto *DNT = T->getAs<DependentNameType>())
      return NestedNameSpecifier::Create(*this, DNT->getQualifier(),
                                         DNT->getIdentifier());
    if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>())
      return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true, T);

    // TODO: Set 'Template' parameter to true for other template types.
    return NestedNameSpecifier::Create(*this, nullptr, false, T);
  }

  case NestedNameSpecifier::Global:
  case NestedNameSpecifier::Super:
    // The global specifier and __super specifier are canonical and unique.
    return NNS;
  }

  llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}

/// Return \p T as an ArrayType, propagating any qualifiers on \p T into the
/// array's element type (C99 6.7.3p8), or null if \p T is not an array.
const ArrayType *ASTContext::getAsArrayType(QualType T) const {
  // Handle the non-qualified case efficiently.
  if (!T.hasLocalQualifiers()) {
    // Handle the common positive case fast.
    if (const auto *AT = dyn_cast<ArrayType>(T))
      return AT;
  }

  // Handle the common negative case fast.
  if (!isa<ArrayType>(T.getCanonicalType()))
    return nullptr;

  // Apply any qualifiers from the array type to the element type. This
  // implements C99 6.7.3p8: "If the specification of an array type includes
  // any type qualifiers, the element type is so qualified, not the array type."

  // If we get here, we either have type qualifiers on the type, or we have
  // sugar such as a typedef in the way. If we have type qualifiers on the type
  // we must propagate them down into the element type.

  SplitQualType split = T.getSplitDesugaredType();
  Qualifiers qs = split.Quals;

  // If we have a simple case, just return now.
  const auto *ATy = dyn_cast<ArrayType>(split.Ty);
  if (!ATy || qs.empty())
    return ATy;

  // Otherwise, we have an array and we have qualifiers on it. Push the
  // qualifiers into the array element type and return a new array type.
  QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);

  // Rebuild the array with the qualified element type, preserving the
  // original array kind's size/modifier information.
  if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy))
    return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
                                                CAT->getSizeExpr(),
                                                CAT->getSizeModifier(),
                                                CAT->getIndexTypeCVRQualifiers()));

  if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy))
    return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
                                                  IAT->getSizeModifier(),
                                                  IAT->getIndexTypeCVRQualifiers()));

  if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy))
    return cast<ArrayType>(
        getDependentSizedArrayType(NewEltTy,
                                   DSAT->getSizeExpr(),
                                   DSAT->getSizeModifier(),
                                   DSAT->getIndexTypeCVRQualifiers(),
                                   DSAT->getBracketsRange()));

  const auto *VAT = cast<VariableArrayType>(ATy);
  return cast<ArrayType>(getVariableArrayType(NewEltTy,
                                              VAT->getSizeExpr(),
                                              VAT->getSizeModifier(),
                                              VAT->getIndexTypeCVRQualifiers(),
                                              VAT->getBracketsRange()));
}

/// Perform adjustment on the parameter type of a function: array and function
/// types decay to the corresponding pointer type (except HLSL constant
/// arrays, which map to a dedicated array parameter type instead).
QualType ASTContext::getAdjustedParameterType(QualType T) const {
  if (getLangOpts().HLSL && T->isConstantArrayType())
    return getArrayParameterType(T);
  if (T->isArrayType() || T->isFunctionType())
    return getDecayedType(T);
  return T;
}

/// Produce a parameter type as it participates in a function signature:
/// VLA-decayed, parameter-adjusted, and stripped of top-level qualifiers.
QualType ASTContext::getSignatureParameterType(QualType T) const {
  T = getVariableArrayDecayedType(T);
  T = getAdjustedParameterType(T);
  return T.getUnqualifiedType();
}

/// Compute the type of the exception object created by a throw-expression
/// whose operand has type \p T.
QualType ASTContext::getExceptionObjectType(QualType T) const {
  // C++ [except.throw]p3:
  //   A throw-expression initializes a temporary object, called the exception
  //   object, the type of which is determined by removing any top-level
  //   cv-qualifiers from the static type of the operand of throw and adjusting
  //   the type from "array of T" or "function returning T" to "pointer to T"
  //   or "pointer to function returning T", [...]
  T = getVariableArrayDecayedType(T);
  if (T->isArrayType() || T->isFunctionType())
    T = getDecayedType(T);
  return T.getUnqualifiedType();
}

/// getArrayDecayedType - Return the properly qualified result of decaying the
/// specified array type to a pointer. This operation is non-trivial when
/// handling typedefs etc. The canonical type of "T" must be an array type,
/// this returns a pointer to a properly qualified element of the array.
///
/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
QualType ASTContext::getArrayDecayedType(QualType Ty) const {
  // Get the element type with 'getAsArrayType' so that we don't lose any
  // typedefs in the element type of the array. This also handles propagation
  // of type qualifiers from the array type into the element type if present
  // (C99 6.7.3p8).
  const ArrayType *PrettyArrayType = getAsArrayType(Ty);
  assert(PrettyArrayType && "Not an array type!");

  QualType PtrTy = getPointerType(PrettyArrayType->getElementType());

  // int x[restrict 4] ->  int *restrict
  QualType Result = getQualifiedType(PtrTy,
                                     PrettyArrayType->getIndexTypeQualifiers());

  // int x[_Nullable] -> int * _Nullable
  if (auto Nullability = Ty->getNullability()) {
    Result = const_cast<ASTContext *>(this)->getAttributedType(
        AttributedType::getNullabilityAttrKind(*Nullability), Result, Result);
  }
  return Result;
}

/// Return the innermost non-array element type of the given array.
QualType ASTContext::getBaseElementType(const ArrayType *array) const {
  return getBaseElementType(array->getElementType());
}

/// Return the innermost non-array element type of \p type, accumulating any
/// qualifiers found on the way down onto the result.
QualType ASTContext::getBaseElementType(QualType type) const {
  Qualifiers qs;
  while (true) {
    SplitQualType split = type.getSplitDesugaredType();
    const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
    if (!array)
      break;

    type = array->getElementType();
    qs.addConsistentQualifiers(split.Quals);
  }

  return getQualifiedType(type, qs);
}

/// getConstantArrayElementCount - Returns number of constant array elements.
uint64_t
ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
  uint64_t ElementCount = 1;
  // Multiply out every nested constant-array dimension.
  do {
    ElementCount *= CA->getZExtSize();
    CA = dyn_cast_or_null<ConstantArrayType>(
        CA->getElementType()->getAsArrayTypeUnsafe());
  } while (CA);
  return ElementCount;
}

/// Returns the total number of elements initialized by the given
/// ArrayInitLoopExpr, multiplying through any nested init loops.
uint64_t ASTContext::getArrayInitLoopExprElementCount(
    const ArrayInitLoopExpr *AILE) const {
  if (!AILE)
    return 0;

  uint64_t ElementCount = 1;

  do {
    ElementCount *= AILE->getArraySize().getZExtValue();
    AILE = dyn_cast<ArrayInitLoopExpr>(AILE->getSubExpr());
  } while (AILE);

  return ElementCount;
}

/// getFloatingRank - Return a relative rank for floating point types.
/// This routine will assert if passed a built-in type that isn't a float.
static FloatingRank getFloatingRank(QualType T) {
  // A _Complex type ranks the same as its element type.
  if (const auto *CT = T->getAs<ComplexType>())
    return getFloatingRank(CT->getElementType());

  switch (T->castAs<BuiltinType>()->getKind()) {
  default: llvm_unreachable("getFloatingRank(): not a floating type");
  case BuiltinType::Float16:    return Float16Rank;
  case BuiltinType::Half:       return HalfRank;
  case BuiltinType::Float:      return FloatRank;
  case BuiltinType::Double:     return DoubleRank;
  case BuiltinType::LongDouble: return LongDoubleRank;
  case BuiltinType::Float128:   return Float128Rank;
  case BuiltinType::BFloat16:   return BFloat16Rank;
  case BuiltinType::Ibm128:     return Ibm128Rank;
  }
}

/// getFloatingTypeOrder - Compare the rank of the two specified floating
/// point types, ignoring the domain of the type (i.e. 'double' ==
/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
  FloatingRank LHSR = getFloatingRank(LHS);
  FloatingRank RHSR = getFloatingRank(RHS);

  if (LHSR == RHSR)
    return 0;
  if (LHSR > RHSR)
    return 1;
  return -1;
}

/// Compare floating point types by their underlying float semantics first,
/// so two types with identical semantics compare equal; otherwise fall back
/// to getFloatingTypeOrder.
int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const {
  if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS))
    return 0;
  return getFloatingTypeOrder(LHS, RHS);
}

/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
/// routine will assert if passed a built-in type that isn't an integer or enum,
/// or if it is not canonicalized.
unsigned ASTContext::getIntegerRank(const Type *T) const {
  assert(T->isCanonicalUnqualified() && "T should be canonicalized");

  // The rank encodes the bit-width in the high bits (width << 3) with a
  // per-kind tie-breaker in the low bits.
  // Results in this 'losing' to any type of the same size, but winning if
  // larger.
  if (const auto *EIT = dyn_cast<BitIntType>(T))
    return 0 + (EIT->getNumBits() << 3);

  switch (cast<BuiltinType>(T)->getKind()) {
  default: llvm_unreachable("getIntegerRank(): not a built-in integer");
  case BuiltinType::Bool:
    return 1 + (getIntWidth(BoolTy) << 3);
  case BuiltinType::Char_S:
  case BuiltinType::Char_U:
  case BuiltinType::SChar:
  case BuiltinType::UChar:
    return 2 + (getIntWidth(CharTy) << 3);
  case BuiltinType::Short:
  case BuiltinType::UShort:
    return 3 + (getIntWidth(ShortTy) << 3);
  case BuiltinType::Int:
  case BuiltinType::UInt:
    return 4 + (getIntWidth(IntTy) << 3);
  case BuiltinType::Long:
  case BuiltinType::ULong:
    return 5 + (getIntWidth(LongTy) << 3);
  case BuiltinType::LongLong:
  case BuiltinType::ULongLong:
    return 6 + (getIntWidth(LongLongTy) << 3);
  case BuiltinType::Int128:
  case BuiltinType::UInt128:
    return 7 + (getIntWidth(Int128Ty) << 3);

  // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of
  // their underlying types" [c++20 conv.rank]
  case BuiltinType::Char8:
    return getIntegerRank(UnsignedCharTy.getTypePtr());
  case BuiltinType::Char16:
    return getIntegerRank(
        getFromTargetType(Target->getChar16Type()).getTypePtr());
  case BuiltinType::Char32:
    return getIntegerRank(
        getFromTargetType(Target->getChar32Type()).getTypePtr());
  case BuiltinType::WChar_S:
  case BuiltinType::WChar_U:
    return getIntegerRank(
        getFromTargetType(Target->getWCharType()).getTypePtr());
  }
}

/// Whether this is a promotable bitfield reference according
/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
///
/// \returns the type this bit-field will promote to, or NULL if no
/// promotion occurs.
QualType ASTContext::isPromotableBitField(Expr *E) const {
  if (E->isTypeDependent() || E->isValueDependent())
    return {};

  // C++ [conv.prom]p5:
  //   If the bit-field has an enumerated type, it is treated as any other
  //   value of that type for promotion purposes.
  if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType())
    return {};

  // FIXME: We should not do this unless E->refersToBitField() is true. This
  // matters in C where getSourceBitField() will find bit-fields for various
  // cases where the source expression is not a bit-field designator.

  FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
  if (!Field)
    return {};

  QualType FT = Field->getType();

  uint64_t BitWidth = Field->getBitWidthValue(*this);
  uint64_t IntSize = getTypeSize(IntTy);
  // C++ [conv.prom]p5:
  //   A prvalue for an integral bit-field can be converted to a prvalue of type
  //   int if int can represent all the values of the bit-field; otherwise, it
  //   can be converted to unsigned int if unsigned int can represent all the
  //   values of the bit-field. If the bit-field is larger yet, no integral
  //   promotion applies to it.
  // C11 6.3.1.1/2:
  //   [For a bit-field of type _Bool, int, signed int, or unsigned int:]
  //   If an int can represent all values of the original type (as restricted by
  //   the width, for a bit-field), the value is converted to an int; otherwise,
  //   it is converted to an unsigned int.
  //
  // FIXME: C does not permit promotion of a 'long : 3' bitfield to int.
  //        We perform that promotion here to match GCC and C++.
  // FIXME: C does not permit promotion of an enum bit-field whose rank is
  //        greater than that of 'int'. We perform that promotion to match GCC.
  //
  // C23 6.3.1.1p2:
  //   The value from a bit-field of a bit-precise integer type is converted to
  //   the corresponding bit-precise integer type. (The rest is the same as in
  //   C11.)
  if (QualType QT = Field->getType(); QT->isBitIntType())
    return QT;

  if (BitWidth < IntSize)
    return IntTy;

  if (BitWidth == IntSize)
    return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;

  // Bit-fields wider than int are not subject to promotions, and therefore act
  // like the base type. GCC has some weird bugs in this area that we
  // deliberately do not follow (GCC follows a pre-standard resolution to
  // C's DR315 which treats bit-width as being part of the type, and this leaks
  // into their semantics in some cases).
  return {};
}

/// getPromotedIntegerType - Returns the type that Promotable will
/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
/// integer type.
QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
  assert(!Promotable.isNull());
  assert(isPromotableIntegerType(Promotable));
  if (const auto *ET = Promotable->getAs<EnumType>())
    return ET->getDecl()->getPromotionType();

  if (const auto *BT = Promotable->getAs<BuiltinType>()) {
    // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
    // (3.9.1) can be converted to a prvalue of the first of the following
    // types that can represent all the values of its underlying type:
    // int, unsigned int, long int, unsigned long int, long long int, or
    // unsigned long long int [...]
    // FIXME: Is there some better way to compute this?
    if (BT->getKind() == BuiltinType::WChar_S ||
        BT->getKind() == BuiltinType::WChar_U ||
        BT->getKind() == BuiltinType::Char8 ||
        BT->getKind() == BuiltinType::Char16 ||
        BT->getKind() == BuiltinType::Char32) {
      bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
      uint64_t FromSize = getTypeSize(BT);
      QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
                                  LongLongTy, UnsignedLongLongTy };
      // Pick the first candidate that is strictly wider, or equally wide with
      // matching signedness.
      for (const auto &PT : PromoteTypes) {
        uint64_t ToSize = getTypeSize(PT);
        if (FromSize < ToSize ||
            (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType()))
          return PT;
      }
      llvm_unreachable("char type should fit into long long");
    }
  }

  // At this point, we should have a signed or unsigned integer type.
  if (Promotable->isSignedIntegerType())
    return IntTy;
  uint64_t PromotableSize = getIntWidth(Promotable);
  uint64_t IntSize = getIntWidth(IntTy);
  assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
  return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
}

/// Recurses in pointer/array types until it finds an objc retainable
/// type and returns its ownership.
Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
  while (!T.isNull()) {
    if (T.getObjCLifetime() != Qualifiers::OCL_None)
      return T.getObjCLifetime();
    if (T->isArrayType())
      T = getBaseElementType(T);
    else if (const auto *PT = T->getAs<PointerType>())
      T = PT->getPointeeType();
    else if (const auto *RT = T->getAs<ReferenceType>())
      T = RT->getPointeeType();
    else
      break;
  }

  return Qualifiers::OCL_None;
}

/// Return the underlying integer type of a complete, unscoped enum, or null
/// for incomplete or scoped enums.
static const Type *getIntegerTypeForEnum(const EnumType *ET) {
  // Incomplete enum types are not treated as integer types.
  // FIXME: In C++, enum types are never integer types.
  if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
    return ET->getDecl()->getIntegerType().getTypePtr();
  return nullptr;
}

/// getIntegerTypeOrder - Returns the highest ranked integer type:
/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
  const Type *LHSC = getCanonicalType(LHS).getTypePtr();
  const Type *RHSC = getCanonicalType(RHS).getTypePtr();

  // Unwrap enums to their underlying type.
  if (const auto *ET = dyn_cast<EnumType>(LHSC))
    LHSC = getIntegerTypeForEnum(ET);
  if (const auto *ET = dyn_cast<EnumType>(RHSC))
    RHSC = getIntegerTypeForEnum(ET);

  if (LHSC == RHSC) return 0;

  bool LHSUnsigned = LHSC->isUnsignedIntegerType();
  bool RHSUnsigned = RHSC->isUnsignedIntegerType();

  unsigned LHSRank = getIntegerRank(LHSC);
  unsigned RHSRank = getIntegerRank(RHSC);

  if (LHSUnsigned == RHSUnsigned) {  // Both signed or both unsigned.
    if (LHSRank == RHSRank) return 0;
    return LHSRank > RHSRank ? 1 : -1;
  }

  // Otherwise, the LHS is signed and the RHS is unsigned or visa versa.
  if (LHSUnsigned) {
    // If the unsigned [LHS] type is larger, return it.
    if (LHSRank >= RHSRank)
      return 1;

    // If the signed type can represent all values of the unsigned type, it
    // wins. Because we are dealing with 2's complement and types that are
    // powers of two larger than each other, this is always safe.
    return -1;
  }

  // If the unsigned [RHS] type is larger, return it.
  if (RHSRank >= LHSRank)
    return -1;

  // If the signed type can represent all values of the unsigned type, it
  // wins. Because we are dealing with 2's complement and types that are
  // powers of two larger than each other, this is always safe.
7724 return 1; 7725 } 7726 7727 TypedefDecl *ASTContext::getCFConstantStringDecl() const { 7728 if (CFConstantStringTypeDecl) 7729 return CFConstantStringTypeDecl; 7730 7731 assert(!CFConstantStringTagDecl && 7732 "tag and typedef should be initialized together"); 7733 CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag"); 7734 CFConstantStringTagDecl->startDefinition(); 7735 7736 struct { 7737 QualType Type; 7738 const char *Name; 7739 } Fields[5]; 7740 unsigned Count = 0; 7741 7742 /// Objective-C ABI 7743 /// 7744 /// typedef struct __NSConstantString_tag { 7745 /// const int *isa; 7746 /// int flags; 7747 /// const char *str; 7748 /// long length; 7749 /// } __NSConstantString; 7750 /// 7751 /// Swift ABI (4.1, 4.2) 7752 /// 7753 /// typedef struct __NSConstantString_tag { 7754 /// uintptr_t _cfisa; 7755 /// uintptr_t _swift_rc; 7756 /// _Atomic(uint64_t) _cfinfoa; 7757 /// const char *_ptr; 7758 /// uint32_t _length; 7759 /// } __NSConstantString; 7760 /// 7761 /// Swift ABI (5.0) 7762 /// 7763 /// typedef struct __NSConstantString_tag { 7764 /// uintptr_t _cfisa; 7765 /// uintptr_t _swift_rc; 7766 /// _Atomic(uint64_t) _cfinfoa; 7767 /// const char *_ptr; 7768 /// uintptr_t _length; 7769 /// } __NSConstantString; 7770 7771 const auto CFRuntime = getLangOpts().CFRuntime; 7772 if (static_cast<unsigned>(CFRuntime) < 7773 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) { 7774 Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" }; 7775 Fields[Count++] = { IntTy, "flags" }; 7776 Fields[Count++] = { getPointerType(CharTy.withConst()), "str" }; 7777 Fields[Count++] = { LongTy, "length" }; 7778 } else { 7779 Fields[Count++] = { getUIntPtrType(), "_cfisa" }; 7780 Fields[Count++] = { getUIntPtrType(), "_swift_rc" }; 7781 Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_swift_rc" }; 7782 Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" }; 7783 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 
|| 7784 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2) 7785 Fields[Count++] = { IntTy, "_ptr" }; 7786 else 7787 Fields[Count++] = { getUIntPtrType(), "_ptr" }; 7788 } 7789 7790 // Create fields 7791 for (unsigned i = 0; i < Count; ++i) { 7792 FieldDecl *Field = 7793 FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(), 7794 SourceLocation(), &Idents.get(Fields[i].Name), 7795 Fields[i].Type, /*TInfo=*/nullptr, 7796 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 7797 Field->setAccess(AS_public); 7798 CFConstantStringTagDecl->addDecl(Field); 7799 } 7800 7801 CFConstantStringTagDecl->completeDefinition(); 7802 // This type is designed to be compatible with NSConstantString, but cannot 7803 // use the same name, since NSConstantString is an interface. 7804 auto tagType = getTagDeclType(CFConstantStringTagDecl); 7805 CFConstantStringTypeDecl = 7806 buildImplicitTypedef(tagType, "__NSConstantString"); 7807 7808 return CFConstantStringTypeDecl; 7809 } 7810 7811 RecordDecl *ASTContext::getCFConstantStringTagDecl() const { 7812 if (!CFConstantStringTagDecl) 7813 getCFConstantStringDecl(); // Build the tag and the typedef. 7814 return CFConstantStringTagDecl; 7815 } 7816 7817 // getCFConstantStringType - Return the type used for constant CFStrings. 
7818 QualType ASTContext::getCFConstantStringType() const { 7819 return getTypedefType(getCFConstantStringDecl()); 7820 } 7821 7822 QualType ASTContext::getObjCSuperType() const { 7823 if (ObjCSuperType.isNull()) { 7824 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super"); 7825 getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl); 7826 ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl); 7827 } 7828 return ObjCSuperType; 7829 } 7830 7831 void ASTContext::setCFConstantStringType(QualType T) { 7832 const auto *TD = T->castAs<TypedefType>(); 7833 CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl()); 7834 const auto *TagType = 7835 CFConstantStringTypeDecl->getUnderlyingType()->castAs<RecordType>(); 7836 CFConstantStringTagDecl = TagType->getDecl(); 7837 } 7838 7839 QualType ASTContext::getBlockDescriptorType() const { 7840 if (BlockDescriptorType) 7841 return getTagDeclType(BlockDescriptorType); 7842 7843 RecordDecl *RD; 7844 // FIXME: Needs the FlagAppleBlock bit. 7845 RD = buildImplicitRecord("__block_descriptor"); 7846 RD->startDefinition(); 7847 7848 QualType FieldTypes[] = { 7849 UnsignedLongTy, 7850 UnsignedLongTy, 7851 }; 7852 7853 static const char *const FieldNames[] = { 7854 "reserved", 7855 "Size" 7856 }; 7857 7858 for (size_t i = 0; i < 2; ++i) { 7859 FieldDecl *Field = FieldDecl::Create( 7860 *this, RD, SourceLocation(), SourceLocation(), 7861 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7862 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 7863 Field->setAccess(AS_public); 7864 RD->addDecl(Field); 7865 } 7866 7867 RD->completeDefinition(); 7868 7869 BlockDescriptorType = RD; 7870 7871 return getTagDeclType(BlockDescriptorType); 7872 } 7873 7874 QualType ASTContext::getBlockDescriptorExtendedType() const { 7875 if (BlockDescriptorExtendedType) 7876 return getTagDeclType(BlockDescriptorExtendedType); 7877 7878 RecordDecl *RD; 7879 // FIXME: Needs the FlagAppleBlock bit. 
7880 RD = buildImplicitRecord("__block_descriptor_withcopydispose"); 7881 RD->startDefinition(); 7882 7883 QualType FieldTypes[] = { 7884 UnsignedLongTy, 7885 UnsignedLongTy, 7886 getPointerType(VoidPtrTy), 7887 getPointerType(VoidPtrTy) 7888 }; 7889 7890 static const char *const FieldNames[] = { 7891 "reserved", 7892 "Size", 7893 "CopyFuncPtr", 7894 "DestroyFuncPtr" 7895 }; 7896 7897 for (size_t i = 0; i < 4; ++i) { 7898 FieldDecl *Field = FieldDecl::Create( 7899 *this, RD, SourceLocation(), SourceLocation(), 7900 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7901 /*BitWidth=*/nullptr, 7902 /*Mutable=*/false, ICIS_NoInit); 7903 Field->setAccess(AS_public); 7904 RD->addDecl(Field); 7905 } 7906 7907 RD->completeDefinition(); 7908 7909 BlockDescriptorExtendedType = RD; 7910 return getTagDeclType(BlockDescriptorExtendedType); 7911 } 7912 7913 OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const { 7914 const auto *BT = dyn_cast<BuiltinType>(T); 7915 7916 if (!BT) { 7917 if (isa<PipeType>(T)) 7918 return OCLTK_Pipe; 7919 7920 return OCLTK_Default; 7921 } 7922 7923 switch (BT->getKind()) { 7924 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 7925 case BuiltinType::Id: \ 7926 return OCLTK_Image; 7927 #include "clang/Basic/OpenCLImageTypes.def" 7928 7929 case BuiltinType::OCLClkEvent: 7930 return OCLTK_ClkEvent; 7931 7932 case BuiltinType::OCLEvent: 7933 return OCLTK_Event; 7934 7935 case BuiltinType::OCLQueue: 7936 return OCLTK_Queue; 7937 7938 case BuiltinType::OCLReserveID: 7939 return OCLTK_ReserveID; 7940 7941 case BuiltinType::OCLSampler: 7942 return OCLTK_Sampler; 7943 7944 default: 7945 return OCLTK_Default; 7946 } 7947 } 7948 7949 LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const { 7950 return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 7951 } 7952 7953 /// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty" 7954 /// requires copy/dispose. 
Note that this must match the logic 7955 /// in buildByrefHelpers. 7956 bool ASTContext::BlockRequiresCopying(QualType Ty, 7957 const VarDecl *D) { 7958 if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) { 7959 const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr(); 7960 if (!copyExpr && record->hasTrivialDestructor()) return false; 7961 7962 return true; 7963 } 7964 7965 // The block needs copy/destroy helpers if Ty is non-trivial to destructively 7966 // move or destroy. 7967 if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType()) 7968 return true; 7969 7970 if (!Ty->isObjCRetainableType()) return false; 7971 7972 Qualifiers qs = Ty.getQualifiers(); 7973 7974 // If we have lifetime, that dominates. 7975 if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) { 7976 switch (lifetime) { 7977 case Qualifiers::OCL_None: llvm_unreachable("impossible"); 7978 7979 // These are just bits as far as the runtime is concerned. 7980 case Qualifiers::OCL_ExplicitNone: 7981 case Qualifiers::OCL_Autoreleasing: 7982 return false; 7983 7984 // These cases should have been taken care of when checking the type's 7985 // non-triviality. 7986 case Qualifiers::OCL_Weak: 7987 case Qualifiers::OCL_Strong: 7988 llvm_unreachable("impossible"); 7989 } 7990 llvm_unreachable("fell out of lifetime switch!"); 7991 } 7992 return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) || 7993 Ty->isObjCObjectPointerType()); 7994 } 7995 7996 bool ASTContext::getByrefLifetime(QualType Ty, 7997 Qualifiers::ObjCLifetime &LifeTime, 7998 bool &HasByrefExtendedLayout) const { 7999 if (!getLangOpts().ObjC || 8000 getLangOpts().getGC() != LangOptions::NonGC) 8001 return false; 8002 8003 HasByrefExtendedLayout = false; 8004 if (Ty->isRecordType()) { 8005 HasByrefExtendedLayout = true; 8006 LifeTime = Qualifiers::OCL_None; 8007 } else if ((LifeTime = Ty.getObjCLifetime())) { 8008 // Honor the ARC qualifiers. 
8009 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) { 8010 // The MRR rule. 8011 LifeTime = Qualifiers::OCL_ExplicitNone; 8012 } else { 8013 LifeTime = Qualifiers::OCL_None; 8014 } 8015 return true; 8016 } 8017 8018 CanQualType ASTContext::getNSUIntegerType() const { 8019 assert(Target && "Expected target to be initialized"); 8020 const llvm::Triple &T = Target->getTriple(); 8021 // Windows is LLP64 rather than LP64 8022 if (T.isOSWindows() && T.isArch64Bit()) 8023 return UnsignedLongLongTy; 8024 return UnsignedLongTy; 8025 } 8026 8027 CanQualType ASTContext::getNSIntegerType() const { 8028 assert(Target && "Expected target to be initialized"); 8029 const llvm::Triple &T = Target->getTriple(); 8030 // Windows is LLP64 rather than LP64 8031 if (T.isOSWindows() && T.isArch64Bit()) 8032 return LongLongTy; 8033 return LongTy; 8034 } 8035 8036 TypedefDecl *ASTContext::getObjCInstanceTypeDecl() { 8037 if (!ObjCInstanceTypeDecl) 8038 ObjCInstanceTypeDecl = 8039 buildImplicitTypedef(getObjCIdType(), "instancetype"); 8040 return ObjCInstanceTypeDecl; 8041 } 8042 8043 // This returns true if a type has been typedefed to BOOL: 8044 // typedef <type> BOOL; 8045 static bool isTypeTypedefedAsBOOL(QualType T) { 8046 if (const auto *TT = dyn_cast<TypedefType>(T)) 8047 if (IdentifierInfo *II = TT->getDecl()->getIdentifier()) 8048 return II->isStr("BOOL"); 8049 8050 return false; 8051 } 8052 8053 /// getObjCEncodingTypeSize returns size of type for objective-c encoding 8054 /// purpose. 
8055 CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { 8056 if (!type->isIncompleteArrayType() && type->isIncompleteType()) 8057 return CharUnits::Zero(); 8058 8059 CharUnits sz = getTypeSizeInChars(type); 8060 8061 // Make all integer and enum types at least as large as an int 8062 if (sz.isPositive() && type->isIntegralOrEnumerationType()) 8063 sz = std::max(sz, getTypeSizeInChars(IntTy)); 8064 // Treat arrays as pointers, since that's how they're passed in. 8065 else if (type->isArrayType()) 8066 sz = getTypeSizeInChars(VoidPtrTy); 8067 return sz; 8068 } 8069 8070 bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const { 8071 return getTargetInfo().getCXXABI().isMicrosoft() && 8072 VD->isStaticDataMember() && 8073 VD->getType()->isIntegralOrEnumerationType() && 8074 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit(); 8075 } 8076 8077 ASTContext::InlineVariableDefinitionKind 8078 ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const { 8079 if (!VD->isInline()) 8080 return InlineVariableDefinitionKind::None; 8081 8082 // In almost all cases, it's a weak definition. 8083 auto *First = VD->getFirstDecl(); 8084 if (First->isInlineSpecified() || !First->isStaticDataMember()) 8085 return InlineVariableDefinitionKind::Weak; 8086 8087 // If there's a file-context declaration in this translation unit, it's a 8088 // non-discardable definition. 8089 for (auto *D : VD->redecls()) 8090 if (D->getLexicalDeclContext()->isFileContext() && 8091 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr())) 8092 return InlineVariableDefinitionKind::Strong; 8093 8094 // If we've not seen one yet, we don't know. 8095 return InlineVariableDefinitionKind::WeakUnknown; 8096 } 8097 8098 static std::string charUnitsToString(const CharUnits &CU) { 8099 return llvm::itostr(CU.getQuantity()); 8100 } 8101 8102 /// getObjCEncodingForBlock - Return the encoded type for this block 8103 /// declaration. 
std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
  std::string S;

  const BlockDecl *Decl = Expr->getBlockDecl();
  QualType BlockTy =
      Expr->getType()->castAs<BlockPointerType>()->getPointeeType();
  QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType();
  // Encode result type.
  if (getLangOpts().EncodeExtendedBlockSig)
    getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, BlockReturnTy, S,
                                      true /*Extended*/);
  else
    getObjCEncodingForType(BlockReturnTy, S);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
  CharUnits ParmOffset = PtrSize;
  // First pass: total the encoded parameter sizes; zero-sized (incomplete)
  // parameters are skipped both here and below so offsets stay consistent.
  for (auto *PI : Decl->parameters()) {
    QualType PType = PI->getType();
    CharUnits sz = getObjCEncodingTypeSize(PType);
    if (sz.isZero())
      continue;
    assert(sz.isPositive() && "BlockExpr - Incomplete param type");
    ParmOffset += sz;
  }
  // Size of the argument frame
  S += charUnitsToString(ParmOffset);
  // Block pointer and offset.
  S += "@?0";

  // Argument types.
  // Second pass: emit each parameter's encoding followed by its byte offset
  // within the argument frame (starting past the block pointer itself).
  ParmOffset = PtrSize;
  for (auto *PVDecl : Decl->parameters()) {
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    if (getLangOpts().EncodeExtendedBlockSig)
      getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType,
                                        S, true /*Extended*/);
    else
      getObjCEncodingForType(PType, S);
    S += charUnitsToString(ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(PType);
  }

  return S;
}

std::string
ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const {
  std::string S;
  // Encode result type.
  getObjCEncodingForType(Decl->getReturnType(), S);
  CharUnits ParmOffset;
  // Compute size of all parameters.
  // (Same two-pass scheme as getObjCEncodingForBlock, but with no implicit
  // leading block pointer, so offsets start at zero.)
  for (auto *PI : Decl->parameters()) {
    QualType PType = PI->getType();
    CharUnits sz = getObjCEncodingTypeSize(PType);
    if (sz.isZero())
      continue;

    assert(sz.isPositive() &&
           "getObjCEncodingForFunctionDecl - Incomplete param type");
    ParmOffset += sz;
  }
  S += charUnitsToString(ParmOffset);
  ParmOffset = CharUnits::Zero();

  // Argument types.
  for (auto *PVDecl : Decl->parameters()) {
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    getObjCEncodingForType(PType, S);
    S += charUnitsToString(ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(PType);
  }

  return S;
}

/// getObjCEncodingForMethodParameter - Return the encoded type for a single
/// method parameter or return type. If Extended, include class names and
/// block object types.
void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
                                                   QualType T, std::string& S,
                                                   bool Extended) const {
  // Encode type qualifier, 'in', 'inout', etc. for the parameter.
  getObjCEncodingForTypeQualifier(QT, S);
  // Encode parameter type.
  ObjCEncOptions Options = ObjCEncOptions()
                               .setExpandPointedToStructures()
                               .setExpandStructures()
                               .setIsOutermostType();
  if (Extended)
    Options.setEncodeBlockParameters().setEncodeClassNames();
  getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr);
}

/// getObjCEncodingForMethodDecl - Return the encoded type for this method
/// declaration.
std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
                                                     bool Extended) const {
  // FIXME: This is not very efficient.
  // Encode return type.
  std::string S;
  getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(),
                                    Decl->getReturnType(), S, Extended);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
  // The first two arguments (self and _cmd) are pointers; account for
  // their size.
  CharUnits ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    QualType PType = (*PI)->getType();
    CharUnits sz = getObjCEncodingTypeSize(PType);
    if (sz.isZero())
      continue;

    assert(sz.isPositive() &&
           "getObjCEncodingForMethodDecl - Incomplete param type");
    ParmOffset += sz;
  }
  // Total frame size, then the implicit self ('@' at offset 0) and
  // _cmd (':' at the pointer-size offset).
  S += charUnitsToString(ParmOffset);
  S += "@0:";
  S += charUnitsToString(PtrSize);

  // Argument types.
  ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    const ParmVarDecl *PVDecl = *PI;
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(),
                                      PType, S, Extended);
    S += charUnitsToString(ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(PType);
  }

  return S;
}

/// Finds the property-implementation decl (\@synthesize or \@dynamic) for PD
/// inside Container, which must be a category or class implementation;
/// returns null when Container is null or holds no matching implementation.
ObjCPropertyImplDecl *
ASTContext::getObjCPropertyImplDeclForPropertyDecl(
    const ObjCPropertyDecl *PD,
    const Decl *Container) const {
  if (!Container)
    return nullptr;
  if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) {
    for (auto *PID : CID->property_impls())
      if (PID->getPropertyDecl() == PD)
        return PID;
  } else {
    const auto *OID = cast<ObjCImplementationDecl>(Container);
    for (auto *PID : OID->property_impls())
      if (PID->getPropertyDecl() == PD)
        return PID;
  }
  return nullptr;
}

/// getObjCEncodingForPropertyDecl - Return the encoded type for this
/// property declaration. If non-NULL, Container must be either an
/// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
/// NULL when getting encodings for protocol properties.
/// Property attributes are stored as a comma-delimited C string. The simple
/// attributes readonly and bycopy are encoded as single characters. The
/// parametrized attributes, getter=name, setter=name, and ivar=name, are
/// encoded as single characters, followed by an identifier. Property types
/// are also encoded as a parametrized attribute. The characters used to encode
/// these attributes are defined by the following enumeration:
/// @code
/// enum PropertyAttributes {
///   kPropertyReadOnly = 'R',   // property is read-only.
///   kPropertyBycopy = 'C',     // property is a copy of the value last assigned
///   kPropertyByref = '&',      // property is a reference to the value last assigned
///   kPropertyDynamic = 'D',    // property is dynamic
///   kPropertyGetter = 'G',     // followed by getter selector name
///   kPropertySetter = 'S',     // followed by setter selector name
///   kPropertyInstanceVariable = 'V'  // followed by instance variable  name
///   kPropertyType = 'T'              // followed by old-style type encoding.
///   kPropertyWeak = 'W'              // 'weak' property
///   kPropertyStrong = 'P'            // property GC'able
///   kPropertyNonAtomic = 'N'         // property non-atomic
///   kPropertyOptional = '?'          // property optional
/// };
/// @endcode
std::string
ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
                                           const Decl *Container) const {
  // Collect information from the property implementation decl(s).
  bool Dynamic = false;
  ObjCPropertyImplDecl *SynthesizePID = nullptr;

  if (ObjCPropertyImplDecl *PropertyImpDecl =
          getObjCPropertyImplDeclForPropertyDecl(PD, Container)) {
    if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
      Dynamic = true;
    else
      SynthesizePID = PropertyImpDecl;
  }

  // FIXME: This is not very efficient.
  std::string S = "T";

  // Encode result type.
  // GCC has some special rules regarding encoding of properties which
  // closely resembles encoding of ivars.
  getObjCEncodingForPropertyType(PD->getType(), S);

  if (PD->isOptional())
    S += ",?";

  if (PD->isReadOnly()) {
    S += ",R";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy)
      S += ",C";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain)
      S += ",&";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak)
      S += ",W";
  } else {
    // Writable property: the setter kind determines the ownership letter.
    switch (PD->getSetterKind()) {
    case ObjCPropertyDecl::Assign: break;
    case ObjCPropertyDecl::Copy:   S += ",C"; break;
    case ObjCPropertyDecl::Retain: S += ",&"; break;
    case ObjCPropertyDecl::Weak:   S += ",W"; break;
    }
  }

  // It really isn't clear at all what this means, since properties
  // are "dynamic by default".
  if (Dynamic)
    S += ",D";

  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic)
    S += ",N";

  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) {
    S += ",G";
    S += PD->getGetterName().getAsString();
  }

  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) {
    S += ",S";
    S += PD->getSetterName().getAsString();
  }

  if (SynthesizePID) {
    const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl();
    S += ",V";
    S += OID->getNameAsString();
  }

  // FIXME: OBJCGC: weak & strong
  return S;
}

/// getLegacyIntegralTypeEncoding -
/// Another legacy compatibility encoding: 32-bit longs are encoded as
/// 'l' or 'L' , but not always.  For typedefs, we need to use
/// 'i' or 'I' instead if encoding a struct field, or a pointer!
void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const {
  // Only typedefs of 32-bit long/unsigned long are rewritten to int/unsigned
  // int; a bare 'long' keeps its 'l'/'L' encoding.
  if (PointeeTy->getAs<TypedefType>()) {
    if (const auto *BT = PointeeTy->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32)
        PointeeTy = UnsignedIntTy;
      else
        if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32)
          PointeeTy = IntTy;
    }
  }
}

void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
                                        const FieldDecl *Field,
                                        QualType *NotEncodedT) const {
  // We follow the behavior of gcc, expanding structures which are
  // directly pointed to, and expanding embedded structures. Note that
  // these rules are sufficient to prevent recursive encoding of the
  // same type.
  getObjCEncodingForTypeImpl(T, S,
                             ObjCEncOptions()
                                 .setExpandPointedToStructures()
                                 .setExpandStructures()
                                 .setIsOutermostType(),
                             Field, NotEncodedT);
}

void ASTContext::getObjCEncodingForPropertyType(QualType T,
                                                std::string& S) const {
  // Encode result type.
  // GCC has some special rules regarding encoding of properties which
  // closely resembles encoding of ivars.
  getObjCEncodingForTypeImpl(T, S,
                             ObjCEncOptions()
                                 .setExpandPointedToStructures()
                                 .setExpandStructures()
                                 .setIsOutermostType()
                                 .setEncodingProperty(),
                             /*Field=*/nullptr);
}

/// Maps a builtin type to its single-character @encode letter, matching
/// GCC's historical choices (e.g. 'L'/'l' only for 32-bit longs).
static char getObjCEncodingForPrimitiveType(const ASTContext *C,
                                            const BuiltinType *BT) {
    BuiltinType::Kind kind = BT->getKind();
    switch (kind) {
    case BuiltinType::Void:       return 'v';
    case BuiltinType::Bool:       return 'B';
    case BuiltinType::Char8:
    case BuiltinType::Char_U:
    case BuiltinType::UChar:      return 'C';
    case BuiltinType::Char16:
    case BuiltinType::UShort:     return 'S';
    case BuiltinType::Char32:
    case BuiltinType::UInt:       return 'I';
    case BuiltinType::ULong:
      // 64-bit longs are encoded like unsigned long long for legacy reasons.
      return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q';
    case BuiltinType::UInt128:    return 'T';
    case BuiltinType::ULongLong:  return 'Q';
    case BuiltinType::Char_S:
    case BuiltinType::SChar:      return 'c';
    case BuiltinType::Short:      return 's';
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Int:        return 'i';
    case BuiltinType::Long:
      return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q';
    case BuiltinType::LongLong:   return 'q';
    case BuiltinType::Int128:     return 't';
    case BuiltinType::Float:      return 'f';
    case BuiltinType::Double:     return 'd';
    case BuiltinType::LongDouble: return 'D';
    case BuiltinType::NullPtr:    return '*'; // like char*

    case BuiltinType::BFloat16:
    case BuiltinType::Float16:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
    case BuiltinType::Half:
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      // FIXME: potentially need @encodes for these!
      return ' ';

#define SVE_TYPE(Name, Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/AMDGPUTypes.def"
      {
        // Target-specific opaque vector/reference types have no encoding;
        // emit a diagnostic and a placeholder instead of asserting.
        DiagnosticsEngine &Diags = C->getDiagnostics();
        unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
                                                "cannot yet @encode type %0");
        Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy());
        return ' ';
      }

    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      llvm_unreachable("@encoding ObjC primitive type");

    // OpenCL and placeholder types don't need @encodings.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
    case BuiltinType::OCLSampler:
    case BuiltinType::Dependent:
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
#define BUILTIN_TYPE(KIND, ID)
#define PLACEHOLDER_TYPE(KIND, ID) \
    case BuiltinType::KIND:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("invalid builtin type for @encode");
    }
    llvm_unreachable("invalid BuiltinType::Kind value");
}

static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) {
  EnumDecl *Enum = ET->getDecl();

  // The encoding of an non-fixed enum type is always 'i', regardless of size.
  if (!Enum->isFixed())
    return 'i';

  // The encoding of a fixed enum type matches its fixed underlying type.
  const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>();
  return getObjCEncodingForPrimitiveType(C, BT);
}

static void EncodeBitField(const ASTContext *Ctx, std::string& S,
                           QualType T, const FieldDecl *FD) {
  assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl");
  S += 'b';
  // The NeXT runtime encodes bit fields as b followed by the number of bits.
  // The GNU runtime requires more information; bitfields are encoded as b,
  // then the offset (in bits) of the first element, then the type of the
  // bitfield, then the size in bits.  For example, in this structure:
  //
  // struct
  // {
  //    int integer;
  //    int flags:2;
  // };
  // On a 32-bit system, the encoding for flags would be b2 for the NeXT
  // runtime, but b32i2 for the GNU runtime.  The reason for this extra
  // information is not especially sensible, but we're stuck with it for
  // compatibility with GCC, although providing it breaks anything that
  // actually uses runtime introspection and wants to work on both runtimes...
  if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) {
    uint64_t Offset;

    if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) {
      Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr,
                                         IVD);
    } else {
      const RecordDecl *RD = FD->getParent();
      const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD);
      Offset = RL.getFieldOffset(FD->getFieldIndex());
    }

    S += llvm::utostr(Offset);

    if (const auto *ET = T->getAs<EnumType>())
      S += ObjCEncodingForEnumType(Ctx, ET);
    else {
      const auto *BT = T->castAs<BuiltinType>();
      S += getObjCEncodingForPrimitiveType(Ctx, BT);
    }
  }
  // Both runtimes end with the width in bits.
  S += llvm::utostr(FD->getBitWidthValue(*Ctx));
}

// Helper function for determining whether the encoded type string would include
// a template specialization type.
static bool hasTemplateSpecializationInEncodedString(const Type *T,
                                                     bool VisitBasesAndFields) {
  T = T->getBaseElementTypeUnsafe();

  // Pointees are checked shallowly: bases/fields of the pointee are not
  // themselves expanded in the encoding of a pointer.
  if (auto *PT = T->getAs<PointerType>())
    return hasTemplateSpecializationInEncodedString(
        PT->getPointeeType().getTypePtr(), false);

  auto *CXXRD = T->getAsCXXRecordDecl();

  if (!CXXRD)
    return false;

  if (isa<ClassTemplateSpecializationDecl>(CXXRD))
    return true;

  if (!CXXRD->hasDefinition() || !VisitBasesAndFields)
    return false;

  for (const auto &B : CXXRD->bases())
    if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(),
                                                 true))
      return true;

  for (auto *FD : CXXRD->fields())
    if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(),
                                                 true))
      return true;

  return false;
}

// FIXME: Use SmallString for accumulating string.
/// Append the Objective-C @encode string for the canonical form of \p T to
/// \p S.  \p Options controls context-sensitive behavior (outermost type,
/// struct expansion, block parameters, ...); \p FD, when non-null, is the
/// field being encoded (enables bit-field and name encoding); \p NotEncodedT,
/// when non-null, receives a type for which no coherent encoding exists
/// (vectors, matrices, _BitInt, member pointers).
void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
                                            const ObjCEncOptions Options,
                                            const FieldDecl *FD,
                                            QualType *NotEncodedT) const {
  CanQualType CT = getCanonicalType(T);
  switch (CT->getTypeClass()) {
  case Type::Builtin:
  case Type::Enum:
    if (FD && FD->isBitField())
      return EncodeBitField(this, S, T, FD);
    if (const auto *BT = dyn_cast<BuiltinType>(CT))
      S += getObjCEncodingForPrimitiveType(this, BT);
    else
      S += ObjCEncodingForEnumType(this, cast<EnumType>(CT));
    return;

  case Type::Complex:
    // 'j' prefix followed by the element type's encoding.
    S += 'j';
    getObjCEncodingForTypeImpl(T->castAs<ComplexType>()->getElementType(), S,
                               ObjCEncOptions(),
                               /*Field=*/nullptr);
    return;

  case Type::Atomic:
    // 'A' prefix followed by the value type's encoding.
    S += 'A';
    getObjCEncodingForTypeImpl(T->castAs<AtomicType>()->getValueType(), S,
                               ObjCEncOptions(),
                               /*Field=*/nullptr);
    return;

  // encoding for pointer or reference types.
  case Type::Pointer:
  case Type::LValueReference:
  case Type::RValueReference: {
    QualType PointeeTy;
    if (isa<PointerType>(CT)) {
      const auto *PT = T->castAs<PointerType>();
      if (PT->isObjCSelType()) {
        S += ':';
        return;
      }
      PointeeTy = PT->getPointeeType();
    } else {
      PointeeTy = T->castAs<ReferenceType>()->getPointeeType();
    }

    bool isReadOnly = false;
    // For historical/compatibility reasons, the read-only qualifier of the
    // pointee gets emitted _before_ the '^'.  The read-only qualifier of
    // the pointer itself gets ignored, _unless_ we are looking at a typedef!
    // Also, do not emit the 'r' for anything but the outermost type!
    if (T->getAs<TypedefType>()) {
      if (Options.IsOutermostType() && T.isConstQualified()) {
        isReadOnly = true;
        S += 'r';
      }
    } else if (Options.IsOutermostType()) {
      // Walk down nested pointers and test the innermost pointee for const.
      QualType P = PointeeTy;
      while (auto PT = P->getAs<PointerType>())
        P = PT->getPointeeType();
      if (P.isConstQualified()) {
        isReadOnly = true;
        S += 'r';
      }
    }
    if (isReadOnly) {
      // Another legacy compatibility encoding. Some ObjC qualifier and type
      // combinations need to be rearranged.
      // Rewrite "in const" from "nr" to "rn"
      if (StringRef(S).ends_with("nr"))
        S.replace(S.end()-2, S.end(), "rn");
    }

    if (PointeeTy->isCharType()) {
      // char pointer types should be encoded as '*' unless it is a
      // type that has been typedef'd to 'BOOL'.
      if (!isTypeTypedefedAsBOOL(PointeeTy)) {
        S += '*';
        return;
      }
    } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) {
      // GCC binary compat: Need to convert "struct objc_class *" to "#".
      if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) {
        S += '#';
        return;
      }
      // GCC binary compat: Need to convert "struct objc_object *" to "@".
      if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) {
        S += '@';
        return;
      }
      // If the encoded string for the class includes template names, just emit
      // "^v" for pointers to the class.
      if (getLangOpts().CPlusPlus &&
          (!getLangOpts().EncodeCXXClassTemplateSpec &&
           hasTemplateSpecializationInEncodedString(
               RTy, Options.ExpandPointedToStructures()))) {
        S += "^v";
        return;
      }
      // fall through...
    }
    S += '^';
    getLegacyIntegralTypeEncoding(PointeeTy);

    ObjCEncOptions NewOptions;
    if (Options.ExpandPointedToStructures())
      NewOptions.setExpandStructures();
    getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions,
                               /*Field=*/nullptr, NotEncodedT);
    return;
  }

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray: {
    const auto *AT = cast<ArrayType>(CT);

    if (isa<IncompleteArrayType>(AT) && !Options.IsStructField()) {
      // Incomplete arrays are encoded as a pointer to the array element.
      S += '^';

      getObjCEncodingForTypeImpl(
          AT->getElementType(), S,
          Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD);
    } else {
      S += '[';

      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        S += llvm::utostr(CAT->getZExtSize());
      else {
        //Variable length arrays are encoded as a regular array with 0 elements.
        assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) &&
               "Unknown array type!");
        S += '0';
      }

      getObjCEncodingForTypeImpl(
          AT->getElementType(), S,
          Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD,
          NotEncodedT);
      S += ']';
    }
    return;
  }

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // Function types are encoded as the "unknown" character.
    S += '?';
    return;

  case Type::Record: {
    RecordDecl *RDecl = cast<RecordType>(CT)->getDecl();
    S += RDecl->isUnion() ? '(' : '{';
    // Anonymous structures print as '?'
    if (const IdentifierInfo *II = RDecl->getIdentifier()) {
      S += II->getName();
      if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) {
        const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
        llvm::raw_string_ostream OS(S);
        printTemplateArgumentList(OS, TemplateArgs.asArray(),
                                  getPrintingPolicy());
      }
    } else {
      S += '?';
    }
    if (Options.ExpandStructures()) {
      S += '=';
      if (!RDecl->isUnion()) {
        getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT);
      } else {
        // Unions are expanded inline: every member is emitted at offset 0.
        for (const auto *Field : RDecl->fields()) {
          if (FD) {
            S += '"';
            S += Field->getNameAsString();
            S += '"';
          }

          // Special case bit-fields.
          if (Field->isBitField()) {
            getObjCEncodingForTypeImpl(Field->getType(), S,
                                       ObjCEncOptions().setExpandStructures(),
                                       Field);
          } else {
            QualType qt = Field->getType();
            getLegacyIntegralTypeEncoding(qt);
            getObjCEncodingForTypeImpl(
                qt, S,
                ObjCEncOptions().setExpandStructures().setIsStructField(), FD,
                NotEncodedT);
          }
        }
      }
    }
    S += RDecl->isUnion() ? ')' : '}';
    return;
  }

  case Type::BlockPointer: {
    const auto *BT = T->castAs<BlockPointerType>();
    S += "@?"; // Unlike a pointer-to-function, which is "^?".
    if (Options.EncodeBlockParameters()) {
      const auto *FT = BT->getPointeeType()->castAs<FunctionType>();

      S += '<';
      // Block return type
      getObjCEncodingForTypeImpl(FT->getReturnType(), S,
                                 Options.forComponentType(), FD, NotEncodedT);
      // Block self
      S += "@?";
      // Block parameters
      if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) {
        for (const auto &I : FPT->param_types())
          getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD,
                                     NotEncodedT);
      }
      S += '>';
    }
    return;
  }

  case Type::ObjCObject: {
    // hack to match legacy encoding of *id and *Class
    QualType Ty = getObjCObjectPointerType(CT);
    if (Ty->isObjCIdType()) {
      S += "{objc_object=}";
      return;
    }
    else if (Ty->isObjCClassType()) {
      S += "{objc_class=}";
      return;
    }
    // TODO: Double check to make sure this intentionally falls through.
    [[fallthrough]];
  }

  case Type::ObjCInterface: {
    // Ignore protocol qualifiers when mangling at this level.
    // @encode(class_name)
    ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface();
    S += '{';
    S += OI->getObjCRuntimeNameAsString();
    if (Options.ExpandStructures()) {
      S += '=';
      // Collect the full, flattened ivar list (including superclasses).
      SmallVector<const ObjCIvarDecl*, 32> Ivars;
      DeepCollectObjCIvars(OI, true, Ivars);
      for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
        const FieldDecl *Field = Ivars[i];
        if (Field->isBitField())
          getObjCEncodingForTypeImpl(Field->getType(), S,
                                     ObjCEncOptions().setExpandStructures(),
                                     Field);
        else
          getObjCEncodingForTypeImpl(Field->getType(), S,
                                     ObjCEncOptions().setExpandStructures(), FD,
                                     NotEncodedT);
      }
    }
    S += '}';
    return;
  }

  case Type::ObjCObjectPointer: {
    const auto *OPT = T->castAs<ObjCObjectPointerType>();
    if (OPT->isObjCIdType()) {
      S += '@';
      return;
    }

    if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) {
      // FIXME: Consider if we need to output qualifiers for 'Class<p>'.
      // Since this is a binary compatibility issue, need to consult with
      // runtime folks. Fortunately, this is a *very* obscure construct.
      S += '#';
      return;
    }

    if (OPT->isObjCQualifiedIdType()) {
      getObjCEncodingForTypeImpl(
          getObjCIdType(), S,
          Options.keepingOnly(ObjCEncOptions()
                                  .setExpandPointedToStructures()
                                  .setExpandStructures()),
          FD);
      if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) {
        // Note that we do extended encoding of protocol qualifier list
        // Only when doing ivar or property encoding.
        S += '"';
        for (const auto *I : OPT->quals()) {
          S += '<';
          S += I->getObjCRuntimeNameAsString();
          S += '>';
        }
        S += '"';
      }
      return;
    }

    S += '@';
    if (OPT->getInterfaceDecl() &&
        (FD || Options.EncodingProperty() || Options.EncodeClassNames())) {
      S += '"';
      S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString();
      for (const auto *I : OPT->quals()) {
        S += '<';
        S += I->getObjCRuntimeNameAsString();
        S += '>';
      }
      S += '"';
    }
    return;
  }

  // gcc just blithely ignores member pointers.
  // FIXME: we should do better than that. 'M' is available.
  case Type::MemberPointer:
  // This matches gcc's encoding, even though technically it is insufficient.
  //FIXME. We should do a better job than gcc.
  case Type::Vector:
  case Type::ExtVector:
    // Until we have a coherent encoding of these three types, issue warning.
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  case Type::ConstantMatrix:
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  case Type::BitInt:
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  // We could see an undeduced auto type here during error recovery.
  // Just ignore it.
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    return;

  case Type::ArrayParameter:
  case Type::Pipe:
#define ABSTRACT_TYPE(KIND, BASE)
#define TYPE(KIND, BASE)
#define DEPENDENT_TYPE(KIND, BASE) \
  case Type::KIND:
#define NON_CANONICAL_TYPE(KIND, BASE) \
  case Type::KIND:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \
  case Type::KIND:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("@encode for dependent type!");
  }
  llvm_unreachable("bad type kind!");
}

/// Append the expanded @encode string for the structure \p RDecl, walking
/// bases and fields in layout-offset order.  \p includeVBases selects whether
/// virtual bases (and the full object size) participate; \p FD, when
/// non-null, enables field-name encoding.
void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
                                                 std::string &S,
                                                 const FieldDecl *FD,
                                                 bool includeVBases,
                                                 QualType *NotEncodedT) const {
  assert(RDecl && "Expected non-null RecordDecl");
  assert(!RDecl->isUnion() && "Should not be called for unions");
  if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl())
    return;

  const auto *CXXRec = dyn_cast<CXXRecordDecl>(RDecl);
  // Bases and fields, keyed and ordered by their bit offset in the layout.
  std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets;
  const ASTRecordLayout &layout = getASTRecordLayout(RDecl);

  if (CXXRec) {
    for (const auto &BI : CXXRec->bases()) {
      if (!BI.isVirtual()) {
        CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
        if (base->isEmpty())
          continue;
        uint64_t offs = toBits(layout.getBaseClassOffset(base));
        FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
                                  std::make_pair(offs, base));
      }
    }
  }

  for (FieldDecl *Field : RDecl->fields()) {
    if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this))
      continue;
    uint64_t offs = layout.getFieldOffset(Field->getFieldIndex());
    FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
                              std::make_pair(offs, Field));
  }

  if (CXXRec && includeVBases) {
    for (const auto &BI : CXXRec->vbases()) {
      CXXRecordDecl *base =
BI.getType()->getAsCXXRecordDecl();
      if (base->isEmpty())
        continue;
      uint64_t offs = toBits(layout.getVBaseClassOffset(base));
      // Only record a virtual base that lands past the non-virtual part and
      // whose offset is not already occupied.
      if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) &&
          FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end())
        FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(),
                                  std::make_pair(offs, base));
    }
  }

  CharUnits size;
  if (CXXRec) {
    size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
  } else {
    size = layout.getSize();
  }

#ifndef NDEBUG
  // In assert builds, track the running bit offset to sanity-check layout.
  uint64_t CurOffs = 0;
#endif
  std::multimap<uint64_t, NamedDecl *>::iterator
    CurLayObj = FieldOrBaseOffsets.begin();

  if (CXXRec && CXXRec->isDynamicClass() &&
      (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) {
    // Nothing occupies offset 0, so emit an entry for the vtable pointer.
    if (FD) {
      S += "\"_vptr$";
      std::string recname = CXXRec->getNameAsString();
      if (recname.empty()) recname = "?";
      S += recname;
      S += '"';
    }
    S += "^^?";
#ifndef NDEBUG
    CurOffs += getTypeSize(VoidPtrTy);
#endif
  }

  if (!RDecl->hasFlexibleArrayMember()) {
    // Mark the end of the structure.
    uint64_t offs = toBits(size);
    FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
                              std::make_pair(offs, nullptr));
  }

  for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
#ifndef NDEBUG
    assert(CurOffs <= CurLayObj->first);
    if (CurOffs < CurLayObj->first) {
      uint64_t padding = CurLayObj->first - CurOffs;
      // FIXME: There doesn't seem to be a way to indicate in the encoding that
      // packing/alignment of members is different than normal, in which case
      // the encoding will be out-of-sync with the real layout.
      // If the runtime switches to just consider the size of types without
      // taking into account alignment, we could make padding explicit in the
      // encoding (e.g. using arrays of chars). The encoding strings would be
      // longer then though.
      CurOffs += padding;
    }
#endif

    NamedDecl *dcl = CurLayObj->second;
    if (!dcl)
      break; // reached end of structure.

    if (auto *base = dyn_cast<CXXRecordDecl>(dcl)) {
      // We expand the bases without their virtual bases since those are going
      // in the initial structure. Note that this differs from gcc which
      // expands virtual bases each time one is encountered in the hierarchy,
      // making the encoding type bigger than it really is.
      getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false,
                                      NotEncodedT);
      assert(!base->isEmpty());
#ifndef NDEBUG
      CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
#endif
    } else {
      const auto *field = cast<FieldDecl>(dcl);
      if (FD) {
        S += '"';
        S += field->getNameAsString();
        S += '"';
      }

      if (field->isBitField()) {
        EncodeBitField(this, S, field->getType(), field);
#ifndef NDEBUG
        CurOffs += field->getBitWidthValue(*this);
#endif
      } else {
        QualType qt = field->getType();
        getLegacyIntegralTypeEncoding(qt);
        getObjCEncodingForTypeImpl(
            qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(),
            FD, NotEncodedT);
#ifndef NDEBUG
        CurOffs += getTypeSize(field->getType());
#endif
      }
    }
  }
}

/// Append the @encode qualifier characters for the ObjC parameter
/// qualifiers in \p QT (in/inout/out/bycopy/byref/oneway) to \p S.
void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
                                                 std::string& S) const {
  if (QT & Decl::OBJC_TQ_In)
    S += 'n';
  if (QT & Decl::OBJC_TQ_Inout)
    S += 'N';
  if (QT & Decl::OBJC_TQ_Out)
    S += 'o';
  if (QT & Decl::OBJC_TQ_Bycopy)
    S += 'O';
  if (QT & Decl::OBJC_TQ_Byref)
    S += 'R';
  if (QT & Decl::OBJC_TQ_Oneway)
    S += 'V';
}

/// Lazily build and cache the implicit typedef for the ObjC "id" type.
TypedefDecl *ASTContext::getObjCIdDecl() const {
  if (!ObjCIdDecl) {
    QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {});
    T =
getObjCObjectPointerType(T); 9150 ObjCIdDecl = buildImplicitTypedef(T, "id"); 9151 } 9152 return ObjCIdDecl; 9153 } 9154 9155 TypedefDecl *ASTContext::getObjCSelDecl() const { 9156 if (!ObjCSelDecl) { 9157 QualType T = getPointerType(ObjCBuiltinSelTy); 9158 ObjCSelDecl = buildImplicitTypedef(T, "SEL"); 9159 } 9160 return ObjCSelDecl; 9161 } 9162 9163 TypedefDecl *ASTContext::getObjCClassDecl() const { 9164 if (!ObjCClassDecl) { 9165 QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {}); 9166 T = getObjCObjectPointerType(T); 9167 ObjCClassDecl = buildImplicitTypedef(T, "Class"); 9168 } 9169 return ObjCClassDecl; 9170 } 9171 9172 ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const { 9173 if (!ObjCProtocolClassDecl) { 9174 ObjCProtocolClassDecl 9175 = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(), 9176 SourceLocation(), 9177 &Idents.get("Protocol"), 9178 /*typeParamList=*/nullptr, 9179 /*PrevDecl=*/nullptr, 9180 SourceLocation(), true); 9181 } 9182 9183 return ObjCProtocolClassDecl; 9184 } 9185 9186 //===----------------------------------------------------------------------===// 9187 // __builtin_va_list Construction Functions 9188 //===----------------------------------------------------------------------===// 9189 9190 static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context, 9191 StringRef Name) { 9192 // typedef char* __builtin[_ms]_va_list; 9193 QualType T = Context->getPointerType(Context->CharTy); 9194 return Context->buildImplicitTypedef(T, Name); 9195 } 9196 9197 static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) { 9198 return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list"); 9199 } 9200 9201 static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) { 9202 return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list"); 9203 } 9204 9205 static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { 9206 // typedef void* __builtin_va_list; 9207 QualType 
T = Context->getPointerType(Context->VoidTy); 9208 return Context->buildImplicitTypedef(T, "__builtin_va_list"); 9209 } 9210 9211 static TypedefDecl * 9212 CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { 9213 // struct __va_list 9214 RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list"); 9215 if (Context->getLangOpts().CPlusPlus) { 9216 // namespace std { struct __va_list { 9217 auto *NS = NamespaceDecl::Create( 9218 const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(), 9219 /*Inline=*/false, SourceLocation(), SourceLocation(), 9220 &Context->Idents.get("std"), 9221 /*PrevDecl=*/nullptr, /*Nested=*/false); 9222 NS->setImplicit(); 9223 VaListTagDecl->setDeclContext(NS); 9224 } 9225 9226 VaListTagDecl->startDefinition(); 9227 9228 const size_t NumFields = 5; 9229 QualType FieldTypes[NumFields]; 9230 const char *FieldNames[NumFields]; 9231 9232 // void *__stack; 9233 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 9234 FieldNames[0] = "__stack"; 9235 9236 // void *__gr_top; 9237 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 9238 FieldNames[1] = "__gr_top"; 9239 9240 // void *__vr_top; 9241 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 9242 FieldNames[2] = "__vr_top"; 9243 9244 // int __gr_offs; 9245 FieldTypes[3] = Context->IntTy; 9246 FieldNames[3] = "__gr_offs"; 9247 9248 // int __vr_offs; 9249 FieldTypes[4] = Context->IntTy; 9250 FieldNames[4] = "__vr_offs"; 9251 9252 // Create fields 9253 for (unsigned i = 0; i < NumFields; ++i) { 9254 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 9255 VaListTagDecl, 9256 SourceLocation(), 9257 SourceLocation(), 9258 &Context->Idents.get(FieldNames[i]), 9259 FieldTypes[i], /*TInfo=*/nullptr, 9260 /*BitWidth=*/nullptr, 9261 /*Mutable=*/false, 9262 ICIS_NoInit); 9263 Field->setAccess(AS_public); 9264 VaListTagDecl->addDecl(Field); 9265 } 9266 VaListTagDecl->completeDefinition(); 9267 Context->VaListTagDecl = VaListTagDecl; 
9268 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 9269 9270 // } __builtin_va_list; 9271 return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list"); 9272 } 9273 9274 static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { 9275 // typedef struct __va_list_tag { 9276 RecordDecl *VaListTagDecl; 9277 9278 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 9279 VaListTagDecl->startDefinition(); 9280 9281 const size_t NumFields = 5; 9282 QualType FieldTypes[NumFields]; 9283 const char *FieldNames[NumFields]; 9284 9285 // unsigned char gpr; 9286 FieldTypes[0] = Context->UnsignedCharTy; 9287 FieldNames[0] = "gpr"; 9288 9289 // unsigned char fpr; 9290 FieldTypes[1] = Context->UnsignedCharTy; 9291 FieldNames[1] = "fpr"; 9292 9293 // unsigned short reserved; 9294 FieldTypes[2] = Context->UnsignedShortTy; 9295 FieldNames[2] = "reserved"; 9296 9297 // void* overflow_arg_area; 9298 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 9299 FieldNames[3] = "overflow_arg_area"; 9300 9301 // void* reg_save_area; 9302 FieldTypes[4] = Context->getPointerType(Context->VoidTy); 9303 FieldNames[4] = "reg_save_area"; 9304 9305 // Create fields 9306 for (unsigned i = 0; i < NumFields; ++i) { 9307 FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl, 9308 SourceLocation(), 9309 SourceLocation(), 9310 &Context->Idents.get(FieldNames[i]), 9311 FieldTypes[i], /*TInfo=*/nullptr, 9312 /*BitWidth=*/nullptr, 9313 /*Mutable=*/false, 9314 ICIS_NoInit); 9315 Field->setAccess(AS_public); 9316 VaListTagDecl->addDecl(Field); 9317 } 9318 VaListTagDecl->completeDefinition(); 9319 Context->VaListTagDecl = VaListTagDecl; 9320 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 9321 9322 // } __va_list_tag; 9323 TypedefDecl *VaListTagTypedefDecl = 9324 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 9325 9326 QualType VaListTagTypedefType = 9327 Context->getTypedefType(VaListTagTypedefDecl); 9328 
9329 // typedef __va_list_tag __builtin_va_list[1]; 9330 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 9331 QualType VaListTagArrayType = Context->getConstantArrayType( 9332 VaListTagTypedefType, Size, nullptr, ArraySizeModifier::Normal, 0); 9333 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 9334 } 9335 9336 static TypedefDecl * 9337 CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) { 9338 // struct __va_list_tag { 9339 RecordDecl *VaListTagDecl; 9340 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 9341 VaListTagDecl->startDefinition(); 9342 9343 const size_t NumFields = 4; 9344 QualType FieldTypes[NumFields]; 9345 const char *FieldNames[NumFields]; 9346 9347 // unsigned gp_offset; 9348 FieldTypes[0] = Context->UnsignedIntTy; 9349 FieldNames[0] = "gp_offset"; 9350 9351 // unsigned fp_offset; 9352 FieldTypes[1] = Context->UnsignedIntTy; 9353 FieldNames[1] = "fp_offset"; 9354 9355 // void* overflow_arg_area; 9356 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 9357 FieldNames[2] = "overflow_arg_area"; 9358 9359 // void* reg_save_area; 9360 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 9361 FieldNames[3] = "reg_save_area"; 9362 9363 // Create fields 9364 for (unsigned i = 0; i < NumFields; ++i) { 9365 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 9366 VaListTagDecl, 9367 SourceLocation(), 9368 SourceLocation(), 9369 &Context->Idents.get(FieldNames[i]), 9370 FieldTypes[i], /*TInfo=*/nullptr, 9371 /*BitWidth=*/nullptr, 9372 /*Mutable=*/false, 9373 ICIS_NoInit); 9374 Field->setAccess(AS_public); 9375 VaListTagDecl->addDecl(Field); 9376 } 9377 VaListTagDecl->completeDefinition(); 9378 Context->VaListTagDecl = VaListTagDecl; 9379 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 9380 9381 // }; 9382 9383 // typedef struct __va_list_tag __builtin_va_list[1]; 9384 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 9385 
QualType VaListTagArrayType = Context->getConstantArrayType( 9386 VaListTagType, Size, nullptr, ArraySizeModifier::Normal, 0); 9387 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 9388 } 9389 9390 static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { 9391 // typedef int __builtin_va_list[4]; 9392 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); 9393 QualType IntArrayType = Context->getConstantArrayType( 9394 Context->IntTy, Size, nullptr, ArraySizeModifier::Normal, 0); 9395 return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list"); 9396 } 9397 9398 static TypedefDecl * 9399 CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { 9400 // struct __va_list 9401 RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list"); 9402 if (Context->getLangOpts().CPlusPlus) { 9403 // namespace std { struct __va_list { 9404 NamespaceDecl *NS; 9405 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), 9406 Context->getTranslationUnitDecl(), 9407 /*Inline=*/false, SourceLocation(), 9408 SourceLocation(), &Context->Idents.get("std"), 9409 /*PrevDecl=*/nullptr, /*Nested=*/false); 9410 NS->setImplicit(); 9411 VaListDecl->setDeclContext(NS); 9412 } 9413 9414 VaListDecl->startDefinition(); 9415 9416 // void * __ap; 9417 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 9418 VaListDecl, 9419 SourceLocation(), 9420 SourceLocation(), 9421 &Context->Idents.get("__ap"), 9422 Context->getPointerType(Context->VoidTy), 9423 /*TInfo=*/nullptr, 9424 /*BitWidth=*/nullptr, 9425 /*Mutable=*/false, 9426 ICIS_NoInit); 9427 Field->setAccess(AS_public); 9428 VaListDecl->addDecl(Field); 9429 9430 // }; 9431 VaListDecl->completeDefinition(); 9432 Context->VaListTagDecl = VaListDecl; 9433 9434 // typedef struct __va_list __builtin_va_list; 9435 QualType T = Context->getRecordType(VaListDecl); 9436 return Context->buildImplicitTypedef(T, "__builtin_va_list"); 9437 } 9438 9439 
/// Build the SystemZ flavor of __builtin_va_list: a one-element array of
/// a tag struct tracking GPR/FPR counts and the two save areas.
static TypedefDecl *
CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
  // struct __va_list_tag {
  RecordDecl *VaListTagDecl;
  VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
  VaListTagDecl->startDefinition();

  const size_t NumFields = 4;
  QualType FieldTypes[NumFields];
  const char *FieldNames[NumFields];

  // long __gpr;
  FieldTypes[0] = Context->LongTy;
  FieldNames[0] = "__gpr";

  // long __fpr;
  FieldTypes[1] = Context->LongTy;
  FieldNames[1] = "__fpr";

  // void *__overflow_arg_area;
  FieldTypes[2] = Context->getPointerType(Context->VoidTy);
  FieldNames[2] = "__overflow_arg_area";

  // void *__reg_save_area;
  FieldTypes[3] = Context->getPointerType(Context->VoidTy);
  FieldNames[3] = "__reg_save_area";

  // Create fields
  for (unsigned i = 0; i < NumFields; ++i) {
    FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
                                         VaListTagDecl,
                                         SourceLocation(),
                                         SourceLocation(),
                                         &Context->Idents.get(FieldNames[i]),
                                         FieldTypes[i], /*TInfo=*/nullptr,
                                         /*BitWidth=*/nullptr,
                                         /*Mutable=*/false,
                                         ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(Field);
  }
  VaListTagDecl->completeDefinition();
  // Record the tag so ASTContext::getVaListTagDecl() can find it later.
  Context->VaListTagDecl = VaListTagDecl;
  QualType VaListTagType = Context->getRecordType(VaListTagDecl);

  // };

  // typedef __va_list_tag __builtin_va_list[1];
  llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
  QualType VaListTagArrayType = Context->getConstantArrayType(
      VaListTagType, Size, nullptr, ArraySizeModifier::Normal, 0);

  return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}

/// Build the Hexagon flavor of __builtin_va_list: a one-element array of a
/// tag struct holding three area pointers. Unlike SystemZ, the element type
/// of the array is the *typedef* of the tag, not the record type itself.
static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) {
  // typedef struct __va_list_tag {
  RecordDecl *VaListTagDecl;
  VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
  VaListTagDecl->startDefinition();

  const size_t NumFields = 3;
  QualType FieldTypes[NumFields];
  const char *FieldNames[NumFields];

  // void *CurrentSavedRegisterArea;
  FieldTypes[0] = Context->getPointerType(Context->VoidTy);
  FieldNames[0] = "__current_saved_reg_area_pointer";

  // void *SavedRegAreaEnd;
  FieldTypes[1] = Context->getPointerType(Context->VoidTy);
  FieldNames[1] = "__saved_reg_area_end_pointer";

  // void *OverflowArea;
  FieldTypes[2] = Context->getPointerType(Context->VoidTy);
  FieldNames[2] = "__overflow_area_pointer";

  // Create fields
  for (unsigned i = 0; i < NumFields; ++i) {
    FieldDecl *Field = FieldDecl::Create(
        const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(),
        SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i],
        /*TInfo=*/nullptr,
        /*BitWidth=*/nullptr,
        /*Mutable=*/false, ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(Field);
  }
  VaListTagDecl->completeDefinition();
  Context->VaListTagDecl = VaListTagDecl;
  QualType VaListTagType = Context->getRecordType(VaListTagDecl);

  // } __va_list_tag;
  TypedefDecl *VaListTagTypedefDecl =
      Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");

  QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl);

  // typedef __va_list_tag __builtin_va_list[1];
  llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
  QualType VaListTagArrayType = Context->getConstantArrayType(
      VaListTagTypedefType, Size, nullptr, ArraySizeModifier::Normal, 0);

  return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}

/// Dispatch to the target-specific builder for the target's notion of
/// __builtin_va_list.
static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
                                     TargetInfo::BuiltinVaListKind Kind) {
  switch (Kind) {
  case TargetInfo::CharPtrBuiltinVaList:
    return CreateCharPtrBuiltinVaListDecl(Context);
  case TargetInfo::VoidPtrBuiltinVaList:
    return CreateVoidPtrBuiltinVaListDecl(Context);
  case TargetInfo::AArch64ABIBuiltinVaList:
    return CreateAArch64ABIBuiltinVaListDecl(Context);
  case TargetInfo::PowerABIBuiltinVaList:
    return CreatePowerABIBuiltinVaListDecl(Context);
  case TargetInfo::X86_64ABIBuiltinVaList:
    return CreateX86_64ABIBuiltinVaListDecl(Context);
  case TargetInfo::PNaClABIBuiltinVaList:
    return CreatePNaClABIBuiltinVaListDecl(Context);
  case TargetInfo::AAPCSABIBuiltinVaList:
    return CreateAAPCSABIBuiltinVaListDecl(Context);
  case TargetInfo::SystemZBuiltinVaList:
    return CreateSystemZBuiltinVaListDecl(Context);
  case TargetInfo::HexagonBuiltinVaList:
    return CreateHexagonBuiltinVaListDecl(Context);
  }

  llvm_unreachable("Unhandled __builtin_va_list type kind");
}

/// Return the implicit typedef for __builtin_va_list, creating it lazily on
/// first use for the current target.
TypedefDecl *ASTContext::getBuiltinVaListDecl() const {
  if (!BuiltinVaListDecl) {
    BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind());
    assert(BuiltinVaListDecl->isImplicit());
  }

  return BuiltinVaListDecl;
}

/// Return the tag decl behind __builtin_va_list, if the target's va_list has
/// one. May return null for targets whose builders never set VaListTagDecl.
Decl *ASTContext::getVaListTagDecl() const {
  // Force the creation of VaListTagDecl by building the __builtin_va_list
  // declaration.
  if (!VaListTagDecl)
    (void)getBuiltinVaListDecl();

  return VaListTagDecl;
}

/// Return the implicit typedef for __builtin_ms_va_list, creating it lazily.
TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const {
  if (!BuiltinMSVaListDecl)
    BuiltinMSVaListDecl = CreateMSVaListDecl(this);

  return BuiltinMSVaListDecl;
}

/// Whether the given function may be redeclared even though it names a
/// builtin.
bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const {
  // Allow redecl custom type checking builtin for HLSL.
  if (LangOpts.HLSL && FD->getBuiltinID() != Builtin::NotBuiltin &&
      BuiltinInfo.hasCustomTypechecking(FD->getBuiltinID()))
    return true;
  return BuiltinInfo.canBeRedeclared(FD->getBuiltinID());
}

/// Record the interface backing ObjC constant strings (NSConstantString).
/// May only be set once per context.
void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
  assert(ObjCConstantStringType.isNull() &&
         "'NSConstantString' type already set!");

  ObjCConstantStringType = getObjCInterfaceType(Decl);
}

/// Retrieve the template name that corresponds to a non-empty
/// lookup.
TemplateName
ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
                                      UnresolvedSetIterator End) const {
  unsigned size = End - Begin;
  assert(size > 1 && "set is not overloaded!");

  // The storage and its trailing decl array live in the context's allocator;
  // they are never individually freed.
  void *memory = Allocate(sizeof(OverloadedTemplateStorage) +
                          size * sizeof(FunctionTemplateDecl*));
  auto *OT = new (memory) OverloadedTemplateStorage(size);

  NamedDecl **Storage = OT->getStorage();
  for (UnresolvedSetIterator I = Begin; I != End; ++I) {
    NamedDecl *D = *I;
    assert(isa<FunctionTemplateDecl>(D) ||
           isa<UnresolvedUsingValueDecl>(D) ||
           (isa<UsingShadowDecl>(D) &&
            isa<FunctionTemplateDecl>(D->getUnderlyingDecl())));
    *Storage++ = D;
  }

  return TemplateName(OT);
}

/// Retrieve a template name representing an unqualified-id that has been
/// assumed to name a template for ADL purposes.
TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const {
  auto *OT = new (*this) AssumedTemplateStorage(Name);
  return TemplateName(OT);
}

/// Retrieve the template name that represents a qualified
/// template name such as \c std::vector.
TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
                                                  bool TemplateKeyword,
                                                  TemplateName Template) const {
  assert(Template.getKind() == TemplateName::Template ||
         Template.getKind() == TemplateName::UsingTemplate);

  // FIXME: Canonicalization?
  // Unique via the QualifiedTemplateNames folding set; equal
  // (NNS, keyword, template) triples share one node.
  llvm::FoldingSetNodeID ID;
  QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template);

  void *InsertPos = nullptr;
  QualifiedTemplateName *QTN =
      QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
  if (!QTN) {
    QTN = new (*this, alignof(QualifiedTemplateName))
        QualifiedTemplateName(NNS, TemplateKeyword, Template);
    QualifiedTemplateNames.InsertNode(QTN, InsertPos);
  }

  return TemplateName(QTN);
}

/// Retrieve the template name that represents a dependent
/// template name such as \c MetaFun::template apply.
TemplateName
ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
                                     const IdentifierInfo *Name) const {
  assert((!NNS || NNS->isDependent()) &&
         "Nested name specifier must be dependent");

  llvm::FoldingSetNodeID ID;
  DependentTemplateName::Profile(ID, NNS, Name);

  void *InsertPos = nullptr;
  DependentTemplateName *QTN =
      DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);

  if (QTN)
    return TemplateName(QTN);

  // A node with a canonical NNS is its own canonical form; otherwise chain
  // to the canonical node, building it first via recursion.
  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
  if (CanonNNS == NNS) {
    QTN = new (*this, alignof(DependentTemplateName))
        DependentTemplateName(NNS, Name);
  } else {
    TemplateName Canon = getDependentTemplateName(CanonNNS, Name);
    QTN = new (*this, alignof(DependentTemplateName))
        DependentTemplateName(NNS, Name, Canon);
    // The recursive call may have touched the folding set; re-run the lookup
    // to refresh InsertPos before inserting below.
    DependentTemplateName *CheckQTN =
        DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckQTN && "Dependent type name canonicalization broken");
    (void)CheckQTN;
  }

  DependentTemplateNames.InsertNode(QTN, InsertPos);
  return TemplateName(QTN);
}

/// Retrieve the template name that represents a dependent
/// template name such as \c MetaFun::template operator+.
TemplateName
ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
                                     OverloadedOperatorKind Operator) const {
  assert((!NNS || NNS->isDependent()) &&
         "Nested name specifier must be dependent");

  llvm::FoldingSetNodeID ID;
  DependentTemplateName::Profile(ID, NNS, Operator);

  void *InsertPos = nullptr;
  DependentTemplateName *QTN
    = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);

  if (QTN)
    return TemplateName(QTN);

  // Same canonicalization scheme as the identifier overload above.
  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
  if (CanonNNS == NNS) {
    QTN = new (*this, alignof(DependentTemplateName))
        DependentTemplateName(NNS, Operator);
  } else {
    TemplateName Canon = getDependentTemplateName(CanonNNS, Operator);
    QTN = new (*this, alignof(DependentTemplateName))
        DependentTemplateName(NNS, Operator, Canon);

    // The recursive call may have touched the folding set; re-run the lookup
    // to refresh InsertPos before inserting below.
    DependentTemplateName *CheckQTN
      = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckQTN && "Dependent template name canonicalization broken");
    (void)CheckQTN;
  }

  DependentTemplateNames.InsertNode(QTN, InsertPos);
  return TemplateName(QTN);
}

/// Retrieve a uniqued template name representing a template template
/// parameter that has been substituted with \p Replacement.
TemplateName ASTContext::getSubstTemplateTemplateParm(
    TemplateName Replacement, Decl *AssociatedDecl, unsigned Index,
    std::optional<unsigned> PackIndex) const {
  llvm::FoldingSetNodeID ID;
  SubstTemplateTemplateParmStorage::Profile(ID, Replacement, AssociatedDecl,
                                            Index, PackIndex);

  void *insertPos = nullptr;
  SubstTemplateTemplateParmStorage *subst
    = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos);

  if (!subst) {
    subst = new (*this) SubstTemplateTemplateParmStorage(
        Replacement, AssociatedDecl, Index, PackIndex);
    SubstTemplateTemplateParms.InsertNode(subst, insertPos);
  }

  return TemplateName(subst);
}

/// Retrieve a uniqued template name representing a template template
/// parameter pack substituted with the given argument pack.
TemplateName
ASTContext::getSubstTemplateTemplateParmPack(const TemplateArgument &ArgPack,
                                             Decl *AssociatedDecl,
                                             unsigned Index, bool Final) const {
  auto &Self = const_cast<ASTContext &>(*this);
  llvm::FoldingSetNodeID ID;
  SubstTemplateTemplateParmPackStorage::Profile(ID, Self, ArgPack,
                                                AssociatedDecl, Index, Final);

  void *InsertPos = nullptr;
  SubstTemplateTemplateParmPackStorage *Subst
    = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos);

  if (!Subst) {
    Subst = new (*this) SubstTemplateTemplateParmPackStorage(
        ArgPack.pack_elements(), AssociatedDecl, Index, Final);
    SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos);
  }

  return TemplateName(Subst);
}

/// getFromTargetType - Given one of the integer types provided by
/// TargetInfo, produce the corresponding type. The unsigned @p Type
/// is actually a value of type @c TargetInfo::IntType.
CanQualType ASTContext::getFromTargetType(unsigned Type) const {
  switch (Type) {
  // NoInt maps to a null CanQualType.
  case TargetInfo::NoInt: return {};
  case TargetInfo::SignedChar: return SignedCharTy;
  case TargetInfo::UnsignedChar: return UnsignedCharTy;
  case TargetInfo::SignedShort: return ShortTy;
  case TargetInfo::UnsignedShort: return UnsignedShortTy;
  case TargetInfo::SignedInt: return IntTy;
  case TargetInfo::UnsignedInt: return UnsignedIntTy;
  case TargetInfo::SignedLong: return LongTy;
  case TargetInfo::UnsignedLong: return UnsignedLongTy;
  case TargetInfo::SignedLongLong: return LongLongTy;
  case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
  }

  llvm_unreachable("Unhandled TargetInfo::IntType value");
}

//===----------------------------------------------------------------------===//
//                        Type Predicates.
//===----------------------------------------------------------------------===//

/// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's
/// garbage collection attribute.
///
Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
  if (getLangOpts().getGC() == LangOptions::NonGC)
    return Qualifiers::GCNone;

  assert(getLangOpts().ObjC);
  Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();

  // Default behaviour under objective-C's gc is for ObjC pointers
  // (or pointers to them) be treated as though they were declared
  // as __strong.
  if (GCAttrs == Qualifiers::GCNone) {
    if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
      return Qualifiers::Strong;
    else if (Ty->isPointerType())
      // Recurse through pointers so e.g. id** also reports Strong.
      return getObjCGCAttrKind(Ty->castAs<PointerType>()->getPointeeType());
  } else {
    // It's not valid to set GC attributes on anything that isn't a
    // pointer.
#ifndef NDEBUG
    QualType CT = Ty->getCanonicalTypeInternal();
    while (const auto *AT = dyn_cast<ArrayType>(CT))
      CT = AT->getElementType();
    assert(CT->isAnyPointerType() || CT->isBlockPointerType());
#endif
  }
  return GCAttrs;
}

//===----------------------------------------------------------------------===//
//                        Type Compatibility Testing
//===----------------------------------------------------------------------===//

/// areCompatVectorTypes - Return true if the two specified vector types are
/// compatible.
static bool areCompatVectorTypes(const VectorType *LHS,
                                 const VectorType *RHS) {
  assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
  return LHS->getElementType() == RHS->getElementType() &&
         LHS->getNumElements() == RHS->getNumElements();
}

/// areCompatMatrixTypes - Return true if the two specified matrix types are
/// compatible.
static bool areCompatMatrixTypes(const ConstantMatrixType *LHS,
                                 const ConstantMatrixType *RHS) {
  assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
  return LHS->getElementType() == RHS->getElementType() &&
         LHS->getNumRows() == RHS->getNumRows() &&
         LHS->getNumColumns() == RHS->getNumColumns();
}

bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
                                          QualType SecondVec) {
  assert(FirstVec->isVectorType() && "FirstVec should be a vector type");
  assert(SecondVec->isVectorType() && "SecondVec should be a vector type");

  if (hasSameUnqualifiedType(FirstVec, SecondVec))
    return true;

  // Treat Neon vector types and most AltiVec vector types as if they are the
  // equivalent GCC vector types.  Kinds excluded below (AltiVec pixel/bool,
  // SVE and RVV fixed-length kinds) never interconvert this way.
  const auto *First = FirstVec->castAs<VectorType>();
  const auto *Second = SecondVec->castAs<VectorType>();
  if (First->getNumElements() == Second->getNumElements() &&
      hasSameType(First->getElementType(), Second->getElementType()) &&
      First->getVectorKind() != VectorKind::AltiVecPixel &&
      First->getVectorKind() != VectorKind::AltiVecBool &&
      Second->getVectorKind() != VectorKind::AltiVecPixel &&
      Second->getVectorKind() != VectorKind::AltiVecBool &&
      First->getVectorKind() != VectorKind::SveFixedLengthData &&
      First->getVectorKind() != VectorKind::SveFixedLengthPredicate &&
      Second->getVectorKind() != VectorKind::SveFixedLengthData &&
      Second->getVectorKind() != VectorKind::SveFixedLengthPredicate &&
      First->getVectorKind() != VectorKind::RVVFixedLengthData &&
      Second->getVectorKind() != VectorKind::RVVFixedLengthData &&
      First->getVectorKind() != VectorKind::RVVFixedLengthMask &&
      Second->getVectorKind() != VectorKind::RVVFixedLengthMask)
    return true;

  return false;
}

/// getSVETypeSize - Return SVE vector or predicate register size.
static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) {
  assert(Ty->isSveVLSBuiltinType() && "Invalid SVE Type");
  // Predicates (and svcount) have one bit per data-vector byte, so their
  // size is the data size divided by the char width.
  if (Ty->getKind() == BuiltinType::SveBool ||
      Ty->getKind() == BuiltinType::SveCount)
    return (Context.getLangOpts().VScaleMin * 128) / Context.getCharWidth();
  return Context.getLangOpts().VScaleMin * 128;
}

bool ASTContext::areCompatibleSveTypes(QualType FirstType,
                                       QualType SecondType) {
  // Compatibility is symmetric: try the builtin/vector pairing both ways.
  auto IsValidCast = [this](QualType FirstType, QualType SecondType) {
    if (const auto *BT = FirstType->getAs<BuiltinType>()) {
      if (const auto *VT = SecondType->getAs<VectorType>()) {
        // Predicates have the same representation as uint8 so we also have to
        // check the kind to make these types incompatible.
        if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
          return BT->getKind() == BuiltinType::SveBool;
        else if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
          return VT->getElementType().getCanonicalType() ==
                 FirstType->getSveEltType(*this);
        else if (VT->getVectorKind() == VectorKind::Generic)
          return getTypeSize(SecondType) == getSVETypeSize(*this, BT) &&
                 hasSameType(VT->getElementType(),
                             getBuiltinVectorTypeInfo(BT).ElementType);
      }
    }
    return false;
  };

  return IsValidCast(FirstType, SecondType) ||
         IsValidCast(SecondType, FirstType);
}

bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType,
                                          QualType SecondType) {
  // Lax compatibility is symmetric as well.
  auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
    const auto *BT = FirstType->getAs<BuiltinType>();
    if (!BT)
      return false;

    const auto *VecTy = SecondType->getAs<VectorType>();
    if (VecTy && (VecTy->getVectorKind() == VectorKind::SveFixedLengthData ||
                  VecTy->getVectorKind() == VectorKind::Generic)) {
      const LangOptions::LaxVectorConversionKind LVCKind =
          getLangOpts().getLaxVectorConversions();

      // Can not convert between sve predicates and sve vectors because of
      // different size.
      if (BT->getKind() == BuiltinType::SveBool &&
          VecTy->getVectorKind() == VectorKind::SveFixedLengthData)
        return false;

      // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion.
      // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly
      // converts to VLAT and VLAT implicitly converts to GNUT."
      // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and
      // predicates.
      if (VecTy->getVectorKind() == VectorKind::Generic &&
          getTypeSize(SecondType) != getSVETypeSize(*this, BT))
        return false;

      // If -flax-vector-conversions=all is specified, the types are
      // certainly compatible.
      if (LVCKind == LangOptions::LaxVectorConversionKind::All)
        return true;

      // If -flax-vector-conversions=integer is specified, the types are
      // compatible if the elements are integer types.
      if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
        return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
               FirstType->getSveEltType(*this)->isIntegerType();
    }

    return false;
  };

  return IsLaxCompatible(FirstType, SecondType) ||
         IsLaxCompatible(SecondType, FirstType);
}

/// getRVVTypeSize - Return RVV vector register size.
static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) {
  assert(Ty->isRVVVLSBuiltinType() && "Invalid RVV Type");
  auto VScale = Context.getTargetInfo().getVScaleRange(Context.getLangOpts());
  // Without a known vscale range there is no fixed size; report 0.
  if (!VScale)
    return 0;

  ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty);

  uint64_t EltSize = Context.getTypeSize(Info.ElementType);
  // Mask elements occupy a single bit each.
  if (Info.ElementType == Context.BoolTy)
    EltSize = 1;

  uint64_t MinElts = Info.EC.getKnownMinValue();
  return VScale->first * MinElts * EltSize;
}

bool ASTContext::areCompatibleRVVTypes(QualType FirstType,
                                       QualType SecondType) {
  assert(
      ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
       (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
      "Expected RVV builtin type and vector type!");

  // Compatibility is symmetric: try the builtin/vector pairing both ways.
  auto IsValidCast = [this](QualType FirstType, QualType SecondType) {
    if (const auto *BT = FirstType->getAs<BuiltinType>()) {
      if (const auto *VT = SecondType->getAs<VectorType>()) {
        if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask) {
          BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(BT);
          return FirstType->isRVVVLSBuiltinType() &&
                 Info.ElementType == BoolTy &&
                 getTypeSize(SecondType) == getRVVTypeSize(*this, BT);
        }
        if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
            VT->getVectorKind() == VectorKind::Generic)
          return FirstType->isRVVVLSBuiltinType() &&
                 getTypeSize(SecondType) == getRVVTypeSize(*this, BT) &&
                 hasSameType(VT->getElementType(),
                             getBuiltinVectorTypeInfo(BT).ElementType);
      }
    }
    return false;
  };

  return IsValidCast(FirstType, SecondType) ||
         IsValidCast(SecondType, FirstType);
}

bool ASTContext::areLaxCompatibleRVVTypes(QualType FirstType,
                                          QualType SecondType) {
  assert(
      ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
       (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
      "Expected RVV builtin type and vector type!");

  // Lax compatibility is symmetric as well.
  auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
    const auto *BT = FirstType->getAs<BuiltinType>();
    if (!BT)
      return false;

    if (!BT->isRVVVLSBuiltinType())
      return false;

    const auto *VecTy = SecondType->getAs<VectorType>();
    if (VecTy && VecTy->getVectorKind() == VectorKind::Generic) {
      const LangOptions::LaxVectorConversionKind LVCKind =
          getLangOpts().getLaxVectorConversions();

      // If __riscv_v_fixed_vlen != N do not allow vector lax conversion.
      if (getTypeSize(SecondType) != getRVVTypeSize(*this, BT))
        return false;

      // If -flax-vector-conversions=all is specified, the types are
      // certainly compatible.
      if (LVCKind == LangOptions::LaxVectorConversionKind::All)
        return true;

      // If -flax-vector-conversions=integer is specified, the types are
      // compatible if the elements are integer types.
      if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
        return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
               FirstType->getRVVEltType(*this)->isIntegerType();
    }

    return false;
  };

  return IsLaxCompatible(FirstType, SecondType) ||
         IsLaxCompatible(SecondType, FirstType);
}

/// Whether the given type carries an ObjC ownership qualifier written
/// directly on it (looking through only attributes and parens, never
/// typedefs or other sugar).
bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const {
  while (true) {
    // __strong id
    if (const AttributedType *Attr = dyn_cast<AttributedType>(Ty)) {
      if (Attr->getAttrKind() == attr::ObjCOwnership)
        return true;

      Ty = Attr->getModifiedType();

      // X *__strong (...)
    } else if (const ParenType *Paren = dyn_cast<ParenType>(Ty)) {
      Ty = Paren->getInnerType();

      // We do not want to look through typedefs, typeof(expr),
      // typeof(type), or any other way that the type is somehow
      // abstracted.
    } else {
      return false;
    }
  }
}

//===----------------------------------------------------------------------===//
// ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's.
//===----------------------------------------------------------------------===//

/// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the
/// inheritance hierarchy of 'rProto'.
bool
ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
                                           ObjCProtocolDecl *rProto) const {
  if (declaresSameEntity(lProto, rProto))
    return true;
  // Recurse into the protocols rProto inherits from.
  for (auto *PI : rProto->protocols())
    if (ProtocolCompatibleWithProtocol(lProto, PI))
      return true;
  return false;
}

/// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and
/// Class<pr1, ...>.
bool ASTContext::ObjCQualifiedClassTypesAreCompatible(
    const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) {
  // Every protocol on the LHS must be satisfied by some protocol on the RHS.
  for (auto *lhsProto : lhs->quals()) {
    bool match = false;
    for (auto *rhsProto : rhs->quals()) {
      if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) {
        match = true;
        break;
      }
    }
    if (!match)
      return false;
  }
  return true;
}

/// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an
/// ObjCQualifiedIDType.
bool ASTContext::ObjCQualifiedIdTypesAreCompatible(
    const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs,
    bool compare) {
  // Allow id<P..> and an 'id' in all cases.
  if (lhs->isObjCIdType() || rhs->isObjCIdType())
    return true;

  // Don't allow id<P..> to convert to Class or Class<P..> in either direction.
  if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() ||
      rhs->isObjCClassType() || rhs->isObjCQualifiedClassType())
    return false;

  if (lhs->isObjCQualifiedIdType()) {
    if (rhs->qual_empty()) {
      // If the RHS is a unqualified interface pointer "NSString*",
      // make sure we check the class hierarchy.
      if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
        for (auto *I : lhs->quals()) {
          // when comparing an id<P> on lhs with a static type on rhs,
          // see if static class implements all of id's protocols, directly or
          // through its super class and categories.
          if (!rhsID->ClassImplementsProtocol(I, true))
            return false;
        }
      }
      // If there are no qualifiers and no interface, we have an 'id'.
      return true;
    }
    // Both the right and left sides have qualifiers.
    for (auto *lhsProto : lhs->quals()) {
      bool match = false;

      // when comparing an id<P> on lhs with a static type on rhs,
      // see if static class implements all of id's protocols, directly or
      // through its super class and categories.
      // When 'compare' is set, conformance is accepted in either direction.
      for (auto *rhsProto : rhs->quals()) {
        if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
            (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
          match = true;
          break;
        }
      }
      // If the RHS is a qualified interface pointer "NSString<P>*",
      // make sure we check the class hierarchy.
      if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
        for (auto *I : lhs->quals()) {
          // when comparing an id<P> on lhs with a static type on rhs,
          // see if static class implements all of id's protocols, directly or
          // through its super class and categories.
          if (rhsID->ClassImplementsProtocol(I, true)) {
            match = true;
            break;
          }
        }
      }
      if (!match)
        return false;
    }

    return true;
  }

  assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>");

  if (lhs->getInterfaceType()) {
    // If both the right and left sides have qualifiers.
    for (auto *lhsProto : lhs->quals()) {
      bool match = false;

      // when comparing an id<P> on rhs with a static type on lhs,
      // see if static class implements all of id's protocols, directly or
      // through its super class and categories.
      // First, lhs protocols in the qualifier list must be found, direct
      // or indirect in rhs's qualifier list or it is a mismatch.
      for (auto *rhsProto : rhs->quals()) {
        if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
            (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
          match = true;
          break;
        }
      }
      if (!match)
        return false;
    }

    // Static class's protocols, or its super class or category protocols
    // must be found, direct or indirect in rhs's qualifier list or it is a mismatch.
    if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) {
      llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols;
      CollectInheritedProtocols(lhsID, LHSInheritedProtocols);
      // This is rather dubious but matches gcc's behavior. If lhs has
      // no type qualifier and its class has no static protocol(s)
      // assume that it is mismatch.
      if (LHSInheritedProtocols.empty() && lhs->qual_empty())
        return false;
      for (auto *lhsProto : LHSInheritedProtocols) {
        bool match = false;
        for (auto *rhsProto : rhs->quals()) {
          if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
              (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
            match = true;
            break;
          }
        }
        if (!match)
          return false;
      }
    }
    return true;
  }
  return false;
}

/// canAssignObjCInterfaces - Return true if the two interface types are
/// compatible for assignment from RHS to LHS.  This handles validation of any
/// protocol qualifiers on the LHS or RHS.
bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT,
                                         const ObjCObjectPointerType *RHSOPT) {
  const ObjCObjectType* LHS = LHSOPT->getObjectType();
  const ObjCObjectType* RHS = RHSOPT->getObjectType();

  // If either type represents the built-in 'id' type, return true.
  if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId())
    return true;

  // Function object that propagates a successful result or handles
  // __kindof types.
  auto finish = [&](bool succeeded) -> bool {
    if (succeeded)
      return true;

    if (!RHS->isKindOfType())
      return false;

    // Strip off __kindof and protocol qualifiers, then check whether
    // we can assign the other way.
    return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this),
                                   LHSOPT->stripObjCKindOfTypeAndQuals(*this));
  };

  // Casts from or to id<P> are allowed when the other side has compatible
  // protocols.
  if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) {
    return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false));
  }

  // Verify protocol compatibility for casts from Class<P1> to Class<P2>.
  if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) {
    return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT, RHSOPT));
  }

  // Casts from Class to Class<Foo>, or vice-versa, are allowed.
  if (LHS->isObjCClass() && RHS->isObjCClass()) {
    return true;
  }

  // If we have 2 user-defined types, fall into that path.
  if (LHS->getInterface() && RHS->getInterface()) {
    return finish(canAssignObjCInterfaces(LHS, RHS));
  }

  return false;
}

/// canAssignObjCInterfacesInBlockPointer - This routine is specifically written
/// for providing type-safety for objective-c pointers used to pass/return
/// arguments in block literals. When passed as arguments, passing 'A*' where
/// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is
/// not OK. For the return type, the opposite is not OK.
bool ASTContext::canAssignObjCInterfacesInBlockPointer(
                                         const ObjCObjectPointerType *LHSOPT,
                                         const ObjCObjectPointerType *RHSOPT,
                                         bool BlockReturnType) {

  // Function object that propagates a successful result or handles
  // __kindof types. `Expected` is the side that constrains the assignment:
  // the RHS for block return types, the LHS for block parameters.
  auto finish = [&](bool succeeded) -> bool {
    if (succeeded)
      return true;

    const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT;
    if (!Expected->isKindOfType())
      return false;

    // Strip off __kindof and protocol qualifiers, then check whether
    // we can assign the other way.
    return canAssignObjCInterfacesInBlockPointer(
             RHSOPT->stripObjCKindOfTypeAndQuals(*this),
             LHSOPT->stripObjCKindOfTypeAndQuals(*this),
             BlockReturnType);
  };

  if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType())
    return true;

  if (LHSOPT->isObjCBuiltinType()) {
    return finish(RHSOPT->isObjCBuiltinType() ||
                  RHSOPT->isObjCQualifiedIdType());
  }

  if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) {
    if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking)
      // For block parameters, use the previous (looser) type checking for
      // compatibility.
      return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) ||
                    // Or corrected type checking as in non-compat mode.
                    (!BlockReturnType &&
                     ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false)));
    else
      // Direction of the check depends on whether this is a return type or
      // a parameter (see the function comment above).
      return finish(ObjCQualifiedIdTypesAreCompatible(
          (BlockReturnType ? LHSOPT : RHSOPT),
          (BlockReturnType ? RHSOPT : LHSOPT), false));
  }

  const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
  const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
  if (LHS && RHS) { // We have 2 user-defined types.
    if (LHS != RHS) {
      // Sub/super relationships are acceptable in exactly one direction,
      // chosen by whether this is a return type or a parameter.
      if (LHS->getDecl()->isSuperClassOf(RHS->getDecl()))
        return finish(BlockReturnType);
      if (RHS->getDecl()->isSuperClassOf(LHS->getDecl()))
        return finish(!BlockReturnType);
    }
    else
      return true;
  }
  return false;
}

/// Comparison routine for Objective-C protocols to be used with
/// llvm::array_pod_sort.
static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs,
                                      ObjCProtocolDecl * const *rhs) {
  return (*lhs)->getName().compare((*rhs)->getName());
}

/// getIntersectionOfProtocols - This routine finds the intersection of set
/// of protocols inherited from two distinct objective-c pointer objects with
/// the given common base.
/// It is used to build composite qualifier list of the composite type of
/// the conditional expression involving two objective-c pointer objects.
static
void getIntersectionOfProtocols(ASTContext &Context,
                                const ObjCInterfaceDecl *CommonBase,
                                const ObjCObjectPointerType *LHSOPT,
                                const ObjCObjectPointerType *RHSOPT,
                      SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) {

  const ObjCObjectType* LHS = LHSOPT->getObjectType();
  const ObjCObjectType* RHS = RHSOPT->getObjectType();
  assert(LHS->getInterface() && "LHS must have an interface base");
  assert(RHS->getInterface() && "RHS must have an interface base");

  // Add all of the protocols for the LHS.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet;

  // Start with the protocol qualifiers.
  for (auto *proto : LHS->quals()) {
    Context.CollectInheritedProtocols(proto, LHSProtocolSet);
  }

  // Also add the protocols associated with the LHS interface.
  Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet);

  // Add all of the protocols for the RHS.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet;

  // Start with the protocol qualifiers.
  for (auto *proto : RHS->quals()) {
    Context.CollectInheritedProtocols(proto, RHSProtocolSet);
  }

  // Also add the protocols associated with the RHS interface.
  Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet);

  // Compute the intersection of the collected protocol sets.
  for (auto *proto : LHSProtocolSet) {
    if (RHSProtocolSet.count(proto))
      IntersectionSet.push_back(proto);
  }

  // Compute the set of protocols that is implied by either the common type or
  // the protocols within the intersection.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols;
  Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols);

  // Remove any implied protocols from the list of inherited protocols.
  if (!ImpliedProtocols.empty()) {
    llvm::erase_if(IntersectionSet, [&](ObjCProtocolDecl *proto) -> bool {
      return ImpliedProtocols.contains(proto);
    });
  }

  // Sort the remaining protocols by name, so the composite type's qualifier
  // list has a deterministic order.
  llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(),
                       compareObjCProtocolsByName);
}

/// Determine whether the first type is a subtype of the second.
static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs,
                                     QualType rhs) {
  // Common case: two object pointers.
  const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>();
  const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
  if (lhsOPT && rhsOPT)
    return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT);

  // Two block pointers.
  const auto *lhsBlock = lhs->getAs<BlockPointerType>();
  const auto *rhsBlock = rhs->getAs<BlockPointerType>();
  if (lhsBlock && rhsBlock)
    return ctx.typesAreBlockPointerCompatible(lhs, rhs);

  // If either is an unqualified 'id' and the other is a block, it's
  // acceptable.
  if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) ||
      (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock))
    return true;

  return false;
}

// Check that the given Objective-C type argument lists are equivalent.
static bool sameObjCTypeArgs(ASTContext &ctx,
                             const ObjCInterfaceDecl *iface,
                             ArrayRef<QualType> lhsArgs,
                             ArrayRef<QualType> rhsArgs,
                             bool stripKindOf) {
  if (lhsArgs.size() != rhsArgs.size())
    return false;

  ObjCTypeParamList *typeParams = iface->getTypeParamList();
  if (!typeParams)
    return false;

  for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) {
    if (ctx.hasSameType(lhsArgs[i], rhsArgs[i]))
      continue;

    // Arguments that aren't identical may still be acceptable depending on
    // the variance of the corresponding type parameter.
    switch (typeParams->begin()[i]->getVariance()) {
    case ObjCTypeParamVariance::Invariant:
      // Invariant parameters may only differ by __kindof (when permitted).
      if (!stripKindOf ||
          !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx),
                           rhsArgs[i].stripObjCKindOfType(ctx))) {
        return false;
      }
      break;

    case ObjCTypeParamVariance::Covariant:
      if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i]))
        return false;
      break;

    case ObjCTypeParamVariance::Contravariant:
      // Same subtype check as covariant, with the operands swapped.
      if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i]))
        return false;
      break;
    }
  }

  return true;
}

QualType ASTContext::areCommonBaseCompatible(
           const ObjCObjectPointerType *Lptr,
           const ObjCObjectPointerType *Rptr) {
  const ObjCObjectType *LHS = Lptr->getObjectType();
  const ObjCObjectType *RHS = Rptr->getObjectType();
  const ObjCInterfaceDecl* LDecl = LHS->getInterface();
  const ObjCInterfaceDecl* RDecl = RHS->getInterface();

  // Both sides must be interface types to have a common base.
  if (!LDecl || !RDecl)
    return {};

  // When either LHS or RHS is a kindof type, we should return a kindof type.
  // For example, for common base of kindof(ASub1) and kindof(ASub2), we return
  // kindof(A).
  bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType();

  // Follow the left-hand side up the class hierarchy until we either hit a
  // root or find the RHS. Record the ancestors in case we don't find it.
  llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4>
    LHSAncestors;
  while (true) {
    // Record this ancestor. We'll need this if the common type isn't in the
    // path from the LHS to the root.
    LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS;

    if (declaresSameEntity(LHS->getInterface(), RDecl)) {
      // Get the type arguments.
      ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(*this, LHS->getInterface(),
                              LHS->getTypeArgs(), RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        LHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr,
                                 Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If anything in the LHS will have changed, build a new result type.
      // If we need to return a kindof type but LHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || LHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(LHS->getInterface());
        Result = getObjCObjectType(Result, LHSTypeArgs, Protocols,
                                   anyKindOf || LHS->isKindOfType());
        return getObjCObjectPointerType(Result);
      }

      return getObjCObjectPointerType(QualType(LHS, 0));
    }

    // Find the superclass.
    QualType LHSSuperType = LHS->getSuperClassType();
    if (LHSSuperType.isNull())
      break;

    LHS = LHSSuperType->castAs<ObjCObjectType>();
  }

  // We didn't find anything by following the LHS to its root; now check
  // the RHS against the cached set of ancestors.
  while (true) {
    auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl());
    if (KnownLHS != LHSAncestors.end()) {
      LHS = KnownLHS->second;

      // Get the type arguments.
      ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(*this, LHS->getInterface(),
                              LHS->getTypeArgs(), RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        RHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr,
                                 Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If we need to return a kindof type but RHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || RHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(RHS->getInterface());
        Result = getObjCObjectType(Result, RHSTypeArgs, Protocols,
                                   anyKindOf || RHS->isKindOfType());
        return getObjCObjectPointerType(Result);
      }

      return getObjCObjectPointerType(QualType(RHS, 0));
    }

    // Find the superclass of the RHS.
    QualType RHSSuperType = RHS->getSuperClassType();
    if (RHSSuperType.isNull())
      break;

    RHS = RHSSuperType->castAs<ObjCObjectType>();
  }

  return {};
}

bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
                                         const ObjCObjectType *RHS) {
  assert(LHS->getInterface() && "LHS is not an interface type");
  assert(RHS->getInterface() && "RHS is not an interface type");

  // Verify that the base decls are compatible: the RHS must be a subclass of
  // the LHS.
  ObjCInterfaceDecl *LHSInterface = LHS->getInterface();
  bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface());
  if (!IsSuperClass)
    return false;

  // If the LHS has protocol qualifiers, determine whether all of them are
  // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the
  // LHS).
  if (LHS->getNumProtocols() > 0) {
    // OK if conversion of LHS to SuperClass results in narrowing of types
    // ; i.e., SuperClass may implement at least one of the protocols
    // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok.
    // But not SuperObj<P1,P2,P3> = lhs<P1,P2>.
    llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
    CollectInheritedProtocols(RHS->getInterface(),
                              SuperClassInheritedProtocols);
    // Also, if RHS has explicit qualifiers, include them for comparing with
    // LHS's qualifiers.
    for (auto *RHSPI : RHS->quals())
      CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols);
    // If there are no protocols associated with RHS, it is not a match.
    if (SuperClassInheritedProtocols.empty())
      return false;

    for (const auto *LHSProto : LHS->quals()) {
      bool SuperImplementsProtocol = false;
      for (auto *SuperClassProto : SuperClassInheritedProtocols)
        if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
          SuperImplementsProtocol = true;
          break;
        }
      if (!SuperImplementsProtocol)
        return false;
    }
  }

  // If the LHS is specialized, we may need to check type arguments.
  if (LHS->isSpecialized()) {
    // Follow the superclass chain until we've matched the LHS class in the
    // hierarchy. This substitutes type arguments through.
    const ObjCObjectType *RHSSuper = RHS;
    while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface))
      RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();

    // If the RHS is specialized, compare type arguments.
    if (RHSSuper->isSpecialized() &&
        !sameObjCTypeArgs(*this, LHS->getInterface(),
                          LHS->getTypeArgs(), RHSSuper->getTypeArgs(),
                          /*stripKindOf=*/true)) {
      return false;
    }
  }

  return true;
}

bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
  // get the "pointed to" types
  const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
  const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();

  if (!LHSOPT || !RHSOPT)
    return false;

  // Comparable if assignable in either direction.
  return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
         canAssignObjCInterfaces(RHSOPT, LHSOPT);
}

bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
  return canAssignObjCInterfaces(
      getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(),
      getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>());
}

/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
/// both shall have the identically qualified version of a compatible type.
/// C99 6.2.7p1: Two types have compatible types if their types are the
/// same. See 6.7.[2,3,5] for additional rules.
bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS,
                                    bool CompareUnqualified) {
  // C++ compatibility is stricter: the types must be identical.
  if (getLangOpts().CPlusPlus)
    return hasSameType(LHS, RHS);

  return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull();
}

bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) {
  return typesAreCompatible(LHS, RHS);
}

bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) {
  return !mergeTypes(LHS, RHS, true).isNull();
}

/// mergeTransparentUnionType - if T is a transparent union type and a member
/// of T is compatible with SubType, return the merged type, else return
/// QualType()
QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType,
                                               bool OfBlockPointer,
                                               bool Unqualified) {
  if (const RecordType *UT = T->getAsUnionType()) {
    RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      // Try each field in turn; the first successful merge wins.
      for (const auto *I : UD->fields()) {
        QualType ET = I->getType().getUnqualifiedType();
        QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified);
        if (!MT.isNull())
          return MT;
      }
    }
  }

  return {};
}

/// mergeFunctionParameterTypes - merge two types which appear as function
/// parameter types
QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
                                                 bool OfBlockPointer,
                                                 bool Unqualified) {
  // GNU extension: two types are compatible if they appear as a function
  // argument, one of the types is a transparent union type and the other
  // type is compatible with a union member
  QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer,
                                              Unqualified);
  if (!lmerge.isNull())
    return lmerge;

  QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer,
                                              Unqualified);
  if (!rmerge.isNull())
    return rmerge;

  return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified);
}

QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
                                        bool OfBlockPointer, bool Unqualified,
                                        bool AllowCXX,
                                        bool IsConditionalOperator) {
  const auto *lbase = lhs->castAs<FunctionType>();
  const auto *rbase = rhs->castAs<FunctionType>();
  const auto *lproto = dyn_cast<FunctionProtoType>(lbase);
  const auto *rproto = dyn_cast<FunctionProtoType>(rbase);
  // allLTypes/allRTypes track whether the merged result is identical to
  // lhs/rhs respectively, so we can return the original (sugared) type
  // instead of building a new one.
  bool allLTypes = true;
  bool allRTypes = true;

  // Check return type
  QualType retType;
  if (OfBlockPointer) {
    QualType RHS = rbase->getReturnType();
    QualType LHS = lbase->getReturnType();
    bool UnqualifiedResult = Unqualified;
    if (!UnqualifiedResult)
      UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers());
    retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true);
  }
  else
    retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false,
                         Unqualified);
  if (retType.isNull())
    return {};

  if (Unqualified)
    retType = retType.getUnqualifiedType();

  CanQualType LRetType = getCanonicalType(lbase->getReturnType());
  CanQualType RRetType = getCanonicalType(rbase->getReturnType());
  if (Unqualified) {
    LRetType = LRetType.getUnqualifiedType();
    RRetType = RRetType.getUnqualifiedType();
  }

  if (getCanonicalType(retType) != LRetType)
    allLTypes = false;
  if (getCanonicalType(retType) != RRetType)
    allRTypes = false;

  // FIXME: double check this
  // FIXME: should we error if lbase->getRegParmAttr() != 0 &&
  //                           rbase->getRegParmAttr() != 0 &&
  //                           lbase->getRegParmAttr() != rbase->getRegParmAttr()?
  FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo();
  FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo();

  // Compatible functions must have compatible calling conventions
  if (lbaseInfo.getCC() != rbaseInfo.getCC())
    return {};

  // Regparm is part of the calling convention.
  if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm())
    return {};
  if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm())
    return {};

  if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult())
    return {};
  if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs())
    return {};
  if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck())
    return {};

  // When merging declarations, it's common for supplemental information like
  // attributes to only be present in one of the declarations, and we generally
  // want type merging to preserve the union of information.  So a merged
  // function type should be noreturn if it was noreturn in *either* operand
  // type.
  //
  // But for the conditional operator, this is backwards.  The result of the
  // operator could be either operand, and its type should conservatively
  // reflect that.  So a function type in a composite type is noreturn only
  // if it's noreturn in *both* operand types.
  //
  // Arguably, noreturn is a kind of subtype, and the conditional operator
  // ought to produce the most specific common supertype of its operand types.
  // That would differ from this rule in contravariant positions.  However,
  // neither C nor C++ generally uses this kind of subtype reasoning.  Also,
  // as a practical matter, it would only affect C code that does abstraction of
  // higher-order functions (taking noreturn callbacks!), which is uncommon to
  // say the least.  So we use the simpler rule.
  bool NoReturn = IsConditionalOperator
                      ? lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn()
                      : lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
  if (lbaseInfo.getNoReturn() != NoReturn)
    allLTypes = false;
  if (rbaseInfo.getNoReturn() != NoReturn)
    allRTypes = false;

  FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn);

  std::optional<FunctionEffectSet> MergedFX;

  if (lproto && rproto) { // two C99 style function prototypes
    assert((AllowCXX ||
            (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) &&
           "C++ shouldn't be here");
    // Compatible functions must have the same number of parameters
    if (lproto->getNumParams() != rproto->getNumParams())
      return {};

    // Variadic and non-variadic functions aren't compatible
    if (lproto->isVariadic() != rproto->isVariadic())
      return {};

    if (lproto->getMethodQuals() != rproto->getMethodQuals())
      return {};

    // Function effects are handled similarly to noreturn, see above.
    FunctionEffectsRef LHSFX = lproto->getFunctionEffects();
    FunctionEffectsRef RHSFX = rproto->getFunctionEffects();
    if (LHSFX != RHSFX) {
      if (IsConditionalOperator)
        MergedFX = FunctionEffectSet::getIntersection(LHSFX, RHSFX);
      else {
        FunctionEffectSet::Conflicts Errs;
        MergedFX = FunctionEffectSet::getUnion(LHSFX, RHSFX, Errs);
        // Here we're discarding a possible error due to conflicts in the effect
        // sets. But we're not in a context where we can report it. The
        // operation does however guarantee maintenance of invariants.
      }
      if (*MergedFX != LHSFX)
        allLTypes = false;
      if (*MergedFX != RHSFX)
        allRTypes = false;
    }

    SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos;
    bool canUseLeft, canUseRight;
    if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight,
                               newParamInfos))
      return {};

    if (!canUseLeft)
      allLTypes = false;
    if (!canUseRight)
      allRTypes = false;

    // Check parameter type compatibility
    SmallVector<QualType, 10> types;
    for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) {
      QualType lParamType = lproto->getParamType(i).getUnqualifiedType();
      QualType rParamType = rproto->getParamType(i).getUnqualifiedType();
      QualType paramType = mergeFunctionParameterTypes(
          lParamType, rParamType, OfBlockPointer, Unqualified);
      if (paramType.isNull())
        return {};

      if (Unqualified)
        paramType = paramType.getUnqualifiedType();

      types.push_back(paramType);
      if (Unqualified) {
        lParamType = lParamType.getUnqualifiedType();
        rParamType = rParamType.getUnqualifiedType();
      }

      if (getCanonicalType(paramType) != getCanonicalType(lParamType))
        allLTypes = false;
      if (getCanonicalType(paramType) != getCanonicalType(rParamType))
        allRTypes = false;
    }

    if (allLTypes) return lhs;
    if (allRTypes) return rhs;

    FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    EPI.ExtParameterInfos =
        newParamInfos.empty() ? nullptr : newParamInfos.data();
    if (MergedFX)
      EPI.FunctionEffects = *MergedFX;
    return getFunctionType(retType, types, EPI);
  }

  if (lproto) allRTypes = false;
  if (rproto) allLTypes = false;

  // At most one side has a prototype; merge it against the no-proto side.
  const FunctionProtoType *proto = lproto ? lproto : rproto;
  if (proto) {
    assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here");
    if (proto->isVariadic())
      return {};
    // Check that the types are compatible with the types that
    // would result from default argument promotions (C99 6.7.5.3p15).
    // The only types actually affected are promotable integer
    // types and floats, which would be passed as a different
    // type depending on whether the prototype is visible.
    for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) {
      QualType paramTy = proto->getParamType(i);

      // Look at the converted type of enum types, since that is the type used
      // to pass enum values.
      if (const auto *Enum = paramTy->getAs<EnumType>()) {
        paramTy = Enum->getDecl()->getIntegerType();
        if (paramTy.isNull())
          return {};
      }

      if (isPromotableIntegerType(paramTy) ||
          getCanonicalType(paramTy).getUnqualifiedType() == FloatTy)
        return {};
    }

    if (allLTypes) return lhs;
    if (allRTypes) return rhs;

    FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    if (MergedFX)
      EPI.FunctionEffects = *MergedFX;
    return getFunctionType(retType, proto->getParamTypes(), EPI);
  }

  if (allLTypes) return lhs;
  if (allRTypes) return rhs;
  return getFunctionNoProtoType(retType, einfo);
}

/// Given that we have an enum type and a non-enum type, try to merge them.
static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET,
                                     QualType other, bool isBlockReturnType) {
  // C99 6.7.2.2p4: Each enumerated type shall be compatible with char,
  // a signed integer type, or an unsigned integer type.
  // Compatibility is based on the underlying type, not the promotion
  // type.
  QualType underlyingType = ET->getDecl()->getIntegerType();
  if (underlyingType.isNull())
    return {};
  if (Context.hasSameType(underlyingType, other))
    return other;

  // In block return types, we're more permissive and accept any
  // integral type of the same size.
  if (isBlockReturnType && other->isIntegerType() &&
      Context.getTypeSize(underlyingType) == Context.getTypeSize(other))
    return other;

  return {};
}

QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer,
                                bool Unqualified, bool BlockReturnType,
                                bool IsConditionalOperator) {
  // For C++ we will not reach this code with reference types (see below),
  // for OpenMP variant call overloading we might.
  //
  // C++ [expr]: If an expression initially has the type "reference to T", the
  // type is adjusted to "T" prior to any further analysis, the expression
  // designates the object or function denoted by the reference, and the
  // expression is an lvalue unless the reference is an rvalue reference and
  // the expression is a function call (possibly inside parentheses).
  auto *LHSRefTy = LHS->getAs<ReferenceType>();
  auto *RHSRefTy = RHS->getAs<ReferenceType>();
  if (LangOpts.OpenMP && LHSRefTy && RHSRefTy &&
      LHS->getTypeClass() == RHS->getTypeClass())
    return mergeTypes(LHSRefTy->getPointeeType(), RHSRefTy->getPointeeType(),
                      OfBlockPointer, Unqualified, BlockReturnType);
  if (LHSRefTy || RHSRefTy)
    return {};

  if (Unqualified) {
    LHS = LHS.getUnqualifiedType();
    RHS = RHS.getUnqualifiedType();
  }

  QualType LHSCan = getCanonicalType(LHS),
           RHSCan = getCanonicalType(RHS);

  // If two types are identical, they are compatible.
  if (LHSCan == RHSCan)
    return LHS;

  // If the qualifiers are different, the types aren't compatible... mostly.
11027 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 11028 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 11029 if (LQuals != RQuals) { 11030 // If any of these qualifiers are different, we have a type 11031 // mismatch. 11032 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 11033 LQuals.getAddressSpace() != RQuals.getAddressSpace() || 11034 LQuals.getObjCLifetime() != RQuals.getObjCLifetime() || 11035 LQuals.hasUnaligned() != RQuals.hasUnaligned()) 11036 return {}; 11037 11038 // Exactly one GC qualifier difference is allowed: __strong is 11039 // okay if the other type has no GC qualifier but is an Objective 11040 // C object pointer (i.e. implicitly strong by default). We fix 11041 // this by pretending that the unqualified type was actually 11042 // qualified __strong. 11043 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 11044 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 11045 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 11046 11047 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 11048 return {}; 11049 11050 if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { 11051 return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); 11052 } 11053 if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { 11054 return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); 11055 } 11056 return {}; 11057 } 11058 11059 // Okay, qualifiers are equal. 11060 11061 Type::TypeClass LHSClass = LHSCan->getTypeClass(); 11062 Type::TypeClass RHSClass = RHSCan->getTypeClass(); 11063 11064 // We want to consider the two function types to be the same for these 11065 // comparisons, just force one to the other. 
11066 if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; 11067 if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; 11068 11069 // Same as above for arrays 11070 if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) 11071 LHSClass = Type::ConstantArray; 11072 if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) 11073 RHSClass = Type::ConstantArray; 11074 11075 // ObjCInterfaces are just specialized ObjCObjects. 11076 if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; 11077 if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; 11078 11079 // Canonicalize ExtVector -> Vector. 11080 if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; 11081 if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; 11082 11083 // If the canonical type classes don't match. 11084 if (LHSClass != RHSClass) { 11085 // Note that we only have special rules for turning block enum 11086 // returns into block int returns, not vice-versa. 11087 if (const auto *ETy = LHS->getAs<EnumType>()) { 11088 return mergeEnumWithInteger(*this, ETy, RHS, false); 11089 } 11090 if (const EnumType* ETy = RHS->getAs<EnumType>()) { 11091 return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); 11092 } 11093 // allow block pointer type to match an 'id' type. 11094 if (OfBlockPointer && !BlockReturnType) { 11095 if (LHS->isObjCIdType() && RHS->isBlockPointerType()) 11096 return LHS; 11097 if (RHS->isObjCIdType() && LHS->isBlockPointerType()) 11098 return RHS; 11099 } 11100 // Allow __auto_type to match anything; it merges to the type with more 11101 // information. 11102 if (const auto *AT = LHS->getAs<AutoType>()) { 11103 if (!AT->isDeduced() && AT->isGNUAutoType()) 11104 return RHS; 11105 } 11106 if (const auto *AT = RHS->getAs<AutoType>()) { 11107 if (!AT->isDeduced() && AT->isGNUAutoType()) 11108 return LHS; 11109 } 11110 return {}; 11111 } 11112 11113 // The canonical type classes match. 
11114 switch (LHSClass) { 11115 #define TYPE(Class, Base) 11116 #define ABSTRACT_TYPE(Class, Base) 11117 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 11118 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 11119 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 11120 #include "clang/AST/TypeNodes.inc" 11121 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 11122 11123 case Type::Auto: 11124 case Type::DeducedTemplateSpecialization: 11125 case Type::LValueReference: 11126 case Type::RValueReference: 11127 case Type::MemberPointer: 11128 llvm_unreachable("C++ should never be in mergeTypes"); 11129 11130 case Type::ObjCInterface: 11131 case Type::IncompleteArray: 11132 case Type::VariableArray: 11133 case Type::FunctionProto: 11134 case Type::ExtVector: 11135 llvm_unreachable("Types are eliminated above"); 11136 11137 case Type::Pointer: 11138 { 11139 // Merge two pointer types, while trying to preserve typedef info 11140 QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType(); 11141 QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType(); 11142 if (Unqualified) { 11143 LHSPointee = LHSPointee.getUnqualifiedType(); 11144 RHSPointee = RHSPointee.getUnqualifiedType(); 11145 } 11146 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, 11147 Unqualified); 11148 if (ResultType.isNull()) 11149 return {}; 11150 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 11151 return LHS; 11152 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 11153 return RHS; 11154 return getPointerType(ResultType); 11155 } 11156 case Type::BlockPointer: 11157 { 11158 // Merge two block pointer types, while trying to preserve typedef info 11159 QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType(); 11160 QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType(); 11161 if (Unqualified) { 11162 LHSPointee = LHSPointee.getUnqualifiedType(); 
11163 RHSPointee = RHSPointee.getUnqualifiedType(); 11164 } 11165 if (getLangOpts().OpenCL) { 11166 Qualifiers LHSPteeQual = LHSPointee.getQualifiers(); 11167 Qualifiers RHSPteeQual = RHSPointee.getQualifiers(); 11168 // Blocks can't be an expression in a ternary operator (OpenCL v2.0 11169 // 6.12.5) thus the following check is asymmetric. 11170 if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual)) 11171 return {}; 11172 LHSPteeQual.removeAddressSpace(); 11173 RHSPteeQual.removeAddressSpace(); 11174 LHSPointee = 11175 QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue()); 11176 RHSPointee = 11177 QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue()); 11178 } 11179 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, 11180 Unqualified); 11181 if (ResultType.isNull()) 11182 return {}; 11183 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 11184 return LHS; 11185 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 11186 return RHS; 11187 return getBlockPointerType(ResultType); 11188 } 11189 case Type::Atomic: 11190 { 11191 // Merge two pointer types, while trying to preserve typedef info 11192 QualType LHSValue = LHS->castAs<AtomicType>()->getValueType(); 11193 QualType RHSValue = RHS->castAs<AtomicType>()->getValueType(); 11194 if (Unqualified) { 11195 LHSValue = LHSValue.getUnqualifiedType(); 11196 RHSValue = RHSValue.getUnqualifiedType(); 11197 } 11198 QualType ResultType = mergeTypes(LHSValue, RHSValue, false, 11199 Unqualified); 11200 if (ResultType.isNull()) 11201 return {}; 11202 if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) 11203 return LHS; 11204 if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) 11205 return RHS; 11206 return getAtomicType(ResultType); 11207 } 11208 case Type::ConstantArray: 11209 { 11210 const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); 11211 const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); 11212 if (LCAT && 
RCAT && RCAT->getZExtSize() != LCAT->getZExtSize()) 11213 return {}; 11214 11215 QualType LHSElem = getAsArrayType(LHS)->getElementType(); 11216 QualType RHSElem = getAsArrayType(RHS)->getElementType(); 11217 if (Unqualified) { 11218 LHSElem = LHSElem.getUnqualifiedType(); 11219 RHSElem = RHSElem.getUnqualifiedType(); 11220 } 11221 11222 QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); 11223 if (ResultType.isNull()) 11224 return {}; 11225 11226 const VariableArrayType* LVAT = getAsVariableArrayType(LHS); 11227 const VariableArrayType* RVAT = getAsVariableArrayType(RHS); 11228 11229 // If either side is a variable array, and both are complete, check whether 11230 // the current dimension is definite. 11231 if (LVAT || RVAT) { 11232 auto SizeFetch = [this](const VariableArrayType* VAT, 11233 const ConstantArrayType* CAT) 11234 -> std::pair<bool,llvm::APInt> { 11235 if (VAT) { 11236 std::optional<llvm::APSInt> TheInt; 11237 Expr *E = VAT->getSizeExpr(); 11238 if (E && (TheInt = E->getIntegerConstantExpr(*this))) 11239 return std::make_pair(true, *TheInt); 11240 return std::make_pair(false, llvm::APSInt()); 11241 } 11242 if (CAT) 11243 return std::make_pair(true, CAT->getSize()); 11244 return std::make_pair(false, llvm::APInt()); 11245 }; 11246 11247 bool HaveLSize, HaveRSize; 11248 llvm::APInt LSize, RSize; 11249 std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT); 11250 std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT); 11251 if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize)) 11252 return {}; // Definite, but unequal, array dimension 11253 } 11254 11255 if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 11256 return LHS; 11257 if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 11258 return RHS; 11259 if (LCAT) 11260 return getConstantArrayType(ResultType, LCAT->getSize(), 11261 LCAT->getSizeExpr(), ArraySizeModifier(), 0); 11262 if (RCAT) 11263 return getConstantArrayType(ResultType, 
RCAT->getSize(), 11264 RCAT->getSizeExpr(), ArraySizeModifier(), 0); 11265 if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 11266 return LHS; 11267 if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 11268 return RHS; 11269 if (LVAT) { 11270 // FIXME: This isn't correct! But tricky to implement because 11271 // the array's size has to be the size of LHS, but the type 11272 // has to be different. 11273 return LHS; 11274 } 11275 if (RVAT) { 11276 // FIXME: This isn't correct! But tricky to implement because 11277 // the array's size has to be the size of RHS, but the type 11278 // has to be different. 11279 return RHS; 11280 } 11281 if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; 11282 if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; 11283 return getIncompleteArrayType(ResultType, ArraySizeModifier(), 0); 11284 } 11285 case Type::FunctionNoProto: 11286 return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified, 11287 /*AllowCXX=*/false, IsConditionalOperator); 11288 case Type::Record: 11289 case Type::Enum: 11290 return {}; 11291 case Type::Builtin: 11292 // Only exactly equal builtin types are compatible, which is tested above. 11293 return {}; 11294 case Type::Complex: 11295 // Distinct complex types are incompatible. 11296 return {}; 11297 case Type::Vector: 11298 // FIXME: The merged type should be an ExtVector! 11299 if (areCompatVectorTypes(LHSCan->castAs<VectorType>(), 11300 RHSCan->castAs<VectorType>())) 11301 return LHS; 11302 return {}; 11303 case Type::ConstantMatrix: 11304 if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(), 11305 RHSCan->castAs<ConstantMatrixType>())) 11306 return LHS; 11307 return {}; 11308 case Type::ObjCObject: { 11309 // Check if the types are assignment compatible. 11310 // FIXME: This should be type compatibility, e.g. whether 11311 // "LHS x; RHS x;" at global scope is legal. 
11312 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectType>(), 11313 RHS->castAs<ObjCObjectType>())) 11314 return LHS; 11315 return {}; 11316 } 11317 case Type::ObjCObjectPointer: 11318 if (OfBlockPointer) { 11319 if (canAssignObjCInterfacesInBlockPointer( 11320 LHS->castAs<ObjCObjectPointerType>(), 11321 RHS->castAs<ObjCObjectPointerType>(), BlockReturnType)) 11322 return LHS; 11323 return {}; 11324 } 11325 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectPointerType>(), 11326 RHS->castAs<ObjCObjectPointerType>())) 11327 return LHS; 11328 return {}; 11329 case Type::Pipe: 11330 assert(LHS != RHS && 11331 "Equivalent pipe types should have already been handled!"); 11332 return {}; 11333 case Type::ArrayParameter: 11334 assert(LHS != RHS && 11335 "Equivalent ArrayParameter types should have already been handled!"); 11336 return {}; 11337 case Type::BitInt: { 11338 // Merge two bit-precise int types, while trying to preserve typedef info. 11339 bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned(); 11340 bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned(); 11341 unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits(); 11342 unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits(); 11343 11344 // Like unsigned/int, shouldn't have a type if they don't match. 
    // _BitInt: both the signedness and the bit-width must match exactly,
    // mirroring the unsigned/int rule above.
    if (LHSUnsigned != RHSUnsigned)
      return {};

    if (LHSBits != RHSBits)
      return {};
    return LHS;
  }
  }

  llvm_unreachable("Invalid Type::Class!");
}

/// Merge the ExtParameterInfo lists of two function prototypes, checking that
/// the per-parameter infos are identical modulo the noescape flag. The merged
/// noescape flag is the conjunction of the two sides' flags.
///
/// \param FirstFnType,SecondFnType the two prototypes being merged.
/// \param CanUseFirst,CanUseSecond set to false when the corresponding type's
///        own flags are not already equal to the merged flags.
/// \param NewParamInfos receives the merged infos; left empty when no explicit
///        info list is needed for the merged type.
/// \returns false when the lists cannot be merged, true otherwise.
bool ASTContext::mergeExtParameterInfo(
    const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType,
    bool &CanUseFirst, bool &CanUseSecond,
    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) {
  assert(NewParamInfos.empty() && "param info list not empty");
  CanUseFirst = CanUseSecond = true;
  bool FirstHasInfo = FirstFnType->hasExtParameterInfos();
  bool SecondHasInfo = SecondFnType->hasExtParameterInfos();

  // Fast path: if the first type doesn't have ext parameter infos,
  // we match if and only if the second type also doesn't have them.
  if (!FirstHasInfo && !SecondHasInfo)
    return true;

  bool NeedParamInfo = false;
  size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size()
                          : SecondFnType->getExtParameterInfos().size();

  for (size_t I = 0; I < E; ++I) {
    // A side without an explicit list is treated as having default
    // (value-initialized) infos for every parameter.
    FunctionProtoType::ExtParameterInfo FirstParam, SecondParam;
    if (FirstHasInfo)
      FirstParam = FirstFnType->getExtParameterInfo(I);
    if (SecondHasInfo)
      SecondParam = SecondFnType->getExtParameterInfo(I);

    // Cannot merge unless everything except the noescape flag matches.
    if (FirstParam.withIsNoEscape(false) != SecondParam.withIsNoEscape(false))
      return false;

    // The merged parameter is noescape only if both sides say so.
    bool FirstNoEscape = FirstParam.isNoEscape();
    bool SecondNoEscape = SecondParam.isNoEscape();
    bool IsNoEscape = FirstNoEscape && SecondNoEscape;
    NewParamInfos.push_back(FirstParam.withIsNoEscape(IsNoEscape));
    // Any non-trivial merged info forces an explicit list on the result.
    if (NewParamInfos.back().getOpaqueValue())
      NeedParamInfo = true;
    // A side whose own flag differs from the merged flag cannot be reused
    // verbatim as the merged type.
    if (FirstNoEscape != IsNoEscape)
      CanUseFirst = false;
    if (SecondNoEscape != IsNoEscape)
      CanUseSecond = false;
  }

  // If every merged info is trivial, the merged type needs no explicit list.
  if (!NeedParamInfo)
    NewParamInfos.clear();

  return true;
}

/// Drop the cached layout for the given Objective-C container so a stale
/// layout is not reused after the container is mutated.
void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) {
  ObjCLayouts[CD] = nullptr;
}

/// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and
/// 'RHS' attributes and returns the merged version; including for function
/// return types.
QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
  QualType LHSCan = getCanonicalType(LHS),
           RHSCan = getCanonicalType(RHS);
  // If two types are identical, they are compatible.
  if (LHSCan == RHSCan)
    return LHS;
  if (RHSCan->isFunctionType()) {
    if (!LHSCan->isFunctionType())
      return {};
    // Recursively merge the GC qualifiers of the return types.
    QualType OldReturnType =
        cast<FunctionType>(RHSCan.getTypePtr())->getReturnType();
    QualType NewReturnType =
        cast<FunctionType>(LHSCan.getTypePtr())->getReturnType();
    QualType ResReturnType =
        mergeObjCGCQualifiers(NewReturnType, OldReturnType);
    if (ResReturnType.isNull())
      return {};
    if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) {
      // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo();
      // In either case, use OldReturnType to build the new function type.
      const auto *F = LHS->castAs<FunctionType>();
      // NOTE(review): cast<> asserts on a type mismatch instead of returning
      // null, so this condition is always true when reached — presumably LHS
      // is always a prototyped function here; dyn_cast<> would be the
      // null-checking form. TODO: confirm.
      if (const auto *FPT = cast<FunctionProtoType>(F)) {
        FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
        EPI.ExtInfo = getFunctionExtInfo(LHS);
        QualType ResultType =
            getFunctionType(OldReturnType, FPT->getParamTypes(), EPI);
        return ResultType;
      }
    }
    return {};
  }

  // If the qualifiers are different, the types can still be merged.
  Qualifiers LQuals = LHSCan.getLocalQualifiers();
  Qualifiers RQuals = RHSCan.getLocalQualifiers();
  if (LQuals != RQuals) {
    // If any of these qualifiers are different, we have a type mismatch.
    if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
        LQuals.getAddressSpace() != RQuals.getAddressSpace())
      return {};

    // Exactly one GC qualifier difference is allowed: __strong is
    // okay if the other type has no GC qualifier but is an Objective
    // C object pointer (i.e. implicitly strong by default). We fix
    // this by pretending that the unqualified type was actually
    // qualified __strong.
    Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
    Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
    assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");

    // __weak never merges with a differing GC qualifier.
    if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
      return {};

    // Prefer the __strong-qualified side as the merged type.
    if (GC_L == Qualifiers::Strong)
      return LHS;
    if (GC_R == Qualifiers::Strong)
      return RHS;
    return {};
  }

  // Qualifiers matched: for ObjC object pointers, try merging the pointee
  // GC qualifiers and keep whichever side already has the merged pointee.
  if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
    QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType();
    QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType();
    QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT);
    if (ResQT == LHSBaseQT)
      return LHS;
    if (ResQT == RHSBaseQT)
      return RHS;
  }
  return {};
}

//===----------------------------------------------------------------------===//
//                         Integer Predicates
//===----------------------------------------------------------------------===//

/// Return the width in bits of the given integer-like type: enums use their
/// underlying integer type, bool is 1 bit, _BitInt its declared bit count,
/// and any other (builtin) type its full storage size.
unsigned ASTContext::getIntWidth(QualType T) const {
  if (const auto *ET = T->getAs<EnumType>())
    T = ET->getDecl()->getIntegerType();
  if (T->isBooleanType())
    return 1;
  if (const auto *EIT = T->getAs<BitIntType>())
    return EIT->getNumBits();
  // For builtin types, just use the standard type sizing method
  return (unsigned)getTypeSize(T);
}

/// Return the unsigned counterpart of \p T. Vectors convert element-wise,
/// _BitInt keeps its width, enums convert via their underlying integer type,
/// and already-unsigned types are returned unchanged (default branch).
QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
  assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
          T->isFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x signed int> -> <4 x unsigned int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()),
                         VTy->getNumElements(), VTy->getVectorKind());

  // For _BitInt, return an unsigned _BitInt with same width.
  if (const auto *EITy = T->getAs<BitIntType>())
    return getBitIntType(/*Unsigned=*/true, EITy->getNumBits());

  // For enums, get the underlying integer type of the enum, and let the general
  // integer type signchanging code handle it.
  if (const auto *ETy = T->getAs<EnumType>())
    T = ETy->getDecl()->getIntegerType();

  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_U:
    // Plain `char` is mapped to `unsigned char` even if it's already unsigned
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
  case BuiltinType::Char8:
    return UnsignedCharTy;
  case BuiltinType::Short:
    return UnsignedShortTy;
  case BuiltinType::Int:
    return UnsignedIntTy;
  case BuiltinType::Long:
    return UnsignedLongTy;
  case BuiltinType::LongLong:
    return UnsignedLongLongTy;
  case BuiltinType::Int128:
    return UnsignedInt128Ty;
  // wchar_t is special. It is either signed or not, but when it's signed,
  // there's no matching "unsigned wchar_t". Therefore we return the unsigned
  // version of its underlying type instead.
  case BuiltinType::WChar_S:
    return getUnsignedWCharType();

  case BuiltinType::ShortAccum:
    return UnsignedShortAccumTy;
  case BuiltinType::Accum:
    return UnsignedAccumTy;
  case BuiltinType::LongAccum:
    return UnsignedLongAccumTy;
  case BuiltinType::SatShortAccum:
    return SatUnsignedShortAccumTy;
  case BuiltinType::SatAccum:
    return SatUnsignedAccumTy;
  case BuiltinType::SatLongAccum:
    return SatUnsignedLongAccumTy;
  case BuiltinType::ShortFract:
    return UnsignedShortFractTy;
  case BuiltinType::Fract:
    return UnsignedFractTy;
  case BuiltinType::LongFract:
    return UnsignedLongFractTy;
  case BuiltinType::SatShortFract:
    return SatUnsignedShortFractTy;
  case BuiltinType::SatFract:
    return SatUnsignedFractTy;
  case BuiltinType::SatLongFract:
    return SatUnsignedLongFractTy;
  default:
    // Anything else must already be unsigned; return it unchanged.
    assert((T->hasUnsignedIntegerRepresentation() ||
            T->isUnsignedFixedPointType()) &&
           "Unexpected signed integer or fixed point type");
    return T;
  }
}

/// Return the signed counterpart of \p T; the exact mirror of
/// getCorrespondingUnsignedType above. Vectors convert element-wise, _BitInt
/// keeps its width, enums convert via their underlying integer type, and
/// already-signed types are returned unchanged (default branch).
QualType ASTContext::getCorrespondingSignedType(QualType T) const {
  assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
          T->isFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x unsigned int> -> <4 x signed int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(getCorrespondingSignedType(VTy->getElementType()),
                         VTy->getNumElements(), VTy->getVectorKind());

  // For _BitInt, return a signed _BitInt with same width.
  if (const auto *EITy = T->getAs<BitIntType>())
    return getBitIntType(/*Unsigned=*/false, EITy->getNumBits());

  // For enums, get the underlying integer type of the enum, and let the general
  // integer type signchanging code handle it.
  if (const auto *ETy = T->getAs<EnumType>())
    T = ETy->getDecl()->getIntegerType();

  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_S:
    // Plain `char` is mapped to `signed char` even if it's already signed
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
  case BuiltinType::Char8:
    return SignedCharTy;
  case BuiltinType::UShort:
    return ShortTy;
  case BuiltinType::UInt:
    return IntTy;
  case BuiltinType::ULong:
    return LongTy;
  case BuiltinType::ULongLong:
    return LongLongTy;
  case BuiltinType::UInt128:
    return Int128Ty;
  // wchar_t is special. It is either unsigned or not, but when it's unsigned,
  // there's no matching "signed wchar_t". Therefore we return the signed
  // version of its underlying type instead.
  case BuiltinType::WChar_U:
    return getSignedWCharType();

  case BuiltinType::UShortAccum:
    return ShortAccumTy;
  case BuiltinType::UAccum:
    return AccumTy;
  case BuiltinType::ULongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return SatShortAccumTy;
  case BuiltinType::SatUAccum:
    return SatAccumTy;
  case BuiltinType::SatULongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortFract:
    return ShortFractTy;
  case BuiltinType::UFract:
    return FractTy;
  case BuiltinType::ULongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return SatShortFractTy;
  case BuiltinType::SatUFract:
    return SatFractTy;
  case BuiltinType::SatULongFract:
    return SatLongFractTy;
  default:
    // Anything else must already be signed; return it unchanged.
    assert(
        (T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) &&
        "Unexpected signed integer or fixed point type");
    return T;
  }
}

ASTMutationListener::~ASTMutationListener() = default;

void
ASTMutationListener::DeducedReturnType(const FunctionDecl *FD,
                                       QualType ReturnType) {}

//===----------------------------------------------------------------------===//
//                          Builtin Type Computation
//===----------------------------------------------------------------------===//

/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the
/// pointer over the consumed characters. This returns the resultant type. If
/// AllowTypeModifiers is false then modifier like * are not parsed, just basic
/// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of
/// a vector of "i*".
///
/// RequiresICE is filled in on return to indicate whether the value is required
/// to be an Integer Constant Expression.
///
/// Prefix modifiers handled below: 'I' (ICE required), 'S'/'U' (signedness),
/// 'L' (repeatable long-ness), and the target-dependent 'N'/'W'/'Z'/'O'
/// variants, followed by a base-type letter and optional suffix modifiers
/// ('*', '&', 'C', 'D', 'R').
static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
                                  ASTContext::GetBuiltinTypeError &Error,
                                  bool &RequiresICE,
                                  bool AllowTypeModifiers) {
  // Modifiers.
  int HowLong = 0;
  bool Signed = false, Unsigned = false;
  RequiresICE = false;

  // Read the prefixed modifiers first.
  bool Done = false;
#ifndef NDEBUG
  // Tracks whether one of the mutually-exclusive 'N'/'W'/'Z'/'O' modifiers
  // has been seen; used only by the asserts below.
  bool IsSpecial = false;
#endif
  while (!Done) {
    switch (*Str++) {
    default: Done = true; --Str; break;
    case 'I':
      RequiresICE = true;
      break;
    case 'S':
      assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!");
      assert(!Signed && "Can't use 'S' modifier multiple times!");
      Signed = true;
      break;
    case 'U':
      assert(!Signed && "Can't use both 'S' and 'U' modifiers!");
      assert(!Unsigned && "Can't use 'U' modifier multiple times!");
      Unsigned = true;
      break;
    case 'L':
      assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers");
      assert(HowLong <= 2 && "Can't have LLLL modifier");
      ++HowLong;
      break;
    case 'N':
      // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      if (Context.getTargetInfo().getLongWidth() == 32)
        ++HowLong;
      break;
    case 'W':
      // This modifier represents int64 type.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      switch (Context.getTargetInfo().getInt64Type()) {
      default:
        llvm_unreachable("Unexpected integer type");
      case TargetInfo::SignedLong:
        HowLong = 1;
        break;
      case TargetInfo::SignedLongLong:
        HowLong = 2;
        break;
      }
      break;
    case 'Z':
      // This modifier represents int32 type.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) {
      default:
        llvm_unreachable("Unexpected integer type");
      case TargetInfo::SignedInt:
        HowLong = 0;
        break;
      case TargetInfo::SignedLong:
        HowLong = 1;
        break;
      case TargetInfo::SignedLongLong:
        HowLong = 2;
        break;
      }
      break;
    case 'O':
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      // 'O' behaves like 'L' under OpenCL and like 'LL' otherwise.
      if (Context.getLangOpts().OpenCL)
        HowLong = 1;
      else
        HowLong = 2;
      break;
    }
  }

  QualType Type;

  // Read the base type.
  switch (*Str++) {
  default: llvm_unreachable("Unknown builtin type letter!");
  case 'x':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'x'!");
    Type = Context.Float16Ty;
    break;
  case 'y':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'y'!");
    Type = Context.BFloat16Ty;
    break;
  case 'v':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'v'!");
    Type = Context.VoidTy;
    break;
  case 'h':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'h'!");
    Type = Context.HalfTy;
    break;
  case 'f':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'f'!");
    Type = Context.FloatTy;
    break;
  case 'd':
    assert(HowLong < 3 && !Signed && !Unsigned &&
           "Bad modifiers used with 'd'!");
    // 'Ld' is long double, 'LLd' is __float128, plain 'd' is double.
    if (HowLong == 1)
      Type = Context.LongDoubleTy;
    else if (HowLong == 2)
      Type = Context.Float128Ty;
    else
      Type = Context.DoubleTy;
    break;
  case 's':
    assert(HowLong == 0 && "Bad modifiers used with 's'!");
    if (Unsigned)
      Type = Context.UnsignedShortTy;
    else
      Type = Context.ShortTy;
    break;
  case 'i':
    // HowLong selects int (0), long (1), long long (2) or __int128 (3).
    if (HowLong == 3)
      Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty;
    else if (HowLong == 2)
      Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy;
    else if (HowLong == 1)
      Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy;
    else
      Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy;
    break;
  case 'c':
    assert(HowLong == 0 && "Bad modifiers used with 'c'!");
    if (Signed)
      Type = Context.SignedCharTy;
    else if (Unsigned)
      Type = Context.UnsignedCharTy;
    else
      Type = Context.CharTy;
    break;
  case 'b': // boolean
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!");
    Type = Context.BoolTy;
    break;
  case 'z': // size_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!");
    Type = Context.getSizeType();
    break;
  case 'w': // wchar_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!");
    Type = Context.getWideCharType();
    break;
  case 'F':
    Type = Context.getCFConstantStringType();
    break;
  case 'G':
    Type = Context.getObjCIdType();
    break;
  case 'H':
    Type = Context.getObjCSelType();
    break;
  case 'M':
    Type = Context.getObjCSuperType();
    break;
  case 'a':
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    break;
  case 'A':
    // This is a "reference" to a va_list; however, what exactly
    // this means depends on how va_list is defined. There are two
    // different kinds of va_list: ones passed by value, and ones
    // passed by reference. An example of a by-value va_list is
    // x86, where va_list is a char*. An example of by-ref va_list
    // is x86-64, where va_list is a __va_list_tag[1]. For x86,
    // we want this argument to be a char*&; for x86-64, we want
    // it to be a __va_list_tag*.
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    if (Type->isArrayType())
      Type = Context.getArrayDecayedType(Type);
    else
      Type = Context.getLValueReferenceType(Type);
    break;
  case 'q': {
    // Scalable vector: 'q' followed by an element count and element type.
    char *End;
    unsigned NumElements = strtoul(Str, &End, 10);
    assert(End != Str && "Missing vector size");
    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, false);
    assert(!RequiresICE && "Can't require vector ICE");

    Type = Context.getScalableVectorType(ElementType, NumElements);
    break;
  }
  case 'Q': {
    // Target builtin type, selected by a second letter.
    switch (*Str++) {
    case 'a': {
      Type = Context.SveCountTy;
      break;
    }
    case 'b': {
      Type = Context.AMDGPUBufferRsrcTy;
      break;
    }
    default:
      llvm_unreachable("Unexpected target builtin type");
    }
    break;
  }
  case 'V': {
    // Generic vector: 'V' followed by an element count and element type.
    char *End;
    unsigned NumElements = strtoul(Str, &End, 10);
    assert(End != Str && "Missing vector size");
    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, false);
    assert(!RequiresICE && "Can't require vector ICE");

    // TODO: No way to make AltiVec vectors in builtins yet.
    Type = Context.getVectorType(ElementType, NumElements, VectorKind::Generic);
    break;
  }
  case 'E': {
    // ext_vector_type: 'E' followed by an element count and element type.
    char *End;

    unsigned NumElements = strtoul(Str, &End, 10);
    assert(End != Str && "Missing vector size");

    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             false);
    Type = Context.getExtVectorType(ElementType, NumElements);
    break;
  }
  case 'X': {
    // _Complex of the following element type.
    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             false);
    assert(!RequiresICE && "Can't require complex ICE");
    Type = Context.getComplexType(ElementType);
    break;
  }
  case 'Y':
    Type = Context.getPointerDiffType();
    break;
  case 'P':
    // FILE* requires <stdio.h> to have been seen; report if missing.
    Type = Context.getFILEType();
    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_stdio;
      return {};
    }
    break;
  case 'J':
    // jmp_buf / sigjmp_buf ('SJ'); requires <setjmp.h> to have been seen.
    if (Signed)
      Type = Context.getsigjmp_bufType();
    else
      Type = Context.getjmp_bufType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_setjmp;
      return {};
    }
    break;
  case 'K':
    // ucontext_t; requires <ucontext.h> to have been seen.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!");
    Type = Context.getucontext_tType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_ucontext;
      return {};
    }
    break;
  case 'p':
    Type = Context.getProcessIDType();
    break;
  }

  // If there are modifiers and if we're allowed to parse them, go for it.
  Done = !AllowTypeModifiers;
  while (!Done) {
    switch (char c = *Str++) {
    default: Done = true; --Str; break;
    case '*':
    case '&': {
      // Both pointers and references can have their pointee types
      // qualified with an address space.
      char *End;
      unsigned AddrSpace = strtoul(Str, &End, 10);
      if (End != Str) {
        // Note AddrSpace == 0 is not the same as an unspecified address space.
        Type = Context.getAddrSpaceQualType(
            Type,
            Context.getLangASForBuiltinAddressSpace(AddrSpace));
        Str = End;
      }
      if (c == '*')
        Type = Context.getPointerType(Type);
      else
        Type = Context.getLValueReferenceType(Type);
      break;
    }
    // FIXME: There's no way to have a built-in with an rvalue ref arg.
    case 'C':
      Type = Type.withConst();
      break;
    case 'D':
      Type = Context.getVolatileType(Type);
      break;
    case 'R':
      Type = Type.withRestrict();
      break;
    }
  }

  assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&
         "Integer constant 'I' type must be an integer");

  return Type;
}

// On some targets such as PowerPC, some of the builtins are defined with custom
// type descriptors for target-dependent types. These descriptors are decoded in
// other functions, but it may be useful to be able to fall back to default
// descriptor decoding to define builtins mixing target-dependent and target-
// independent types. This function allows decoding one type descriptor with
// default decoding.
QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context,
                                   GetBuiltinTypeError &Error, bool &RequireICE,
                                   bool AllowTypeModifiers) const {
  return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers);
}

/// GetBuiltinType - Return the type for the specified builtin. Sets \p Error
/// on failure; when \p IntegerConstantArgs is non-null it receives a bitmask
/// of the arguments that are required to be integer constant expressions.
QualType ASTContext::GetBuiltinType(unsigned Id,
                                    GetBuiltinTypeError &Error,
                                    unsigned *IntegerConstantArgs) const {
  // The builtin's signature is encoded as a type string: first the result
  // type descriptor, then one descriptor per argument, with a trailing '.'
  // marking a variadic builtin.
  const char *TypeStr = BuiltinInfo.getTypeString(Id);
  if (TypeStr[0] == '\0') {
    Error = GE_Missing_type;
    return {};
  }

  SmallVector<QualType, 8> ArgTypes;

  bool RequiresICE = false;
  Error = GE_None;
  QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error,
                                       RequiresICE, true);
  if (Error != GE_None)
    return {};

  assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE");

  // Decode argument descriptors until the end of the string or the variadic
  // marker; DecodeTypeFromStr advances TypeStr past each descriptor.
  while (TypeStr[0] && TypeStr[0] != '.') {
    QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true);
    if (Error != GE_None)
      return {};

    // If this argument is required to be an IntegerConstantExpression and the
    // caller cares, fill in the bitmask we return.
    if (RequiresICE && IntegerConstantArgs)
      *IntegerConstantArgs |= 1 << ArgTypes.size();

    // Do array -> pointer decay. The builtin should use the decayed type.
    if (Ty->isArrayType())
      Ty = getArrayDecayedType(Ty);

    ArgTypes.push_back(Ty);
  }

  if (Id == Builtin::BI__GetExceptionInfo)
    return {};

  assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
         "'.' should only occur at end of builtin type list!");

  bool Variadic = (TypeStr[0] == '.');

  FunctionType::ExtInfo EI(getDefaultCallingConvention(
      Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true));
  if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true);


  // We really shouldn't be making a no-proto type here.
  if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes())
    return getFunctionNoProtoType(ResType, EI);

  FunctionProtoType::ExtProtoInfo EPI;
  EPI.ExtInfo = EI;
  EPI.Variadic = Variadic;
  if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id))
    EPI.ExceptionSpec.Type =
        getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;

  return getFunctionType(ResType, ArgTypes, EPI);
}

/// Compute the base GVALinkage for a function, before attribute- and
/// external-AST-source-based adjustments are applied.
static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
                                             const FunctionDecl *FD) {
  if (!FD->isExternallyVisible())
    return GVA_Internal;

  // Non-user-provided functions get emitted as weak definitions with every
  // use, no matter whether they've been explicitly instantiated etc.
  if (!FD->isUserProvided())
    return GVA_DiscardableODR;

  GVALinkage External;
  switch (FD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
  case TSK_ExplicitSpecialization:
    External = GVA_StrongExternal;
    break;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  // C++11 [temp.explicit]p10:
  //   [ Note: The intent is that an inline function that is the subject of
  //   an explicit instantiation declaration will still be implicitly
  //   instantiated when used so that the body can be considered for
  //   inlining, but that no out-of-line copy of the inline function would be
  //   generated in the translation unit. -- end note ]
  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    External = GVA_DiscardableODR;
    break;
  }

  if (!FD->isInlined())
    return External;

  if ((!Context.getLangOpts().CPlusPlus &&
       !Context.getTargetInfo().getCXXABI().isMicrosoft() &&
       !FD->hasAttr<DLLExportAttr>()) ||
      FD->hasAttr<GNUInlineAttr>()) {
    // FIXME: This doesn't match gcc's behavior for dllexport inline functions.

    // GNU or C99 inline semantics. Determine whether this symbol should be
    // externally visible.
    if (FD->isInlineDefinitionExternallyVisible())
      return External;

    // C99 inline semantics, where the symbol is not externally visible.
    return GVA_AvailableExternally;
  }

  // Functions specified with extern and inline in -fms-compatibility mode
  // forcibly get emitted. While the body of the function cannot be later
  // replaced, the function definition cannot be discarded.
  if (FD->isMSExternInline())
    return GVA_StrongODR;

  if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
      isa<CXXConstructorDecl>(FD) &&
      cast<CXXConstructorDecl>(FD)->isInheritingConstructor())
    // Our approach to inheriting constructors is fundamentally different from
    // that used by the MS ABI, so keep our inheriting constructor thunks
    // internal rather than trying to pick an unambiguous mangling for them.
    return GVA_Internal;

  return GVA_DiscardableODR;
}

/// Adjust a computed GVALinkage for dllimport/dllexport and CUDA/HIP
/// device-compilation attributes on the declaration.
static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
                                                const Decl *D, GVALinkage L) {
  // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx
  //   dllexport/dllimport on inline functions.
  if (D->hasAttr<DLLImportAttr>()) {
    if (L == GVA_DiscardableODR || L == GVA_StrongODR)
      return GVA_AvailableExternally;
  } else if (D->hasAttr<DLLExportAttr>()) {
    if (L == GVA_DiscardableODR)
      return GVA_StrongODR;
  } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) {
    // Device-side functions with __global__ attribute must always be
    // visible externally so they can be launched from host.
    if (D->hasAttr<CUDAGlobalAttr>() &&
        (L == GVA_DiscardableODR || L == GVA_Internal))
      return GVA_StrongODR;
    // Single source offloading languages like CUDA/HIP need to be able to
    // access static device variables from host code of the same compilation
    // unit. This is done by externalizing the static variable with a shared
    // name between the host and device compilation which is the same for the
    // same compilation unit whereas different among different compilation
    // units.
    if (Context.shouldExternalize(D))
      return GVA_StrongExternal;
  }
  return L;
}

/// Adjust the GVALinkage for a declaration based on what an external AST source
/// knows about whether there can be other definitions of this declaration.
static GVALinkage
adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D,
                                          GVALinkage L) {
  ExternalASTSource *Source = Ctx.getExternalSource();
  if (!Source)
    return L;

  switch (Source->hasExternalDefinitions(D)) {
  case ExternalASTSource::EK_Never:
    // Other translation units rely on us to provide the definition.
    if (L == GVA_DiscardableODR)
      return GVA_StrongODR;
    break;

  case ExternalASTSource::EK_Always:
    return GVA_AvailableExternally;

  case ExternalASTSource::EK_ReplyHazy:
    break;
  }
  return L;
}

// Pipeline: basic linkage, then attribute adjustments, then external-AST-source
// adjustments (same structure as GetGVALinkageForVariable below).
GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
  return adjustGVALinkageForExternalDefinitionKind(*this, FD,
           adjustGVALinkageForAttributes(*this, FD,
             basicGVALinkageForFunction(*this, FD)));
}

/// Compute the base GVALinkage for a variable, before attribute- and
/// external-AST-source-based adjustments are applied.
static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
                                             const VarDecl *VD) {
  // As an extension for interactive REPLs, make sure constant variables are
  // only emitted once instead of LinkageComputer::getLVForNamespaceScopeDecl
  // marking them as internal.
  if (Context.getLangOpts().CPlusPlus &&
      Context.getLangOpts().IncrementalExtensions &&
      VD->getType().isConstQualified() &&
      !VD->getType().isVolatileQualified() && !VD->isInline() &&
      !isa<VarTemplateSpecializationDecl>(VD) && !VD->getDescribedVarTemplate())
    return GVA_DiscardableODR;

  if (!VD->isExternallyVisible())
    return GVA_Internal;

  if (VD->isStaticLocal()) {
    const DeclContext *LexicalContext = VD->getParentFunctionOrMethod();
    while (LexicalContext && !isa<FunctionDecl>(LexicalContext))
      LexicalContext = LexicalContext->getLexicalParent();

    // ObjC Blocks can create local variables that don't have a FunctionDecl
    // LexicalContext.
    if (!LexicalContext)
      return GVA_DiscardableODR;

    // Otherwise, let the static local variable inherit its linkage from the
    // nearest enclosing function.
    auto StaticLocalLinkage =
        Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext));

    // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must
    // be emitted in any object with references to the symbol for the object it
    // contains, whether inline or out-of-line."
    // Similar behavior is observed with MSVC. An alternative ABI could use
    // StrongODR/AvailableExternally to match the function, but none are
    // known/supported currently.
    if (StaticLocalLinkage == GVA_StrongODR ||
        StaticLocalLinkage == GVA_AvailableExternally)
      return GVA_DiscardableODR;
    return StaticLocalLinkage;
  }

  // MSVC treats in-class initialized static data members as definitions.
  // By giving them non-strong linkage, out-of-line definitions won't
  // cause link errors.
  if (Context.isMSStaticDataMemberInlineDefinition(VD))
    return GVA_DiscardableODR;

  // Most non-template variables have strong linkage; inline variables are
  // linkonce_odr or (occasionally, for compatibility) weak_odr.
  GVALinkage StrongLinkage;
  switch (Context.getInlineVariableDefinitionKind(VD)) {
  case ASTContext::InlineVariableDefinitionKind::None:
    StrongLinkage = GVA_StrongExternal;
    break;
  case ASTContext::InlineVariableDefinitionKind::Weak:
  case ASTContext::InlineVariableDefinitionKind::WeakUnknown:
    StrongLinkage = GVA_DiscardableODR;
    break;
  case ASTContext::InlineVariableDefinitionKind::Strong:
    StrongLinkage = GVA_StrongODR;
    break;
  }

  switch (VD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
    return StrongLinkage;

  case TSK_ExplicitSpecialization:
    return Context.getTargetInfo().getCXXABI().isMicrosoft() &&
                   VD->isStaticDataMember()
               ? GVA_StrongODR
               : StrongLinkage;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    return GVA_DiscardableODR;
  }

  llvm_unreachable("Invalid Linkage!");
}

// Pipeline mirrors GetGVALinkageForFunction above.
GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) const {
  return adjustGVALinkageForExternalDefinitionKind(*this, VD,
           adjustGVALinkageForAttributes(*this, VD,
             basicGVALinkageForVariable(*this, VD)));
}

/// Determine whether the declaration D must be emitted by code generation
/// (as opposed to being safely deferrable or discardable).
bool ASTContext::DeclMustBeEmitted(const Decl *D) {
  // First, filter by declaration kind; anything not listed here is never
  // emitted directly.
  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (!VD->isFileVarDecl())
      return false;
    // Global named register variables (GNU extension) are never emitted.
    if (VD->getStorageClass() == SC_Register)
      return false;
    if (VD->getDescribedVarTemplate() ||
        isa<VarTemplatePartialSpecializationDecl>(VD))
      return false;
  } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    // We never need to emit an uninstantiated function template.
    if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
      return false;
  } else if (isa<PragmaCommentDecl>(D))
    return true;
  else if (isa<PragmaDetectMismatchDecl>(D))
    return true;
  else if (isa<OMPRequiresDecl>(D))
    return true;
  else if (isa<OMPThreadPrivateDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPAllocateDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<ImportDecl>(D))
    return true;
  else
    return false;

  // If this is a member of a class template, we do not need to emit it.
  if (D->getDeclContext()->isDependentContext())
    return false;

  // Weak references don't produce any output by themselves.
  if (D->hasAttr<WeakRefAttr>())
    return false;

  // Aliases and used decls are required.
  if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
    return true;

  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    // Forward declarations aren't required.
    if (!FD->doesThisDeclarationHaveABody())
      return FD->doesDeclarationForceExternallyVisibleDefinition();

    // Constructors and destructors are required.
    if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
      return true;

    // The key function for a class is required.  This rule only comes
    // into play when inline functions can be key functions, though.
    if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
      if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
        const CXXRecordDecl *RD = MD->getParent();
        if (MD->isOutOfLine() && RD->isDynamicClass()) {
          const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD);
          if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
            return true;
        }
      }
    }

    GVALinkage Linkage = GetGVALinkageForFunction(FD);

    // static, static inline, always_inline, and extern inline functions can
    // always be deferred.  Normal inline functions can be deferred in C99/C++.
    // Implicit template instantiations can also be deferred in C++.
    return !isDiscardableGVALinkage(Linkage);
  }

  const auto *VD = cast<VarDecl>(D);
  assert(VD->isFileVarDecl() && "Expected file scoped var");

  // If the decl is marked as `declare target to`, it should be emitted for the
  // host and for the device.
  if (LangOpts.OpenMP &&
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
    return true;

  if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
      !isMSStaticDataMemberInlineDefinition(VD))
    return false;

  if (VD->shouldEmitInExternalSource())
    return false;

  // Variables that can be needed in other TUs are required.
  auto Linkage = GetGVALinkageForVariable(VD);
  if (!isDiscardableGVALinkage(Linkage))
    return true;

  // We never need to emit a variable that is available in another TU.
  if (Linkage == GVA_AvailableExternally)
    return false;

  // Variables that have destruction with side-effects are required.
  if (VD->needsDestruction(*this))
    return true;

  // Variables that have initialization with side-effects are required.
  if (VD->getInit() && VD->getInit()->HasSideEffects(*this) &&
      // We can get a value-dependent initializer during error recovery.
      (VD->getInit()->isValueDependent() || !VD->evaluateValue()))
    return true;

  // Likewise, variables with tuple-like bindings are required if their
  // bindings have side-effects.
  if (const auto *DD = dyn_cast<DecompositionDecl>(VD))
    for (const auto *BD : DD->bindings())
      if (const auto *BindingVD = BD->getHoldingVar())
        if (DeclMustBeEmitted(BindingVD))
          return true;

  return false;
}

/// Invoke Pred on every version of the multiversioned function FD (found via
/// redeclaration-context lookup of FD's name), visiting each at most once.
void ASTContext::forEachMultiversionedFunctionVersion(
    const FunctionDecl *FD,
    llvm::function_ref<void(FunctionDecl *)> Pred) const {
  assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
  llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls;
  FD = FD->getMostRecentDecl();
  // FIXME: The order of traversal here matters and depends on the order of
  // lookup results, which happens to be (mostly) oldest-to-newest, but we
  // shouldn't rely on that.
  for (auto *CurDecl :
       FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) {
    FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
    if (CurFD && hasSameType(CurFD->getType(), FD->getType()) &&
        !SeenDecls.contains(CurFD)) {
      SeenDecls.insert(CurFD);
      Pred(CurFD);
    }
  }
}

/// Pick the default calling convention for a function, honoring the
/// -fdefault-calling-conv option except for builtins and C++ methods.
CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
                                                    bool IsCXXMethod,
                                                    bool IsBuiltin) const {
  // Pass through to the C++ ABI object
  if (IsCXXMethod)
    return ABI->getDefaultMethodCallConv(IsVariadic);

  // Builtins ignore user-specified default calling convention and remain the
  // Target's default calling convention.
  if (!IsBuiltin) {
    switch (LangOpts.getDefaultCallingConv()) {
    case LangOptions::DCC_None:
      break;
    case LangOptions::DCC_CDecl:
      return CC_C;
    case LangOptions::DCC_FastCall:
      if (getTargetInfo().hasFeature("sse2") && !IsVariadic)
        return CC_X86FastCall;
      break;
    case LangOptions::DCC_StdCall:
      if (!IsVariadic)
        return CC_X86StdCall;
      break;
    case LangOptions::DCC_VectorCall:
      // __vectorcall cannot be applied to variadic functions.
      if (!IsVariadic)
        return CC_X86VectorCall;
      break;
    case LangOptions::DCC_RegCall:
      // __regcall cannot be applied to variadic functions.
      if (!IsVariadic)
        return CC_X86RegCall;
      break;
    case LangOptions::DCC_RtdCall:
      if (!IsVariadic)
        return CC_M68kRTD;
      break;
    }
  }
  return Target->getDefaultCallingConv();
}

bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
  // Pass through to the C++ ABI object
  return ABI->isNearlyEmpty(RD);
}

/// Lazily create (on first use) and return the vtable-layout context matching
/// the target's C++ ABI (Microsoft vs. Itanium-family).
VTableContextBase *ASTContext::getVTableContext() {
  if (!VTContext.get()) {
    auto ABI = Target->getCXXABI();
    if (ABI.isMicrosoft())
      VTContext.reset(new MicrosoftVTableContext(*this));
    else {
      auto ComponentLayout = getLangOpts().RelativeCXXABIVTables
                                 ? ItaniumVTableContext::Relative
                                 : ItaniumVTableContext::Pointer;
      VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout));
    }
  }
  return VTContext.get();
}

/// Create a mangle context for the given target (defaulting to the primary
/// target); the caller owns the returned object.
MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
  if (!T)
    T = Target;
  switch (T->getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(*this, getDiagnostics());
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(*this, getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI");
}

/// Create a mangle context for the device side of a single-source offload
/// compilation; lambdas use the device-side mangling number.
MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) {
  assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft &&
         "Device mangle context does not support Microsoft mangling.");
  switch (T.getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(
        *this, getDiagnostics(),
        [](ASTContext &, const NamedDecl *ND) -> std::optional<unsigned> {
          if (const auto *RD = dyn_cast<CXXRecordDecl>(ND))
            return RD->getDeviceLambdaManglingNumber();
          return std::nullopt;
        },
        /*IsAux=*/true);
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(*this, getDiagnostics(),
                                          /*IsAux=*/true);
  }
  llvm_unreachable("Unsupported ABI");
}

CXXABI::~CXXABI() = default;

/// Approximate the heap memory used by ASTContext's side tables, for
/// -print-stats style reporting.
size_t ASTContext::getSideTableAllocatedMemory() const {
  return ASTRecordLayouts.getMemorySize() +
         llvm::capacity_in_bytes(ObjCLayouts) +
         llvm::capacity_in_bytes(KeyFunctions) +
         llvm::capacity_in_bytes(ObjCImpls) +
         llvm::capacity_in_bytes(BlockVarCopyInits) +
         llvm::capacity_in_bytes(DeclAttrs) +
         llvm::capacity_in_bytes(TemplateOrInstantiation) +
         llvm::capacity_in_bytes(InstantiatedFromUsingDecl) +
         llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) +
         llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) +
         llvm::capacity_in_bytes(OverriddenMethods) +
         llvm::capacity_in_bytes(Types) +
         llvm::capacity_in_bytes(VariableArrayTypes);
}

/// getIntTypeForBitwidth -
/// sets integer QualTy according to specified details:
/// bitwidth, signed/unsigned.
/// Returns empty type if there is no appropriate target types.
QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
                                           unsigned Signed) const {
  TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed);
  CanQualType QualTy = getFromTargetType(Ty);
  // The target has no standard integer of this width; fall back to __int128
  // when a 128-bit type was requested.
  if (!QualTy && DestWidth == 128)
    return Signed ? Int128Ty : UnsignedInt128Ty;
  return QualTy;
}

/// getRealTypeForBitwidth -
/// sets floating point QualTy according to specified bitwidth.
/// Returns empty type if there is no appropriate target types.
QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
                                            FloatModeKind ExplicitType) const {
  // Ask the target which floating-point mode matches the requested width,
  // then map that mode back to the corresponding AST type.
  FloatModeKind Ty =
      getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitType);
  switch (Ty) {
  case FloatModeKind::Half:
    return HalfTy;
  case FloatModeKind::Float:
    return FloatTy;
  case FloatModeKind::Double:
    return DoubleTy;
  case FloatModeKind::LongDouble:
    return LongDoubleTy;
  case FloatModeKind::Float128:
    return Float128Ty;
  case FloatModeKind::Ibm128:
    return Ibm128Ty;
  case FloatModeKind::NoFloat:
    return {};
  }

  llvm_unreachable("Unhandled TargetInfo::RealType value");
}

/// Record ND's mangling number; numbers <= 1 are the default and not stored.
void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
  if (Number <= 1)
    return;

  MangleNumbers[ND] = Number;

  if (Listener)
    Listener->AddedManglingNumber(ND, Number);
}

unsigned ASTContext::getManglingNumber(const NamedDecl *ND,
                                       bool ForAuxTarget) const {
  auto I = MangleNumbers.find(ND);
  unsigned Res = I != MangleNumbers.end() ? I->second : 1;
  // CUDA/HIP host compilation encodes host and device mangling numbers
  // as lower and upper half of 32 bit integer.
  if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) {
    Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF;
  } else {
    assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling "
                            "number for aux target");
  }
  // Clamp to the default of 1 (a decoded half may be 0).
  return Res > 1 ? Res : 1;
}

/// Record VD's static-local number; numbers <= 1 are the default, not stored.
void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
  if (Number <= 1)
    return;

  StaticLocalNumbers[VD] = Number;

  if (Listener)
    Listener->AddedStaticLocalNumbers(VD, Number);
}

unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
  auto I = StaticLocalNumbers.find(VD);
  return I != StaticLocalNumbers.end() ? I->second : 1;
}

/// Lazily create and return the mangle numbering context for DC.
MangleNumberingContext &
ASTContext::getManglingNumberContext(const DeclContext *DC) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
  if (!MCtx)
    MCtx = createMangleNumberingContext();
  return *MCtx;
}

/// Lazily create and return the extra mangle numbering context keyed on D.
MangleNumberingContext &
ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  std::unique_ptr<MangleNumberingContext> &MCtx =
      ExtraMangleNumberingContexts[D];
  if (!MCtx)
    MCtx = createMangleNumberingContext();
  return *MCtx;
}

std::unique_ptr<MangleNumberingContext>
ASTContext::createMangleNumberingContext() const {
  return ABI->createMangleNumberingContext();
}

// The next several functions forward to the C++ ABI object, keying side-table
// entries on the canonical (first) declaration where applicable.

const CXXConstructorDecl *
ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
  return ABI->getCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(RD->getFirstDecl()));
}

void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
                                                      CXXConstructorDecl *CD) {
  return ABI->addCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(RD->getFirstDecl()),
      cast<CXXConstructorDecl>(CD->getFirstDecl()));
}

void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
                                                 TypedefNameDecl *DD) {
  return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
}

TypedefNameDecl *
ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getTypedefNameForUnnamedTagDecl(TD);
}

void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD,
                                                DeclaratorDecl *DD) {
  return ABI->addDeclaratorForUnnamedTagDecl(TD, DD);
}

DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getDeclaratorForUnnamedTagDecl(TD);
}

void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
  ParamIndices[D] = index;
}

unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
  // Callers must have recorded the index via setParameterIndex first.
  ParameterIndexTable::const_iterator I = ParamIndices.find(D);
  assert(I != ParamIndices.end() &&
         "ParmIndices lacks entry set by ParmVarDecl");
  return I->second;
}

QualType ASTContext::getStringLiteralArrayType(QualType EltTy,
                                               unsigned Length) const {
  // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
  if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
    EltTy = EltTy.withConst();

  EltTy = adjustStringLiteralBaseType(EltTy);

  // Get an array type for the string, according to C99 6.4.5. This includes
  // the null terminator character.
  return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr,
                              ArraySizeModifier::Normal, /*IndexTypeQuals*/ 0);
}

/// Return the cached ordinary StringLiteral for Key, creating it on first use.
StringLiteral *
ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
  StringLiteral *&Result = StringLiteralCache[Key];
  if (!Result)
    Result = StringLiteral::Create(
        *this, Key, StringLiteralKind::Ordinary,
        /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()),
        SourceLocation());
  return Result;
}

/// Return the uniqued MSGuidDecl for the given GUID value (folding-set
/// deduplicated).
MSGuidDecl *
ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
  assert(MSGuidTagDecl && "building MS GUID without MS extensions?");

  llvm::FoldingSetNodeID ID;
  MSGuidDecl::Profile(ID, Parts);

  void *InsertPos;
  if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  QualType GUIDType = getMSGuidType().withConst();
  MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts);
  MSGuidDecls.InsertNode(New, InsertPos);
  return New;
}

/// Return the uniqued UnnamedGlobalConstantDecl for (Ty, APVal), creating it
/// if no equivalent one exists yet (folding-set deduplicated).
UnnamedGlobalConstantDecl *
ASTContext::getUnnamedGlobalConstantDecl(QualType Ty,
                                         const APValue &APVal) const {
  llvm::FoldingSetNodeID ID;
  UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal);

  void *InsertPos;
  if (UnnamedGlobalConstantDecl *Existing =
          UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  UnnamedGlobalConstantDecl *New =
      UnnamedGlobalConstantDecl::Create(*this, Ty, APVal);
UnnamedGlobalConstantDecls.InsertNode(New, InsertPos); 12781 return New; 12782 } 12783 12784 TemplateParamObjectDecl * 12785 ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const { 12786 assert(T->isRecordType() && "template param object of unexpected type"); 12787 12788 // C++ [temp.param]p8: 12789 // [...] a static storage duration object of type 'const T' [...] 12790 T.addConst(); 12791 12792 llvm::FoldingSetNodeID ID; 12793 TemplateParamObjectDecl::Profile(ID, T, V); 12794 12795 void *InsertPos; 12796 if (TemplateParamObjectDecl *Existing = 12797 TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos)) 12798 return Existing; 12799 12800 TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V); 12801 TemplateParamObjectDecls.InsertNode(New, InsertPos); 12802 return New; 12803 } 12804 12805 bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const { 12806 const llvm::Triple &T = getTargetInfo().getTriple(); 12807 if (!T.isOSDarwin()) 12808 return false; 12809 12810 if (!(T.isiOS() && T.isOSVersionLT(7)) && 12811 !(T.isMacOSX() && T.isOSVersionLT(10, 9))) 12812 return false; 12813 12814 QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); 12815 CharUnits sizeChars = getTypeSizeInChars(AtomicTy); 12816 uint64_t Size = sizeChars.getQuantity(); 12817 CharUnits alignChars = getTypeAlignInChars(AtomicTy); 12818 unsigned Align = alignChars.getQuantity(); 12819 unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth(); 12820 return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits); 12821 } 12822 12823 bool 12824 ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl, 12825 const ObjCMethodDecl *MethodImpl) { 12826 // No point trying to match an unavailable/deprecated mothod. 
12827 if (MethodDecl->hasAttr<UnavailableAttr>() 12828 || MethodDecl->hasAttr<DeprecatedAttr>()) 12829 return false; 12830 if (MethodDecl->getObjCDeclQualifier() != 12831 MethodImpl->getObjCDeclQualifier()) 12832 return false; 12833 if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType())) 12834 return false; 12835 12836 if (MethodDecl->param_size() != MethodImpl->param_size()) 12837 return false; 12838 12839 for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(), 12840 IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(), 12841 EF = MethodDecl->param_end(); 12842 IM != EM && IF != EF; ++IM, ++IF) { 12843 const ParmVarDecl *DeclVar = (*IF); 12844 const ParmVarDecl *ImplVar = (*IM); 12845 if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier()) 12846 return false; 12847 if (!hasSameType(DeclVar->getType(), ImplVar->getType())) 12848 return false; 12849 } 12850 12851 return (MethodDecl->isVariadic() == MethodImpl->isVariadic()); 12852 } 12853 12854 uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const { 12855 LangAS AS; 12856 if (QT->getUnqualifiedDesugaredType()->isNullPtrType()) 12857 AS = LangAS::Default; 12858 else 12859 AS = QT->getPointeeType().getAddressSpace(); 12860 12861 return getTargetInfo().getNullPointerValue(AS); 12862 } 12863 12864 unsigned ASTContext::getTargetAddressSpace(LangAS AS) const { 12865 return getTargetInfo().getTargetAddressSpace(AS); 12866 } 12867 12868 bool ASTContext::hasSameExpr(const Expr *X, const Expr *Y) const { 12869 if (X == Y) 12870 return true; 12871 if (!X || !Y) 12872 return false; 12873 llvm::FoldingSetNodeID IDX, IDY; 12874 X->Profile(IDX, *this, /*Canonical=*/true); 12875 Y->Profile(IDY, *this, /*Canonical=*/true); 12876 return IDX == IDY; 12877 } 12878 12879 // The getCommon* helpers return, for given 'same' X and Y entities given as 12880 // inputs, another entity which is also the 'same' as the inputs, but which 12881 // is closer to the canonical 
form of the inputs, each according to a given 12882 // criteria. 12883 // The getCommon*Checked variants are 'null inputs not-allowed' equivalents of 12884 // the regular ones. 12885 12886 static Decl *getCommonDecl(Decl *X, Decl *Y) { 12887 if (!declaresSameEntity(X, Y)) 12888 return nullptr; 12889 for (const Decl *DX : X->redecls()) { 12890 // If we reach Y before reaching the first decl, that means X is older. 12891 if (DX == Y) 12892 return X; 12893 // If we reach the first decl, then Y is older. 12894 if (DX->isFirstDecl()) 12895 return Y; 12896 } 12897 llvm_unreachable("Corrupt redecls chain"); 12898 } 12899 12900 template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true> 12901 static T *getCommonDecl(T *X, T *Y) { 12902 return cast_or_null<T>( 12903 getCommonDecl(const_cast<Decl *>(cast_or_null<Decl>(X)), 12904 const_cast<Decl *>(cast_or_null<Decl>(Y)))); 12905 } 12906 12907 template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true> 12908 static T *getCommonDeclChecked(T *X, T *Y) { 12909 return cast<T>(getCommonDecl(const_cast<Decl *>(cast<Decl>(X)), 12910 const_cast<Decl *>(cast<Decl>(Y)))); 12911 } 12912 12913 static TemplateName getCommonTemplateName(ASTContext &Ctx, TemplateName X, 12914 TemplateName Y) { 12915 if (X.getAsVoidPointer() == Y.getAsVoidPointer()) 12916 return X; 12917 // FIXME: There are cases here where we could find a common template name 12918 // with more sugar. For example one could be a SubstTemplateTemplate* 12919 // replacing the other. 
12920 TemplateName CX = Ctx.getCanonicalTemplateName(X); 12921 if (CX.getAsVoidPointer() != 12922 Ctx.getCanonicalTemplateName(Y).getAsVoidPointer()) 12923 return TemplateName(); 12924 return CX; 12925 } 12926 12927 static TemplateName 12928 getCommonTemplateNameChecked(ASTContext &Ctx, TemplateName X, TemplateName Y) { 12929 TemplateName R = getCommonTemplateName(Ctx, X, Y); 12930 assert(R.getAsVoidPointer() != nullptr); 12931 return R; 12932 } 12933 12934 static auto getCommonTypes(ASTContext &Ctx, ArrayRef<QualType> Xs, 12935 ArrayRef<QualType> Ys, bool Unqualified = false) { 12936 assert(Xs.size() == Ys.size()); 12937 SmallVector<QualType, 8> Rs(Xs.size()); 12938 for (size_t I = 0; I < Rs.size(); ++I) 12939 Rs[I] = Ctx.getCommonSugaredType(Xs[I], Ys[I], Unqualified); 12940 return Rs; 12941 } 12942 12943 template <class T> 12944 static SourceLocation getCommonAttrLoc(const T *X, const T *Y) { 12945 return X->getAttributeLoc() == Y->getAttributeLoc() ? X->getAttributeLoc() 12946 : SourceLocation(); 12947 } 12948 12949 static TemplateArgument getCommonTemplateArgument(ASTContext &Ctx, 12950 const TemplateArgument &X, 12951 const TemplateArgument &Y) { 12952 if (X.getKind() != Y.getKind()) 12953 return TemplateArgument(); 12954 12955 switch (X.getKind()) { 12956 case TemplateArgument::ArgKind::Type: 12957 if (!Ctx.hasSameType(X.getAsType(), Y.getAsType())) 12958 return TemplateArgument(); 12959 return TemplateArgument( 12960 Ctx.getCommonSugaredType(X.getAsType(), Y.getAsType())); 12961 case TemplateArgument::ArgKind::NullPtr: 12962 if (!Ctx.hasSameType(X.getNullPtrType(), Y.getNullPtrType())) 12963 return TemplateArgument(); 12964 return TemplateArgument( 12965 Ctx.getCommonSugaredType(X.getNullPtrType(), Y.getNullPtrType()), 12966 /*Unqualified=*/true); 12967 case TemplateArgument::ArgKind::Expression: 12968 if (!Ctx.hasSameType(X.getAsExpr()->getType(), Y.getAsExpr()->getType())) 12969 return TemplateArgument(); 12970 // FIXME: Try to keep the common sugar. 
12971 return X; 12972 case TemplateArgument::ArgKind::Template: { 12973 TemplateName TX = X.getAsTemplate(), TY = Y.getAsTemplate(); 12974 TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY); 12975 if (!CTN.getAsVoidPointer()) 12976 return TemplateArgument(); 12977 return TemplateArgument(CTN); 12978 } 12979 case TemplateArgument::ArgKind::TemplateExpansion: { 12980 TemplateName TX = X.getAsTemplateOrTemplatePattern(), 12981 TY = Y.getAsTemplateOrTemplatePattern(); 12982 TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY); 12983 if (!CTN.getAsVoidPointer()) 12984 return TemplateName(); 12985 auto NExpX = X.getNumTemplateExpansions(); 12986 assert(NExpX == Y.getNumTemplateExpansions()); 12987 return TemplateArgument(CTN, NExpX); 12988 } 12989 default: 12990 // FIXME: Handle the other argument kinds. 12991 return X; 12992 } 12993 } 12994 12995 static bool getCommonTemplateArguments(ASTContext &Ctx, 12996 SmallVectorImpl<TemplateArgument> &R, 12997 ArrayRef<TemplateArgument> Xs, 12998 ArrayRef<TemplateArgument> Ys) { 12999 if (Xs.size() != Ys.size()) 13000 return true; 13001 R.resize(Xs.size()); 13002 for (size_t I = 0; I < R.size(); ++I) { 13003 R[I] = getCommonTemplateArgument(Ctx, Xs[I], Ys[I]); 13004 if (R[I].isNull()) 13005 return true; 13006 } 13007 return false; 13008 } 13009 13010 static auto getCommonTemplateArguments(ASTContext &Ctx, 13011 ArrayRef<TemplateArgument> Xs, 13012 ArrayRef<TemplateArgument> Ys) { 13013 SmallVector<TemplateArgument, 8> R; 13014 bool Different = getCommonTemplateArguments(Ctx, R, Xs, Ys); 13015 assert(!Different); 13016 (void)Different; 13017 return R; 13018 } 13019 13020 template <class T> 13021 static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y) { 13022 return X->getKeyword() == Y->getKeyword() ? 
X->getKeyword() 13023 : ElaboratedTypeKeyword::None; 13024 } 13025 13026 template <class T> 13027 static NestedNameSpecifier *getCommonNNS(ASTContext &Ctx, const T *X, 13028 const T *Y) { 13029 // FIXME: Try to keep the common NNS sugar. 13030 return X->getQualifier() == Y->getQualifier() 13031 ? X->getQualifier() 13032 : Ctx.getCanonicalNestedNameSpecifier(X->getQualifier()); 13033 } 13034 13035 template <class T> 13036 static QualType getCommonElementType(ASTContext &Ctx, const T *X, const T *Y) { 13037 return Ctx.getCommonSugaredType(X->getElementType(), Y->getElementType()); 13038 } 13039 13040 template <class T> 13041 static QualType getCommonArrayElementType(ASTContext &Ctx, const T *X, 13042 Qualifiers &QX, const T *Y, 13043 Qualifiers &QY) { 13044 QualType EX = X->getElementType(), EY = Y->getElementType(); 13045 QualType R = Ctx.getCommonSugaredType(EX, EY, 13046 /*Unqualified=*/true); 13047 Qualifiers RQ = R.getQualifiers(); 13048 QX += EX.getQualifiers() - RQ; 13049 QY += EY.getQualifiers() - RQ; 13050 return R; 13051 } 13052 13053 template <class T> 13054 static QualType getCommonPointeeType(ASTContext &Ctx, const T *X, const T *Y) { 13055 return Ctx.getCommonSugaredType(X->getPointeeType(), Y->getPointeeType()); 13056 } 13057 13058 template <class T> static auto *getCommonSizeExpr(ASTContext &Ctx, T *X, T *Y) { 13059 assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr())); 13060 return X->getSizeExpr(); 13061 } 13062 13063 static auto getCommonSizeModifier(const ArrayType *X, const ArrayType *Y) { 13064 assert(X->getSizeModifier() == Y->getSizeModifier()); 13065 return X->getSizeModifier(); 13066 } 13067 13068 static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X, 13069 const ArrayType *Y) { 13070 assert(X->getIndexTypeCVRQualifiers() == Y->getIndexTypeCVRQualifiers()); 13071 return X->getIndexTypeCVRQualifiers(); 13072 } 13073 13074 // Merges two type lists such that the resulting vector will contain 13075 // each type (in a canonical 
sense) only once, in the order they appear 13076 // from X to Y. If they occur in both X and Y, the result will contain 13077 // the common sugared type between them. 13078 static void mergeTypeLists(ASTContext &Ctx, SmallVectorImpl<QualType> &Out, 13079 ArrayRef<QualType> X, ArrayRef<QualType> Y) { 13080 llvm::DenseMap<QualType, unsigned> Found; 13081 for (auto Ts : {X, Y}) { 13082 for (QualType T : Ts) { 13083 auto Res = Found.try_emplace(Ctx.getCanonicalType(T), Out.size()); 13084 if (!Res.second) { 13085 QualType &U = Out[Res.first->second]; 13086 U = Ctx.getCommonSugaredType(U, T); 13087 } else { 13088 Out.emplace_back(T); 13089 } 13090 } 13091 } 13092 } 13093 13094 FunctionProtoType::ExceptionSpecInfo 13095 ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1, 13096 FunctionProtoType::ExceptionSpecInfo ESI2, 13097 SmallVectorImpl<QualType> &ExceptionTypeStorage, 13098 bool AcceptDependent) { 13099 ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type; 13100 13101 // If either of them can throw anything, that is the result. 13102 for (auto I : {EST_None, EST_MSAny, EST_NoexceptFalse}) { 13103 if (EST1 == I) 13104 return ESI1; 13105 if (EST2 == I) 13106 return ESI2; 13107 } 13108 13109 // If either of them is non-throwing, the result is the other. 13110 for (auto I : 13111 {EST_NoThrow, EST_DynamicNone, EST_BasicNoexcept, EST_NoexceptTrue}) { 13112 if (EST1 == I) 13113 return ESI2; 13114 if (EST2 == I) 13115 return ESI1; 13116 } 13117 13118 // If we're left with value-dependent computed noexcept expressions, we're 13119 // stuck. Before C++17, we can just drop the exception specification entirely, 13120 // since it's not actually part of the canonical type. And this should never 13121 // happen in C++17, because it would mean we were computing the composite 13122 // pointer type of dependent types, which should never happen. 
  if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) {
    assert(AcceptDependent &&
           "computing composite pointer type of dependent types");
    return FunctionProtoType::ExceptionSpecInfo();
  }

  // Switch over the possibilities so that people adding new values know to
  // update this function.
  switch (EST1) {
  case EST_None:
  case EST_DynamicNone:
  case EST_MSAny:
  case EST_BasicNoexcept:
  case EST_DependentNoexcept:
  case EST_NoexceptFalse:
  case EST_NoexceptTrue:
  case EST_NoThrow:
    llvm_unreachable("These ESTs should be handled above");

  case EST_Dynamic: {
    // This is the fun case: both exception specifications are dynamic. Form
    // the union of the two lists.
    assert(EST2 == EST_Dynamic && "other cases should already be handled");
    mergeTypeLists(*this, ExceptionTypeStorage, ESI1.Exceptions,
                   ESI2.Exceptions);
    FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic);
    Result.Exceptions = ExceptionTypeStorage;
    return Result;
  }

  case EST_Unevaluated:
  case EST_Uninstantiated:
  case EST_Unparsed:
    llvm_unreachable("shouldn't see unresolved exception specifications here");
  }

  llvm_unreachable("invalid ExceptionSpecificationType");
}

/// Build the common form of two structurally-equal type nodes which are not
/// themselves sugar over another type (the "non-sugar" step of
/// getCommonSugaredType). X and Y have the same type class by precondition;
/// QX/QY collect qualifiers stripped from array element types so callers can
/// reapply them.
static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
                                          Qualifiers &QX, const Type *Y,
                                          Qualifiers &QY) {
  Type::TypeClass TC = X->getTypeClass();
  assert(TC == Y->getTypeClass());
  switch (TC) {
#define UNEXPECTED_TYPE(Class, Kind)                                           \
  case Type::Class:                                                            \
    llvm_unreachable("Unexpected " Kind ": " #Class);

// Non-canonical (sugar) nodes must have been peeled off before reaching here.
#define NON_CANONICAL_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "non-canonical")
#define TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"

// Sugar-free nodes are uniqued, so two "same" ones are pointer-identical and
// would have been handled earlier.
#define SUGAR_FREE_TYPE(Class) UNEXPECTED_TYPE(Class, "sugar-free")
  SUGAR_FREE_TYPE(Builtin)
  SUGAR_FREE_TYPE(DeducedTemplateSpecialization)
  SUGAR_FREE_TYPE(DependentBitInt)
  SUGAR_FREE_TYPE(Enum)
  SUGAR_FREE_TYPE(BitInt)
  SUGAR_FREE_TYPE(ObjCInterface)
  SUGAR_FREE_TYPE(Record)
  SUGAR_FREE_TYPE(SubstTemplateTypeParmPack)
  SUGAR_FREE_TYPE(UnresolvedUsing)
#undef SUGAR_FREE_TYPE
#define NON_UNIQUE_TYPE(Class) UNEXPECTED_TYPE(Class, "non-unique")
  NON_UNIQUE_TYPE(TypeOfExpr)
  NON_UNIQUE_TYPE(VariableArray)
#undef NON_UNIQUE_TYPE

  UNEXPECTED_TYPE(TypeOf, "sugar")

#undef UNEXPECTED_TYPE

  case Type::Auto: {
    const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y);
    assert(AX->getDeducedType().isNull());
    assert(AY->getDeducedType().isNull());
    assert(AX->getKeyword() == AY->getKeyword());
    assert(AX->isInstantiationDependentType() ==
           AY->isInstantiationDependentType());
    auto As = getCommonTemplateArguments(Ctx, AX->getTypeConstraintArguments(),
                                         AY->getTypeConstraintArguments());
    return Ctx.getAutoType(QualType(), AX->getKeyword(),
                           AX->isInstantiationDependentType(),
                           AX->containsUnexpandedParameterPack(),
                           getCommonDeclChecked(AX->getTypeConstraintConcept(),
                                                AY->getTypeConstraintConcept()),
                           As);
  }
  case Type::IncompleteArray: {
    const auto *AX = cast<IncompleteArrayType>(X),
               *AY = cast<IncompleteArrayType>(Y);
    return Ctx.getIncompleteArrayType(
        getCommonArrayElementType(Ctx, AX, QX, AY, QY),
        getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY));
  }
  case Type::DependentSizedArray: {
    const auto *AX = cast<DependentSizedArrayType>(X),
               *AY = cast<DependentSizedArrayType>(Y);
    return Ctx.getDependentSizedArrayType(
        getCommonArrayElementType(Ctx, AX, QX, AY, QY),
        getCommonSizeExpr(Ctx, AX, AY), getCommonSizeModifier(AX, AY),
        getCommonIndexTypeCVRQualifiers(AX, AY),
        AX->getBracketsRange() == AY->getBracketsRange()
            ? AX->getBracketsRange()
            : SourceRange());
  }
  case Type::ConstantArray: {
    const auto *AX = cast<ConstantArrayType>(X),
               *AY = cast<ConstantArrayType>(Y);
    assert(AX->getSize() == AY->getSize());
    // Only keep a size expression both spellings share.
    const Expr *SizeExpr = Ctx.hasSameExpr(AX->getSizeExpr(), AY->getSizeExpr())
                               ? AX->getSizeExpr()
                               : nullptr;
    return Ctx.getConstantArrayType(
        getCommonArrayElementType(Ctx, AX, QX, AY, QY), AX->getSize(), SizeExpr,
        getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY));
  }
  case Type::ArrayParameter: {
    const auto *AX = cast<ArrayParameterType>(X),
               *AY = cast<ArrayParameterType>(Y);
    assert(AX->getSize() == AY->getSize());
    const Expr *SizeExpr = Ctx.hasSameExpr(AX->getSizeExpr(), AY->getSizeExpr())
                               ? AX->getSizeExpr()
                               : nullptr;
    auto ArrayTy = Ctx.getConstantArrayType(
        getCommonArrayElementType(Ctx, AX, QX, AY, QY), AX->getSize(), SizeExpr,
        getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY));
    return Ctx.getArrayParameterType(ArrayTy);
  }
  case Type::Atomic: {
    const auto *AX = cast<AtomicType>(X), *AY = cast<AtomicType>(Y);
    return Ctx.getAtomicType(
        Ctx.getCommonSugaredType(AX->getValueType(), AY->getValueType()));
  }
  case Type::Complex: {
    const auto *CX = cast<ComplexType>(X), *CY = cast<ComplexType>(Y);
    return Ctx.getComplexType(getCommonArrayElementType(Ctx, CX, QX, CY, QY));
  }
  case Type::Pointer: {
    const auto *PX = cast<PointerType>(X), *PY = cast<PointerType>(Y);
    return Ctx.getPointerType(getCommonPointeeType(Ctx, PX, PY));
  }
  case Type::BlockPointer: {
    const auto *PX = cast<BlockPointerType>(X), *PY = cast<BlockPointerType>(Y);
    return Ctx.getBlockPointerType(getCommonPointeeType(Ctx, PX, PY));
  }
  case Type::ObjCObjectPointer: {
    const auto *PX = cast<ObjCObjectPointerType>(X),
               *PY = cast<ObjCObjectPointerType>(Y);
    return Ctx.getObjCObjectPointerType(getCommonPointeeType(Ctx, PX, PY));
  }
  case Type::MemberPointer: {
    const auto *PX = cast<MemberPointerType>(X),
               *PY = cast<MemberPointerType>(Y);
    return Ctx.getMemberPointerType(
        getCommonPointeeType(Ctx, PX, PY),
        Ctx.getCommonSugaredType(QualType(PX->getClass(), 0),
                                 QualType(PY->getClass(), 0))
            .getTypePtr());
  }
  case Type::LValueReference: {
    const auto *PX = cast<LValueReferenceType>(X),
               *PY = cast<LValueReferenceType>(Y);
    // FIXME: Preserve PointeeTypeAsWritten.
    return Ctx.getLValueReferenceType(getCommonPointeeType(Ctx, PX, PY),
                                      PX->isSpelledAsLValue() ||
                                          PY->isSpelledAsLValue());
  }
  case Type::RValueReference: {
    const auto *PX = cast<RValueReferenceType>(X),
               *PY = cast<RValueReferenceType>(Y);
    // FIXME: Preserve PointeeTypeAsWritten.
    return Ctx.getRValueReferenceType(getCommonPointeeType(Ctx, PX, PY));
  }
  case Type::DependentAddressSpace: {
    const auto *PX = cast<DependentAddressSpaceType>(X),
               *PY = cast<DependentAddressSpaceType>(Y);
    assert(Ctx.hasSameExpr(PX->getAddrSpaceExpr(), PY->getAddrSpaceExpr()));
    return Ctx.getDependentAddressSpaceType(getCommonPointeeType(Ctx, PX, PY),
                                            PX->getAddrSpaceExpr(),
                                            getCommonAttrLoc(PX, PY));
  }
  case Type::FunctionNoProto: {
    const auto *FX = cast<FunctionNoProtoType>(X),
               *FY = cast<FunctionNoProtoType>(Y);
    assert(FX->getExtInfo() == FY->getExtInfo());
    return Ctx.getFunctionNoProtoType(
        Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType()),
        FX->getExtInfo());
  }
  case Type::FunctionProto: {
    const auto *FX = cast<FunctionProtoType>(X),
               *FY = cast<FunctionProtoType>(Y);
    FunctionProtoType::ExtProtoInfo EPIX = FX->getExtProtoInfo(),
                                    EPIY = FY->getExtProtoInfo();
    assert(EPIX.ExtInfo == EPIY.ExtInfo);
    assert(EPIX.ExtParameterInfos == EPIY.ExtParameterInfos);
    assert(EPIX.RefQualifier == EPIY.RefQualifier);
    assert(EPIX.TypeQuals == EPIY.TypeQuals);
    assert(EPIX.Variadic == EPIY.Variadic);

    // FIXME: Can we handle an empty EllipsisLoc?
    // Use empty EllipsisLoc if X and Y differ.

    EPIX.HasTrailingReturn = EPIX.HasTrailingReturn && EPIY.HasTrailingReturn;

    QualType R =
        Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType());
    auto P = getCommonTypes(Ctx, FX->param_types(), FY->param_types(),
                            /*Unqualified=*/true);

    SmallVector<QualType, 8> Exceptions;
    EPIX.ExceptionSpec = Ctx.mergeExceptionSpecs(
        EPIX.ExceptionSpec, EPIY.ExceptionSpec, Exceptions, true);
    return Ctx.getFunctionType(R, P, EPIX);
  }
  case Type::ObjCObject: {
    const auto *OX = cast<ObjCObjectType>(X), *OY = cast<ObjCObjectType>(Y);
    assert(
        std::equal(OX->getProtocols().begin(), OX->getProtocols().end(),
                   OY->getProtocols().begin(), OY->getProtocols().end(),
                   [](const ObjCProtocolDecl *P0, const ObjCProtocolDecl *P1) {
                     return P0->getCanonicalDecl() == P1->getCanonicalDecl();
                   }) &&
        "protocol lists must be the same");
    auto TAs = getCommonTypes(Ctx, OX->getTypeArgsAsWritten(),
                              OY->getTypeArgsAsWritten());
    return Ctx.getObjCObjectType(
        Ctx.getCommonSugaredType(OX->getBaseType(), OY->getBaseType()), TAs,
        OX->getProtocols(),
        OX->isKindOfTypeAsWritten() && OY->isKindOfTypeAsWritten());
  }
  case Type::ConstantMatrix: {
    const auto *MX = cast<ConstantMatrixType>(X),
               *MY = cast<ConstantMatrixType>(Y);
    assert(MX->getNumRows() == MY->getNumRows());
    assert(MX->getNumColumns() == MY->getNumColumns());
    return Ctx.getConstantMatrixType(getCommonElementType(Ctx, MX, MY),
                                     MX->getNumRows(), MX->getNumColumns());
  }
  case Type::DependentSizedMatrix: {
    const auto *MX = cast<DependentSizedMatrixType>(X),
               *MY = cast<DependentSizedMatrixType>(Y);
    assert(Ctx.hasSameExpr(MX->getRowExpr(), MY->getRowExpr()));
    assert(Ctx.hasSameExpr(MX->getColumnExpr(), MY->getColumnExpr()));
    return Ctx.getDependentSizedMatrixType(
        getCommonElementType(Ctx, MX, MY), MX->getRowExpr(),
        MX->getColumnExpr(), getCommonAttrLoc(MX, MY));
  }
  case Type::Vector: {
    const auto *VX = cast<VectorType>(X), *VY = cast<VectorType>(Y);
    assert(VX->getNumElements() == VY->getNumElements());
    assert(VX->getVectorKind() == VY->getVectorKind());
    return Ctx.getVectorType(getCommonElementType(Ctx, VX, VY),
                             VX->getNumElements(), VX->getVectorKind());
  }
  case Type::ExtVector: {
    const auto *VX = cast<ExtVectorType>(X), *VY = cast<ExtVectorType>(Y);
    assert(VX->getNumElements() == VY->getNumElements());
    return Ctx.getExtVectorType(getCommonElementType(Ctx, VX, VY),
                                VX->getNumElements());
  }
  case Type::DependentSizedExtVector: {
    const auto *VX = cast<DependentSizedExtVectorType>(X),
               *VY = cast<DependentSizedExtVectorType>(Y);
    return Ctx.getDependentSizedExtVectorType(getCommonElementType(Ctx, VX, VY),
                                              getCommonSizeExpr(Ctx, VX, VY),
                                              getCommonAttrLoc(VX, VY));
  }
  case Type::DependentVector: {
    const auto *VX = cast<DependentVectorType>(X),
               *VY = cast<DependentVectorType>(Y);
    assert(VX->getVectorKind() == VY->getVectorKind());
    return Ctx.getDependentVectorType(
        getCommonElementType(Ctx, VX, VY), getCommonSizeExpr(Ctx, VX, VY),
        getCommonAttrLoc(VX, VY), VX->getVectorKind());
  }
  case Type::InjectedClassName: {
    const auto *IX = cast<InjectedClassNameType>(X),
               *IY = cast<InjectedClassNameType>(Y);
    return Ctx.getInjectedClassNameType(
        getCommonDeclChecked(IX->getDecl(), IY->getDecl()),
        Ctx.getCommonSugaredType(IX->getInjectedSpecializationType(),
                                 IY->getInjectedSpecializationType()));
  }
  case Type::TemplateSpecialization: {
    const auto *TX = cast<TemplateSpecializationType>(X),
               *TY = cast<TemplateSpecializationType>(Y);
    auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(),
                                         TY->template_arguments());
    return Ctx.getTemplateSpecializationType(
        ::getCommonTemplateNameChecked(Ctx, TX->getTemplateName(),
                                       TY->getTemplateName()),
        As, X->getCanonicalTypeInternal());
  }
  case Type::Decltype: {
    const auto *DX = cast<DecltypeType>(X);
    [[maybe_unused]] const auto *DY = cast<DecltypeType>(Y);
    assert(DX->isDependentType());
    assert(DY->isDependentType());
    assert(Ctx.hasSameExpr(DX->getUnderlyingExpr(), DY->getUnderlyingExpr()));
    // As Decltype is not uniqued, building a common type would be wasteful.
    return QualType(DX, 0);
  }
  case Type::PackIndexing: {
    const auto *DX = cast<PackIndexingType>(X);
    [[maybe_unused]] const auto *DY = cast<PackIndexingType>(Y);
    assert(DX->isDependentType());
    assert(DY->isDependentType());
    assert(Ctx.hasSameExpr(DX->getIndexExpr(), DY->getIndexExpr()));
    // Like Decltype, not uniqued; reuse X's node.
    return QualType(DX, 0);
  }
  case Type::DependentName: {
    const auto *NX = cast<DependentNameType>(X),
               *NY = cast<DependentNameType>(Y);
    assert(NX->getIdentifier() == NY->getIdentifier());
    return Ctx.getDependentNameType(
        getCommonTypeKeyword(NX, NY), getCommonNNS(Ctx, NX, NY),
        NX->getIdentifier(), NX->getCanonicalTypeInternal());
  }
  case Type::DependentTemplateSpecialization: {
    const auto *TX = cast<DependentTemplateSpecializationType>(X),
               *TY = cast<DependentTemplateSpecializationType>(Y);
    assert(TX->getIdentifier() == TY->getIdentifier());
    auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(),
                                         TY->template_arguments());
    return Ctx.getDependentTemplateSpecializationType(
        getCommonTypeKeyword(TX, TY), getCommonNNS(Ctx, TX, TY),
        TX->getIdentifier(), As);
  }
  case Type::UnaryTransform: {
    const auto *TX = cast<UnaryTransformType>(X),
               *TY = cast<UnaryTransformType>(Y);
    assert(TX->getUTTKind() == TY->getUTTKind());
    return Ctx.getUnaryTransformType(
        Ctx.getCommonSugaredType(TX->getBaseType(), TY->getBaseType()),
        Ctx.getCommonSugaredType(TX->getUnderlyingType(),
                                 TY->getUnderlyingType()),
        TX->getUTTKind());
  }
  case Type::PackExpansion: {
    const auto *PX = cast<PackExpansionType>(X),
               *PY = cast<PackExpansionType>(Y);
    assert(PX->getNumExpansions() == PY->getNumExpansions());
    return Ctx.getPackExpansionType(
        Ctx.getCommonSugaredType(PX->getPattern(), PY->getPattern()),
        PX->getNumExpansions(), false);
  }
  case Type::Pipe: {
    const auto *PX = cast<PipeType>(X), *PY = cast<PipeType>(Y);
    assert(PX->isReadOnly() == PY->isReadOnly());
    auto MP = PX->isReadOnly() ? &ASTContext::getReadPipeType
                               : &ASTContext::getWritePipeType;
    return (Ctx.*MP)(getCommonElementType(Ctx, PX, PY));
  }
  case Type::TemplateTypeParm: {
    const auto *TX = cast<TemplateTypeParmType>(X),
               *TY = cast<TemplateTypeParmType>(Y);
    assert(TX->getDepth() == TY->getDepth());
    assert(TX->getIndex() == TY->getIndex());
    assert(TX->isParameterPack() == TY->isParameterPack());
    return Ctx.getTemplateTypeParmType(
        TX->getDepth(), TX->getIndex(), TX->isParameterPack(),
        getCommonDecl(TX->getDecl(), TY->getDecl()));
  }
  }
  llvm_unreachable("Unknown Type Class");
}

/// Build the common form of two sugar type nodes over a shared Underlying
/// type (the "sugar" step of getCommonSugaredType); returns a null QualType
/// when no common sugar node can be formed.
static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X,
                                       const Type *Y,
                                       SplitQualType Underlying) {
  Type::TypeClass TC = X->getTypeClass();
  if (TC != Y->getTypeClass())
    return QualType();
  switch (TC) {
#define UNEXPECTED_TYPE(Class, Kind)                                           \
  case Type::Class:                                                            \
    llvm_unreachable("Unexpected " Kind ": " #Class);
#define TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "dependent")
#include "clang/AST/TypeNodes.inc"

#define CANONICAL_TYPE(Class) UNEXPECTED_TYPE(Class,
"canonical") 13508 CANONICAL_TYPE(Atomic) 13509 CANONICAL_TYPE(BitInt) 13510 CANONICAL_TYPE(BlockPointer) 13511 CANONICAL_TYPE(Builtin) 13512 CANONICAL_TYPE(Complex) 13513 CANONICAL_TYPE(ConstantArray) 13514 CANONICAL_TYPE(ArrayParameter) 13515 CANONICAL_TYPE(ConstantMatrix) 13516 CANONICAL_TYPE(Enum) 13517 CANONICAL_TYPE(ExtVector) 13518 CANONICAL_TYPE(FunctionNoProto) 13519 CANONICAL_TYPE(FunctionProto) 13520 CANONICAL_TYPE(IncompleteArray) 13521 CANONICAL_TYPE(LValueReference) 13522 CANONICAL_TYPE(MemberPointer) 13523 CANONICAL_TYPE(ObjCInterface) 13524 CANONICAL_TYPE(ObjCObject) 13525 CANONICAL_TYPE(ObjCObjectPointer) 13526 CANONICAL_TYPE(Pipe) 13527 CANONICAL_TYPE(Pointer) 13528 CANONICAL_TYPE(Record) 13529 CANONICAL_TYPE(RValueReference) 13530 CANONICAL_TYPE(VariableArray) 13531 CANONICAL_TYPE(Vector) 13532 #undef CANONICAL_TYPE 13533 13534 #undef UNEXPECTED_TYPE 13535 13536 case Type::Adjusted: { 13537 const auto *AX = cast<AdjustedType>(X), *AY = cast<AdjustedType>(Y); 13538 QualType OX = AX->getOriginalType(), OY = AY->getOriginalType(); 13539 if (!Ctx.hasSameType(OX, OY)) 13540 return QualType(); 13541 // FIXME: It's inefficient to have to unify the original types. 13542 return Ctx.getAdjustedType(Ctx.getCommonSugaredType(OX, OY), 13543 Ctx.getQualifiedType(Underlying)); 13544 } 13545 case Type::Decayed: { 13546 const auto *DX = cast<DecayedType>(X), *DY = cast<DecayedType>(Y); 13547 QualType OX = DX->getOriginalType(), OY = DY->getOriginalType(); 13548 if (!Ctx.hasSameType(OX, OY)) 13549 return QualType(); 13550 // FIXME: It's inefficient to have to unify the original types. 
return Ctx.getDecayedType(Ctx.getCommonSugaredType(OX, OY),
                              Ctx.getQualifiedType(Underlying));
  }
  case Type::Attributed: {
    const auto *AX = cast<AttributedType>(X), *AY = cast<AttributedType>(Y);
    // Both nodes must carry the same attribute kind and equivalent modified
    // types for the sugar to be mergeable.
    AttributedType::Kind Kind = AX->getAttrKind();
    if (Kind != AY->getAttrKind())
      return QualType();
    QualType MX = AX->getModifiedType(), MY = AY->getModifiedType();
    if (!Ctx.hasSameType(MX, MY))
      return QualType();
    // FIXME: It's inefficient to have to unify the modified types.
    return Ctx.getAttributedType(Kind, Ctx.getCommonSugaredType(MX, MY),
                                 Ctx.getQualifiedType(Underlying));
  }
  case Type::BTFTagAttributed: {
    const auto *BX = cast<BTFTagAttributedType>(X);
    const BTFTypeTagAttr *AX = BX->getAttr();
    // The attribute is not uniqued, so just compare the tag.
    if (AX->getBTFTypeTag() !=
        cast<BTFTagAttributedType>(Y)->getAttr()->getBTFTypeTag())
      return QualType();
    return Ctx.getBTFTagAttributedType(AX, Ctx.getQualifiedType(Underlying));
  }
  case Type::Auto: {
    const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y);

    AutoTypeKeyword KW = AX->getKeyword();
    if (KW != AY->getKeyword())
      return QualType();

    // Keep the constraint only if both sides name a common concept AND their
    // constraint arguments unify; otherwise fall back to an unconstrained
    // auto rather than giving up entirely.
    ConceptDecl *CD = ::getCommonDecl(AX->getTypeConstraintConcept(),
                                      AY->getTypeConstraintConcept());
    SmallVector<TemplateArgument, 8> As;
    if (CD &&
        getCommonTemplateArguments(Ctx, As, AX->getTypeConstraintArguments(),
                                   AY->getTypeConstraintArguments())) {
      CD = nullptr; // The arguments differ, so make it unconstrained.
      As.clear();
    }

    // Both auto types can't be dependent, otherwise they wouldn't have been
    // sugar. This implies they can't contain unexpanded packs either.
    return Ctx.getAutoType(Ctx.getQualifiedType(Underlying), AX->getKeyword(),
                           /*IsDependent=*/false, /*IsPack=*/false, CD, As);
  }
  case Type::PackIndexing:
  case Type::Decltype:
    return QualType();
  case Type::DeducedTemplateSpecialization:
    // FIXME: Try to merge these.
    return QualType();

  case Type::Elaborated: {
    const auto *EX = cast<ElaboratedType>(X), *EY = cast<ElaboratedType>(Y);
    return Ctx.getElaboratedType(
        ::getCommonTypeKeyword(EX, EY), ::getCommonNNS(Ctx, EX, EY),
        Ctx.getQualifiedType(Underlying),
        ::getCommonDecl(EX->getOwnedTagDecl(), EY->getOwnedTagDecl()));
  }
  case Type::MacroQualified: {
    const auto *MX = cast<MacroQualifiedType>(X),
               *MY = cast<MacroQualifiedType>(Y);
    // Identifier pointers are uniqued, so pointer equality suffices here.
    const IdentifierInfo *IX = MX->getMacroIdentifier();
    if (IX != MY->getMacroIdentifier())
      return QualType();
    return Ctx.getMacroQualifiedType(Ctx.getQualifiedType(Underlying), IX);
  }
  case Type::SubstTemplateTypeParm: {
    const auto *SX = cast<SubstTemplateTypeParmType>(X),
               *SY = cast<SubstTemplateTypeParmType>(Y);
    // Associated declaration, parameter index and pack index must all agree.
    Decl *CD =
        ::getCommonDecl(SX->getAssociatedDecl(), SY->getAssociatedDecl());
    if (!CD)
      return QualType();
    unsigned Index = SX->getIndex();
    if (Index != SY->getIndex())
      return QualType();
    auto PackIndex = SX->getPackIndex();
    if (PackIndex != SY->getPackIndex())
      return QualType();
    return Ctx.getSubstTemplateTypeParmType(Ctx.getQualifiedType(Underlying),
                                            CD, Index, PackIndex);
  }
  case Type::ObjCTypeParam:
    // FIXME: Try to merge these.
    return QualType();
  case Type::Paren:
    return Ctx.getParenType(Ctx.getQualifiedType(Underlying));

  case Type::TemplateSpecialization: {
    const auto *TX = cast<TemplateSpecializationType>(X),
               *TY = cast<TemplateSpecializationType>(Y);
    TemplateName CTN = ::getCommonTemplateName(Ctx, TX->getTemplateName(),
                                               TY->getTemplateName());
    if (!CTN.getAsVoidPointer())
      return QualType();
    SmallVector<TemplateArgument, 8> Args;
    if (getCommonTemplateArguments(Ctx, Args, TX->template_arguments(),
                                   TY->template_arguments()))
      return QualType();
    return Ctx.getTemplateSpecializationType(CTN, Args,
                                             Ctx.getQualifiedType(Underlying));
  }
  case Type::Typedef: {
    const auto *TX = cast<TypedefType>(X), *TY = cast<TypedefType>(Y);
    const TypedefNameDecl *CD = ::getCommonDecl(TX->getDecl(), TY->getDecl());
    if (!CD)
      return QualType();
    return Ctx.getTypedefType(CD, Ctx.getQualifiedType(Underlying));
  }
  case Type::TypeOf: {
    // The common sugar between two typeof expressions, where one is
    // potentially a typeof_unqual and the other is not, we unify to the
    // qualified type as that retains the most information along with the type.
    // We only return a typeof_unqual type when both types are unqual types.
TypeOfKind Kind = TypeOfKind::Qualified;
    if (cast<TypeOfType>(X)->getKind() == cast<TypeOfType>(Y)->getKind() &&
        cast<TypeOfType>(X)->getKind() == TypeOfKind::Unqualified)
      Kind = TypeOfKind::Unqualified;
    return Ctx.getTypeOfType(Ctx.getQualifiedType(Underlying), Kind);
  }
  case Type::TypeOfExpr:
    return QualType();

  case Type::UnaryTransform: {
    const auto *UX = cast<UnaryTransformType>(X),
               *UY = cast<UnaryTransformType>(Y);
    // The transform kind and (equivalent) base types must match.
    UnaryTransformType::UTTKind KX = UX->getUTTKind();
    if (KX != UY->getUTTKind())
      return QualType();
    QualType BX = UX->getBaseType(), BY = UY->getBaseType();
    if (!Ctx.hasSameType(BX, BY))
      return QualType();
    // FIXME: It's inefficient to have to unify the base types.
    return Ctx.getUnaryTransformType(Ctx.getCommonSugaredType(BX, BY),
                                     Ctx.getQualifiedType(Underlying), KX);
  }
  case Type::Using: {
    const auto *UX = cast<UsingType>(X), *UY = cast<UsingType>(Y);
    const UsingShadowDecl *CD =
        ::getCommonDecl(UX->getFoundDecl(), UY->getFoundDecl());
    if (!CD)
      return QualType();
    return Ctx.getUsingType(CD, Ctx.getQualifiedType(Underlying));
  }
  case Type::CountAttributed: {
    const auto *DX = cast<CountAttributedType>(X),
               *DY = cast<CountAttributedType>(Y);
    if (DX->isCountInBytes() != DY->isCountInBytes())
      return QualType();
    if (DX->isOrNull() != DY->isOrNull())
      return QualType();
    Expr *CEX = DX->getCountExpr();
    Expr *CEY = DY->getCountExpr();
    llvm::ArrayRef<clang::TypeCoupledDeclRefInfo> CDX = DX->getCoupledDecls();
    if (Ctx.hasSameExpr(CEX, CEY))
      return Ctx.getCountAttributedType(Ctx.getQualifiedType(Underlying), CEX,
                                        DX->isCountInBytes(), DX->isOrNull(),
                                        CDX);
    if (!CEX->isIntegerConstantExpr(Ctx) || !CEY->isIntegerConstantExpr(Ctx))
      return QualType();
    // Two declarations with the same integer constant may still differ in
    // their expression pointers, so we need to evaluate them.
    llvm::APSInt VX = *CEX->getIntegerConstantExpr(Ctx);
    llvm::APSInt VY = *CEY->getIntegerConstantExpr(Ctx);
    if (VX != VY)
      return QualType();
    return Ctx.getCountAttributedType(Ctx.getQualifiedType(Underlying), CEX,
                                      DX->isCountInBytes(), DX->isOrNull(),
                                      CDX);
  }
  }
  llvm_unreachable("Unhandled Type Class");
}

// Strip sugar nodes off T one single-desugaring step at a time, accumulating
// each layer's qualifiers into QTotal. The stripped nodes are returned
// outermost-first; on return T refers to the fully desugared node.
static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) {
  SmallVector<SplitQualType, 8> R;
  while (true) {
    QTotal.addConsistentQualifiers(T.Quals);
    QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType();
    // Desugaring reached a fixed point: T now holds the underlying node.
    if (NT == QualType(T.Ty, 0))
      break;
    R.push_back(T);
    T = NT.split();
  }
  return R;
}

// Compute a type that is as sugared as possible while being a valid
// representation of both X and Y, which must have the same canonical type
// (same unqualified canonical type when Unqualified is set - asserted below).
QualType ASTContext::getCommonSugaredType(QualType X, QualType Y,
                                          bool Unqualified) {
  assert(Unqualified ? hasSameUnqualifiedType(X, Y) : hasSameType(X, Y));
  if (X == Y)
    return X;
  if (!Unqualified) {
    // A canonical type has no sugar to merge; it is already the answer.
    if (X.isCanonical())
      return X;
    if (Y.isCanonical())
      return Y;
  }

  SplitQualType SX = X.split(), SY = Y.split();
  Qualifiers QX, QY;
  // Desugar SX and SY, setting the sugar and qualifiers aside into Xs and Ys,
  // until we reach their underlying "canonical nodes". Note these are not
  // necessarily canonical types, as they may still have sugared properties.
  // QX and QY will store the sum of all qualifiers in Xs and Ys respectively.
  auto Xs = ::unwrapSugar(SX, QX), Ys = ::unwrapSugar(SY, QY);
  if (SX.Ty != SY.Ty) {
    // The canonical nodes differ. Build a common canonical node out of the two,
    // unifying their sugar. This may recurse back here.
SX.Ty =
        ::getCommonNonSugarTypeNode(*this, SX.Ty, QX, SY.Ty, QY).getTypePtr();
  } else {
    // The canonical nodes were identical: We may have desugared too much.
    // Add any common sugar back in.
    while (!Xs.empty() && !Ys.empty() && Xs.back().Ty == Ys.back().Ty) {
      QX -= SX.Quals;
      QY -= SY.Quals;
      SX = Xs.pop_back_val();
      SY = Ys.pop_back_val();
    }
  }
  if (Unqualified)
    QX = Qualifiers::removeCommonQualifiers(QX, QY);
  else
    assert(QX == QY);

  // Even though the remaining sugar nodes in Xs and Ys differ, some may be
  // related. Walk up these nodes, unifying them and adding the result.
  while (!Xs.empty() && !Ys.empty()) {
    auto Underlying = SplitQualType(
        SX.Ty, Qualifiers::removeCommonQualifiers(SX.Quals, SY.Quals));
    SX = Xs.pop_back_val();
    SY = Ys.pop_back_val();
    SX.Ty = ::getCommonSugarTypeNode(*this, SX.Ty, SY.Ty, Underlying)
                .getTypePtrOrNull();
    // Stop at the first pair which is unrelated.
    if (!SX.Ty) {
      SX.Ty = Underlying.Ty;
      break;
    }
    QX -= Underlying.Quals;
  };

  // Add back the missing accumulated qualifiers, which were stripped off
  // with the sugar nodes we could not unify.
  QualType R = getQualifiedType(SX.Ty, QX);
  assert(Unqualified ? hasSameUnqualifiedType(R, X) : hasSameType(R, X));
  return R;
}

// Map a saturated fixed-point type to its unsaturated counterpart.
// Unsaturated types are returned unchanged.
QualType ASTContext::getCorrespondingUnsaturatedType(QualType Ty) const {
  assert(Ty->isFixedPointType());

  if (Ty->isUnsaturatedFixedPointType())
    return Ty;

  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a saturated fixed point type!");
  case BuiltinType::SatShortAccum:
    return ShortAccumTy;
  case BuiltinType::SatAccum:
    return AccumTy;
  case BuiltinType::SatLongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return UnsignedShortAccumTy;
  case BuiltinType::SatUAccum:
    return UnsignedAccumTy;
  case BuiltinType::SatULongAccum:
    return UnsignedLongAccumTy;
  case BuiltinType::SatShortFract:
    return ShortFractTy;
  case BuiltinType::SatFract:
    return FractTy;
  case BuiltinType::SatLongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return UnsignedShortFractTy;
  case BuiltinType::SatUFract:
    return UnsignedFractTy;
  case BuiltinType::SatULongFract:
    return UnsignedLongFractTy;
  }
}

// Map an unsaturated fixed-point type to its saturated counterpart.
// Saturated types are returned unchanged.
QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const {
  assert(Ty->isFixedPointType());

  if (Ty->isSaturatedFixedPointType()) return Ty;

  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
    return SatShortAccumTy;
  case BuiltinType::Accum:
    return SatAccumTy;
  case BuiltinType::LongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortAccum:
    return SatUnsignedShortAccumTy;
  case BuiltinType::UAccum:
    return SatUnsignedAccumTy;
  case BuiltinType::ULongAccum:
    return SatUnsignedLongAccumTy;
  case BuiltinType::ShortFract:
    return SatShortFractTy;
case BuiltinType::Fract:
    return SatFractTy;
  case BuiltinType::LongFract:
    return SatLongFractTy;
  case BuiltinType::UShortFract:
    return SatUnsignedShortFractTy;
  case BuiltinType::UFract:
    return SatUnsignedFractTy;
  case BuiltinType::ULongFract:
    return SatUnsignedLongFractTy;
  }
}

// Translate a builtin (numeric) address space to the language address space,
// dispatching on the active language mode; falls back to a direct
// target-address-space mapping otherwise.
LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const {
  if (LangOpts.OpenCL)
    return getTargetInfo().getOpenCLBuiltinAddressSpace(AS);

  if (LangOpts.CUDA)
    return getTargetInfo().getCUDABuiltinAddressSpace(AS);

  return getLangASFromTargetAS(AS);
}

// Explicitly instantiate this in case a Redeclarable<T> is used from a TU that
// doesn't include ASTContext.h
template
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue(
        const clang::ASTContext &Ctx, Decl *Value);

// Return the target-defined scale (fractional bits) for the given fixed-point
// type; saturated and unsaturated variants share the same scale.
unsigned char ASTContext::getFixedPointScale(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
  case BuiltinType::SatShortAccum:
    return Target.getShortAccumScale();
  case BuiltinType::Accum:
  case BuiltinType::SatAccum:
    return Target.getAccumScale();
  case BuiltinType::LongAccum:
  case BuiltinType::SatLongAccum:
    return Target.getLongAccumScale();
  case BuiltinType::UShortAccum:
  case BuiltinType::SatUShortAccum:
    return Target.getUnsignedShortAccumScale();
  case BuiltinType::UAccum:
  case BuiltinType::SatUAccum:
    return Target.getUnsignedAccumScale();
  case BuiltinType::ULongAccum:
  case BuiltinType::SatULongAccum:
    return Target.getUnsignedLongAccumScale();
  case BuiltinType::ShortFract:
  case BuiltinType::SatShortFract:
    return Target.getShortFractScale();
  case BuiltinType::Fract:
  case BuiltinType::SatFract:
    return Target.getFractScale();
  case BuiltinType::LongFract:
  case BuiltinType::SatLongFract:
    return Target.getLongFractScale();
  case BuiltinType::UShortFract:
  case BuiltinType::SatUShortFract:
    return Target.getUnsignedShortFractScale();
  case BuiltinType::UFract:
  case BuiltinType::SatUFract:
    return Target.getUnsignedFractScale();
  case BuiltinType::ULongFract:
  case BuiltinType::SatULongFract:
    return Target.getUnsignedLongFractScale();
  }
}

// Return the number of integral bits for the given fixed-point type.
unsigned char ASTContext::getFixedPointIBits(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
  case BuiltinType::SatShortAccum:
    return Target.getShortAccumIBits();
  case BuiltinType::Accum:
  case BuiltinType::SatAccum:
    return Target.getAccumIBits();
  case BuiltinType::LongAccum:
  case BuiltinType::SatLongAccum:
    return Target.getLongAccumIBits();
  case BuiltinType::UShortAccum:
  case BuiltinType::SatUShortAccum:
    return Target.getUnsignedShortAccumIBits();
  case BuiltinType::UAccum:
  case BuiltinType::SatUAccum:
    return Target.getUnsignedAccumIBits();
  case BuiltinType::ULongAccum:
  case BuiltinType::SatULongAccum:
    return Target.getUnsignedLongAccumIBits();
  // All _Fract variants are purely fractional: zero integral bits.
  case BuiltinType::ShortFract:
  case BuiltinType::SatShortFract:
  case BuiltinType::Fract:
  case BuiltinType::SatFract:
  case BuiltinType::LongFract:
  case BuiltinType::SatLongFract:
  case BuiltinType::UShortFract:
  case BuiltinType::SatUShortFract:
  case BuiltinType::UFract:
  case BuiltinType::SatUFract:
  case BuiltinType::ULongFract:
  case BuiltinType::SatULongFract:
    return 0;
  }
}

// Build the llvm::FixedPointSemantics for a fixed-point type; integer types
// are accepted too and mapped through GetIntegerSemantics.
llvm::FixedPointSemantics
ASTContext::getFixedPointSemantics(QualType Ty) const {
  assert((Ty->isFixedPointType() || Ty->isIntegerType()) &&
         "Can only get the fixed point semantics for a "
         "fixed point or integer type.");
  if (Ty->isIntegerType())
    return llvm::FixedPointSemantics::GetIntegerSemantics(
        getIntWidth(Ty), Ty->isSignedIntegerType());

  bool isSigned = Ty->isSignedFixedPointType();
  return llvm::FixedPointSemantics(
      static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned,
      Ty->isSaturatedFixedPointType(),
      // Padding only applies to unsigned types, and only when the target says
      // its unsigned fixed-point types carry a padding bit.
      !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding());
}

llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const {
  assert(Ty->isFixedPointType());
  return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty));
}

llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const {
  assert(Ty->isFixedPointType());
  return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty));
}

// Map an unsigned (possibly saturated) fixed-point type to the signed type of
// the same rank and saturation.
QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const {
  assert(Ty->isUnsignedFixedPointType() &&
         "Expected unsigned fixed point type");

  switch (Ty->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::UShortAccum:
    return ShortAccumTy;
  case BuiltinType::UAccum:
    return AccumTy;
  case BuiltinType::ULongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return SatShortAccumTy;
  case BuiltinType::SatUAccum:
    return SatAccumTy;
  case BuiltinType::SatULongAccum:
    return SatLongAccumTy;
case BuiltinType::UShortFract:
    return ShortFractTy;
  case BuiltinType::UFract:
    return FractTy;
  case BuiltinType::ULongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return SatShortFractTy;
  case BuiltinType::SatUFract:
    return SatFractTy;
  case BuiltinType::SatULongFract:
    return SatLongFractTy;
  default:
    llvm_unreachable("Unexpected unsigned fixed point type");
  }
}

// Given a list of FMV features, return a concatenated list of the
// corresponding backend features (which may contain duplicates).
static std::vector<std::string> getFMVBackendFeaturesFor(
    const llvm::SmallVectorImpl<StringRef> &FMVFeatStrings) {
  std::vector<std::string> BackendFeats;
  // Unparseable FMV extension names are silently skipped.
  // NOTE(review): the inner loop variable shadows the outer `F`; harmless
  // here but worth renaming upstream.
  for (StringRef F : FMVFeatStrings)
    if (auto FMVExt = llvm::AArch64::parseFMVExtension(F))
      for (StringRef F : FMVExt->getImpliedFeatures())
        BackendFeats.push_back(F.str());
  return BackendFeats;
}

// Parse a target attribute's feature string and drop any feature the current
// target does not recognize.
ParsedTargetAttr
ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const {
  assert(TD != nullptr);
  ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(TD->getFeaturesStr());

  // substr(1) strips the leading character before validation — presumably the
  // '+'/'-' feature prefix; verify against parseTargetAttr's output format.
  llvm::erase_if(ParsedAttr.Features, [&](const std::string &Feat) {
    return !Target->isValidFeatureName(StringRef{Feat}.substr(1));
  });
  return ParsedAttr;
}

// Convenience overload: compute the feature map for a FunctionDecl, or the
// target's default CPU/features when FD is null.
void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       const FunctionDecl *FD) const {
  if (FD)
    getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD));
  else
    Target->initFeatureMap(FeatureMap, getDiagnostics(),
                           Target->getTargetOpts().CPU,
                           Target->getTargetOpts().Features);
}

// Fills in the supplied string map with the set of target features for the
// passed in function.
void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       GlobalDecl GD) const {
  StringRef TargetCPU = Target->getTargetOpts().CPU;
  const FunctionDecl *FD = GD.getDecl()->getAsFunction();
  // Multi-versioning attributes are checked in priority order:
  // target > cpu_specific > target_clones > target_version > none.
  if (const auto *TD = FD->getAttr<TargetAttr>()) {
    ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD);

    // Make a copy of the features as passed on the command line into the
    // beginning of the additional features from the function to override.
    // AArch64 handles command line option features in parseTargetAttr().
    if (!Target->getTriple().isAArch64())
      ParsedAttr.Features.insert(
          ParsedAttr.Features.begin(),
          Target->getTargetOpts().FeaturesAsWritten.begin(),
          Target->getTargetOpts().FeaturesAsWritten.end());

    if (ParsedAttr.CPU != "" && Target->isValidCPUName(ParsedAttr.CPU))
      TargetCPU = ParsedAttr.CPU;

    // Now populate the feature map, first with the TargetCPU which is either
    // the default or a new one from the target attribute string. Then we'll use
    // the passed in features (FeaturesAsWritten) along with the new ones from
    // the attribute.
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU,
                           ParsedAttr.Features);
  } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
    // cpu_specific: look up the dispatch features for the CPU selected by this
    // multi-version index.
    llvm::SmallVector<StringRef, 32> FeaturesTmp;
    Target->getCPUSpecificCPUDispatchFeatures(
        SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp);
    std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
    // Command-line features go first so the attribute's features override.
    Features.insert(Features.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
    if (Target->getTriple().isAArch64()) {
      // AArch64 target_clones entries are FMV feature names; translate them
      // into backend feature strings.
      llvm::SmallVector<StringRef, 8> Feats;
      TC->getFeatures(Feats, GD.getMultiVersionIndex());
      std::vector<std::string> Features = getFMVBackendFeaturesFor(Feats);
      Features.insert(Features.begin(),
                      Target->getTargetOpts().FeaturesAsWritten.begin(),
                      Target->getTargetOpts().FeaturesAsWritten.end());
      Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
    } else {
      // Other targets: a clone entry is either "arch=<cpu>", "default", or a
      // single feature name which gets a '+' prefix.
      std::vector<std::string> Features;
      StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex());
      if (VersionStr.starts_with("arch="))
        TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1);
      else if (VersionStr != "default")
        Features.push_back((StringRef{"+"} + VersionStr).str());
      Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
    }
  } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) {
    llvm::SmallVector<StringRef, 8> Feats;
    TV->getFeatures(Feats);
    std::vector<std::string> Features = getFMVBackendFeaturesFor(Feats);
    Features.insert(Features.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else {
    // No multi-versioning attribute: use the precomputed default feature map.
    FeatureMap = Target->getTargetOpts().FeatureMap;
  }
}

// Allocate a fresh OMPTraitInfo owned by this ASTContext.
OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
  OMPTraitInfoVector.emplace_back(new OMPTraitInfo());
  return *OMPTraitInfoVector.back();
}

// Print a #pragma section's introducing decl if known, otherwise a generic
// description.
const StreamingDiagnostic &clang::
operator<<(const StreamingDiagnostic &DB,
           const ASTContext::SectionInfo &Section) {
  if (Section.Decl)
    return DB << Section.Decl;
  return DB << "a prior #pragma section";
}

bool ASTContext::mayExternalize(const Decl *D) const {
  bool IsInternalVar =
      isa<VarDecl>(D) &&
      basicGVALinkageForVariable(*this, cast<VarDecl>(D)) == GVA_Internal;
  bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
                              !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
                             (D->hasAttr<CUDAConstantAttr>() &&
                              !D->getAttr<CUDAConstantAttr>()->isImplicit());
  // CUDA/HIP: managed variables need to be externalized since it is
  // a declaration in IR, therefore cannot have internal linkage. Kernels in
  // anonymous name space needs to be externalized to avoid duplicate symbols.
14164 return (IsInternalVar && 14165 (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) || 14166 (D->hasAttr<CUDAGlobalAttr>() && 14167 basicGVALinkageForFunction(*this, cast<FunctionDecl>(D)) == 14168 GVA_Internal); 14169 } 14170 14171 bool ASTContext::shouldExternalize(const Decl *D) const { 14172 return mayExternalize(D) && 14173 (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() || 14174 CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D))); 14175 } 14176 14177 StringRef ASTContext::getCUIDHash() const { 14178 if (!CUIDHash.empty()) 14179 return CUIDHash; 14180 if (LangOpts.CUID.empty()) 14181 return StringRef(); 14182 CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true); 14183 return CUIDHash; 14184 } 14185 14186 const CXXRecordDecl * 14187 ASTContext::baseForVTableAuthentication(const CXXRecordDecl *ThisClass) { 14188 assert(ThisClass); 14189 assert(ThisClass->isPolymorphic()); 14190 const CXXRecordDecl *PrimaryBase = ThisClass; 14191 while (1) { 14192 assert(PrimaryBase); 14193 assert(PrimaryBase->isPolymorphic()); 14194 auto &Layout = getASTRecordLayout(PrimaryBase); 14195 auto Base = Layout.getPrimaryBase(); 14196 if (!Base || Base == PrimaryBase || !Base->isPolymorphic()) 14197 break; 14198 PrimaryBase = Base; 14199 } 14200 return PrimaryBase; 14201 } 14202 14203 bool ASTContext::useAbbreviatedThunkName(GlobalDecl VirtualMethodDecl, 14204 StringRef MangledName) { 14205 auto *Method = cast<CXXMethodDecl>(VirtualMethodDecl.getDecl()); 14206 assert(Method->isVirtual()); 14207 bool DefaultIncludesPointerAuth = 14208 LangOpts.PointerAuthCalls || LangOpts.PointerAuthIntrinsics; 14209 14210 if (!DefaultIncludesPointerAuth) 14211 return true; 14212 14213 auto Existing = ThunksToBeAbbreviated.find(VirtualMethodDecl); 14214 if (Existing != ThunksToBeAbbreviated.end()) 14215 return Existing->second.contains(MangledName.str()); 14216 14217 std::unique_ptr<MangleContext> Mangler(createMangleContext()); 14218 
llvm::StringMap<llvm::SmallVector<std::string, 2>> Thunks; 14219 auto VtableContext = getVTableContext(); 14220 if (const auto *ThunkInfos = VtableContext->getThunkInfo(VirtualMethodDecl)) { 14221 auto *Destructor = dyn_cast<CXXDestructorDecl>(Method); 14222 for (const auto &Thunk : *ThunkInfos) { 14223 SmallString<256> ElidedName; 14224 llvm::raw_svector_ostream ElidedNameStream(ElidedName); 14225 if (Destructor) 14226 Mangler->mangleCXXDtorThunk(Destructor, VirtualMethodDecl.getDtorType(), 14227 Thunk, /* elideOverrideInfo */ true, 14228 ElidedNameStream); 14229 else 14230 Mangler->mangleThunk(Method, Thunk, /* elideOverrideInfo */ true, 14231 ElidedNameStream); 14232 SmallString<256> MangledName; 14233 llvm::raw_svector_ostream mangledNameStream(MangledName); 14234 if (Destructor) 14235 Mangler->mangleCXXDtorThunk(Destructor, VirtualMethodDecl.getDtorType(), 14236 Thunk, /* elideOverrideInfo */ false, 14237 mangledNameStream); 14238 else 14239 Mangler->mangleThunk(Method, Thunk, /* elideOverrideInfo */ false, 14240 mangledNameStream); 14241 14242 if (Thunks.find(ElidedName) == Thunks.end()) 14243 Thunks[ElidedName] = {}; 14244 Thunks[ElidedName].push_back(std::string(MangledName)); 14245 } 14246 } 14247 llvm::StringSet<> SimplifiedThunkNames; 14248 for (auto &ThunkList : Thunks) { 14249 llvm::sort(ThunkList.second); 14250 SimplifiedThunkNames.insert(ThunkList.second[0]); 14251 } 14252 bool Result = SimplifiedThunkNames.contains(MangledName); 14253 ThunksToBeAbbreviated[VirtualMethodDecl] = std::move(SimplifiedThunkNames); 14254 return Result; 14255 } 14256